xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_nvme.c (revision b830f94f)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2017 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_nvme.h"
8 #include <linux/scatterlist.h>
9 #include <linux/delay.h>
10 #include <linux/nvme.h>
11 #include <linux/nvme-fc.h>
12 
13 static struct nvme_fc_port_template qla_nvme_fc_transport;
14 
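/*
 * Register an fcport with the NVMe-FC transport as a remote port.  The
 * local port is registered on first use; registration of the remote
 * port is skipped unless host NVMe is enabled and the fcport's PRLI
 * service parameters advertise the NVMe target or discovery role.
 */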
15 int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
16 {
17 	struct qla_nvme_rport *rport;
18 	struct nvme_fc_port_info req;
19 	int ret;
20 
21 	if (!IS_ENABLED(CONFIG_NVME_FC))
22 		return 0;
23 
24 	if (!vha->flags.nvme_enabled) {
25 		ql_log(ql_log_info, vha, 0x2100,
26 		    "%s: Not registering target since Host NVME is not enabled\n",
27 		    __func__);
28 		return 0;
29 	}
30 
31 	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
32 		return 0;
33 
34 	if (!(fcport->nvme_prli_service_param &
35 	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
36 		(fcport->nvme_flag & NVME_FLAG_REGISTERED))
37 		return 0;
38 
39 	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
40 
41 	memset(&req, 0, sizeof(struct nvme_fc_port_info));
42 	req.port_name = wwn_to_u64(fcport->port_name);
43 	req.node_name = wwn_to_u64(fcport->node_name);
44 	req.port_role = 0;
45 	req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;
46 
47 	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
48 		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
49 
50 	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
51 		req.port_role |= FC_PORT_ROLE_NVME_TARGET;
52 
53 	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
54 		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
55 
56 	req.port_id = fcport->d_id.b24;
57 
58 	ql_log(ql_log_info, vha, 0x2102,
59 	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
60 	    __func__, req.node_name, req.port_name,
61 	    req.port_id);
62 
63 	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
64 	    &fcport->nvme_remote_port);
65 	if (ret) {
66 		ql_log(ql_log_warn, vha, 0x212e,
67 		    "Failed to register remote port. Transport returned %d\n",
68 		    ret);
69 		return ret;
70 	}
71 
72 	rport = fcport->nvme_remote_port->private;
73 	rport->fcport = fcport;
74 
75 	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
76 	return 0;
77 }
78 
79 /* Allocate a queue for NVMe traffic */
80 static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
81     unsigned int qidx, u16 qsize, void **handle)
82 {
83 	struct scsi_qla_host *vha;
84 	struct qla_hw_data *ha;
85 	struct qla_qpair *qpair;
86 
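	/* Queue index 0, the NVMe admin queue, is remapped to hardware queue 1. */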
87 	if (!qidx)
88 		qidx++;
89 
90 	vha = (struct scsi_qla_host *)lport->private;
91 	ha = vha->hw;
92 
93 	ql_log(ql_log_info, vha, 0x2104,
94 	    "%s: handle %p, idx =%d, qsize %d\n",
95 	    __func__, handle, qidx, qsize);
96 
97 	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
98 		ql_log(ql_log_warn, vha, 0x212f,
99 		    "%s: Illegal qidx=%d. Max=%d\n",
100 		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
101 		return -EINVAL;
102 	}
103 
104 	if (ha->queue_pair_map[qidx]) {
105 		*handle = ha->queue_pair_map[qidx];
106 		ql_log(ql_log_info, vha, 0x2121,
107 		    "Returning existing qpair of %p for idx=%x\n",
108 		    *handle, qidx);
109 		return 0;
110 	}
111 
112 	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
113 	if (qpair == NULL) {
114 		ql_log(ql_log_warn, vha, 0x2122,
115 		    "Failed to allocate qpair\n");
116 		return -EINVAL;
117 	}
118 	*handle = qpair;
119 
120 	return 0;
121 }
122 
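/*
 * Final kref release for an NVMe FCP command SRB: detach the SRB from
 * the transport request under cmd_lock, report the received response
 * length (or zero the lengths on error), complete the request through
 * fd->done(), and return the SRB to its queue pair.
 */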
123 static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
124 {
125 	struct srb *sp = container_of(kref, struct srb, cmd_kref);
126 	struct nvme_private *priv = (struct nvme_private *)sp->priv;
127 	struct nvmefc_fcp_req *fd;
128 	struct srb_iocb *nvme;
129 	unsigned long flags;
130 
131 	if (!priv)
132 		goto out;
133 
134 	nvme = &sp->u.iocb_cmd;
135 	fd = nvme->u.nvme.desc;
136 
137 	spin_lock_irqsave(&priv->cmd_lock, flags);
138 	priv->sp = NULL;
139 	sp->priv = NULL;
140 	if (priv->comp_status == QLA_SUCCESS) {
141 		fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
142 	} else {
143 		fd->rcv_rsplen = 0;
144 		fd->transferred_length = 0;
145 	}
146 	fd->status = 0;
147 	spin_unlock_irqrestore(&priv->cmd_lock, flags);
148 
149 	fd->done(fd);
150 out:
151 	qla2xxx_rel_qpair_sp(sp->qpair, sp);
152 }
153 
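/*
 * Final kref release for an NVMe LS request SRB: detach the SRB under
 * cmd_lock, complete the LS request with the saved completion status,
 * and free the SRB.
 */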
154 static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
155 {
156 	struct srb *sp = container_of(kref, struct srb, cmd_kref);
157 	struct nvme_private *priv = (struct nvme_private *)sp->priv;
158 	struct nvmefc_ls_req *fd;
159 	unsigned long flags;
160 
161 	if (!priv)
162 		goto out;
163 
164 	spin_lock_irqsave(&priv->cmd_lock, flags);
165 	priv->sp = NULL;
166 	sp->priv = NULL;
167 	spin_unlock_irqrestore(&priv->cmd_lock, flags);
168 
169 	fd = priv->fd;
170 	fd->done(fd, priv->comp_status);
171 out:
172 	qla2x00_rel_sp(sp);
173 }
174 
175 static void qla_nvme_ls_complete(struct work_struct *work)
176 {
177 	struct nvme_private *priv =
178 		container_of(work, struct nvme_private, ls_work);
179 
180 	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
181 }
182 
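/*
 * Completion callback for an LS SRB; the final kref_put (and with it
 * the transport's fd->done callback) is deferred to a work item.
 */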
183 static void qla_nvme_sp_ls_done(void *ptr, int res)
184 {
185 	srb_t *sp = ptr;
186 	struct nvme_private *priv;
187 
188 	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
189 		return;
190 
191 	if (res)
192 		res = -EINVAL;
193 
194 	priv = (struct nvme_private *)sp->priv;
195 	priv->comp_status = res;
196 	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
197 	schedule_work(&priv->ls_work);
198 }
199 
200 /* It is assumed that the qpair lock is held. */
201 static void qla_nvme_sp_done(void *ptr, int res)
202 {
203 	srb_t *sp = ptr;
204 	struct nvme_private *priv = (struct nvme_private *)sp->priv;
205 
206 	priv->comp_status = res;
207 	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
208 
209 	return;
210 }
211 
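/*
 * Worker that issues the firmware abort for an outstanding NVMe SRB.
 * The abort is skipped when the firmware has stopped and the fcport is
 * deleted; when the host is shutting down the SRB is completed locally
 * instead.  The reference taken before the work was scheduled is
 * dropped on exit.
 */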
212 static void qla_nvme_abort_work(struct work_struct *work)
213 {
214 	struct nvme_private *priv =
215 		container_of(work, struct nvme_private, abort_work);
216 	srb_t *sp = priv->sp;
217 	fc_port_t *fcport = sp->fcport;
218 	struct qla_hw_data *ha = fcport->vha->hw;
219 	int rval;
220 
221 	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
222 	       "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
223 	       __func__, sp, sp->handle, fcport, fcport->deleted);
224 
225 	if (!ha->flags.fw_started && fcport->deleted)
226 		goto out;
227 
228 	if (ha->flags.host_shutting_down) {
229 		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
230 		    "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
231 		    __func__, sp, sp->type, atomic_read(&sp->ref_count));
232 		sp->done(sp, 0);
233 		goto out;
234 	}
235 
236 	rval = ha->isp_ops->abort_command(sp);
237 
238 	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
239 	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
240 	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
241 	    sp, sp->handle, fcport, rval);
242 
243 out:
244 	/* kref_get was done before the work was scheduled. */
245 	kref_put(&sp->cmd_kref, sp->put_fn);
246 }
247 
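/*
 * .ls_abort entry point: take a reference on the SRB under cmd_lock,
 * bailing out if the command has already completed, and schedule the
 * abort worker.
 */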
248 static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
249     struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
250 {
251 	struct nvme_private *priv = fd->private;
252 	unsigned long flags;
253 
254 	spin_lock_irqsave(&priv->cmd_lock, flags);
255 	if (!priv->sp) {
256 		spin_unlock_irqrestore(&priv->cmd_lock, flags);
257 		return;
258 	}
259 
260 	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
261 		spin_unlock_irqrestore(&priv->cmd_lock, flags);
262 		return;
263 	}
264 	spin_unlock_irqrestore(&priv->cmd_lock, flags);
265 
266 	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
267 	schedule_work(&priv->abort_work);
268 }
269 
270 
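/*
 * .ls_req entry point: allocate an SRB for the LS payload, map the
 * request buffer for DMA, and hand the IOCB to the firmware via
 * qla2x00_start_sp().
 */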
271 static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
272     struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
273 {
274 	struct qla_nvme_rport *qla_rport = rport->private;
275 	fc_port_t *fcport = qla_rport->fcport;
276 	struct srb_iocb   *nvme;
277 	struct nvme_private *priv = fd->private;
278 	struct scsi_qla_host *vha;
279 	int     rval = QLA_FUNCTION_FAILED;
280 	struct qla_hw_data *ha;
281 	srb_t           *sp;
282 
284 	if (!fcport || fcport->deleted)
285 		return rval;
286 
287 	vha = fcport->vha;
288 	ha = vha->hw;
289 
290 	if (!ha->flags.fw_started)
291 		return rval;
292 
293 	/* Alloc SRB structure */
294 	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
295 	if (!sp)
296 		return rval;
297 
298 	sp->type = SRB_NVME_LS;
299 	sp->name = "nvme_ls";
300 	sp->done = qla_nvme_sp_ls_done;
301 	sp->put_fn = qla_nvme_release_ls_cmd_kref;
302 	sp->priv = (void *)priv;
303 	priv->sp = sp;
304 	kref_init(&sp->cmd_kref);
305 	spin_lock_init(&priv->cmd_lock);
306 	nvme = &sp->u.iocb_cmd;
307 	priv->fd = fd;
308 	nvme->u.nvme.desc = fd;
309 	nvme->u.nvme.dir = 0;
310 	nvme->u.nvme.dl = 0;
311 	nvme->u.nvme.cmd_len = fd->rqstlen;
312 	nvme->u.nvme.rsp_len = fd->rsplen;
313 	nvme->u.nvme.rsp_dma = fd->rspdma;
314 	nvme->u.nvme.timeout_sec = fd->timeout;
315 	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
316 	    fd->rqstlen, DMA_TO_DEVICE);
317 	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
318 	    fd->rqstlen, DMA_TO_DEVICE);
319 
320 	rval = qla2x00_start_sp(sp);
321 	if (rval != QLA_SUCCESS) {
322 		ql_log(ql_log_warn, vha, 0x700e,
323 		    "qla2x00_start_sp failed = %d\n", rval);
324 		wake_up(&sp->nvme_ls_waitq);
325 		sp->priv = NULL;
326 		priv->sp = NULL;
327 		qla2x00_rel_sp(sp);
328 		return rval;
329 	}
330 
331 	return rval;
332 }
333 
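/*
 * .fcp_abort entry point: same flow as the LS abort, take a reference
 * on the SRB under cmd_lock and defer the firmware abort to a worker.
 */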
334 static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
335     struct nvme_fc_remote_port *rport, void *hw_queue_handle,
336     struct nvmefc_fcp_req *fd)
337 {
338 	struct nvme_private *priv = fd->private;
339 	unsigned long flags;
340 
341 	spin_lock_irqsave(&priv->cmd_lock, flags);
342 	if (!priv->sp) {
343 		spin_unlock_irqrestore(&priv->cmd_lock, flags);
344 		return;
345 	}
346 	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
347 		spin_unlock_irqrestore(&priv->cmd_lock, flags);
348 		return;
349 	}
350 	spin_unlock_irqrestore(&priv->cmd_lock, flags);
351 
352 	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
353 	schedule_work(&priv->abort_work);
354 }
355 
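/*
 * Build and ring a Command Type NVME IOCB on the queue pair's request
 * queue: reserve an outstanding-command handle, fill in the NVMe
 * command and response descriptors, chain Continuation Type 1 IOCBs
 * for any additional data segments, and write the new ring index to
 * the chip.
 */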
356 static inline int qla2x00_start_nvme_mq(srb_t *sp)
357 {
358 	unsigned long   flags;
359 	uint32_t        *clr_ptr;
360 	uint32_t        index;
361 	uint32_t        handle;
362 	struct cmd_nvme *cmd_pkt;
363 	uint16_t        cnt, i;
364 	uint16_t        req_cnt;
365 	uint16_t        tot_dsds;
366 	uint16_t	avail_dsds;
367 	struct dsd64	*cur_dsd;
368 	struct req_que *req = NULL;
369 	struct scsi_qla_host *vha = sp->fcport->vha;
370 	struct qla_hw_data *ha = vha->hw;
371 	struct qla_qpair *qpair = sp->qpair;
372 	struct srb_iocb *nvme = &sp->u.iocb_cmd;
373 	struct scatterlist *sgl, *sg;
374 	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
375 	uint32_t        rval = QLA_SUCCESS;
376 
377 	/* Setup qpair pointers */
378 	req = qpair->req;
379 	tot_dsds = fd->sg_cnt;
380 
381 	/* Acquire qpair specific lock */
382 	spin_lock_irqsave(&qpair->qp_lock, flags);
383 
384 	/* Check for room in outstanding command list. */
385 	handle = req->current_outstanding_cmd;
386 	for (index = 1; index < req->num_outstanding_cmds; index++) {
387 		handle++;
388 		if (handle == req->num_outstanding_cmds)
389 			handle = 1;
390 		if (!req->outstanding_cmds[handle])
391 			break;
392 	}
393 
394 	if (index == req->num_outstanding_cmds) {
395 		rval = -EBUSY;
396 		goto queuing_error;
397 	}
398 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
399 	if (req->cnt < (req_cnt + 2)) {
400 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
401 		    RD_REG_DWORD_RELAXED(req->req_q_out);
402 
403 		if (req->ring_index < cnt)
404 			req->cnt = cnt - req->ring_index;
405 		else
406 			req->cnt = req->length - (req->ring_index - cnt);
407 
408 		if (req->cnt < (req_cnt + 2)) {
409 			rval = -EBUSY;
410 			goto queuing_error;
411 		}
412 	}
413 
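	/*
	 * SQ id 0 is the admin queue; flag Asynchronous Event Request
	 * commands and account for them in nvme_active_aen_cnt.
	 */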
414 	if (unlikely(!fd->sqid)) {
415 		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
416 
417 		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
418 			nvme->u.nvme.aen_op = 1;
419 			atomic_inc(&ha->nvme_active_aen_cnt);
420 		}
421 	}
422 
423 	/* Build command packet. */
424 	req->current_outstanding_cmd = handle;
425 	req->outstanding_cmds[handle] = sp;
426 	sp->handle = handle;
427 	req->cnt -= req_cnt;
428 
429 	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
430 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
431 
432 	/* Zero out remaining portion of packet. */
433 	clr_ptr = (uint32_t *)cmd_pkt + 2;
434 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
435 
436 	cmd_pkt->entry_status = 0;
437 
438 	/* Update entry type to indicate Command NVME IOCB */
439 	cmd_pkt->entry_type = COMMAND_NVME;
440 
441 	/* No data transfer; how do we check for buffer len == 0? */
442 	if (fd->io_dir == NVMEFC_FCP_READ) {
443 		cmd_pkt->control_flags = CF_READ_DATA;
444 		vha->qla_stats.input_bytes += fd->payload_length;
445 		vha->qla_stats.input_requests++;
446 	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
447 		cmd_pkt->control_flags = CF_WRITE_DATA;
448 		if ((vha->flags.nvme_first_burst) &&
449 		    (sp->fcport->nvme_prli_service_param &
450 			NVME_PRLI_SP_FIRST_BURST)) {
451 			if ((fd->payload_length <=
452 			    sp->fcport->nvme_first_burst_size) ||
453 				(sp->fcport->nvme_first_burst_size == 0))
454 				cmd_pkt->control_flags |=
455 				    CF_NVME_FIRST_BURST_ENABLE;
456 		}
457 		vha->qla_stats.output_bytes += fd->payload_length;
458 		vha->qla_stats.output_requests++;
459 	} else if (fd->io_dir == 0) {
460 		cmd_pkt->control_flags = 0;
461 	}
462 
463 	/* Set NPORT-ID */
464 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
465 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
466 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
467 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
468 	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
469 
470 	/* NVME RSP IU */
471 	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
472 	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);
473 
474 	/* NVME CMND IU */
475 	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
476 	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);
477 
478 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
479 	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
480 
481 	/* One DSD is available in the Command Type NVME IOCB */
482 	avail_dsds = 1;
483 	cur_dsd = &cmd_pkt->nvme_dsd;
484 	sgl = fd->first_sgl;
485 
486 	/* Load data segments */
487 	for_each_sg(sgl, sg, tot_dsds, i) {
488 		cont_a64_entry_t *cont_pkt;
489 
490 		/* Allocate additional continuation packets? */
491 		if (avail_dsds == 0) {
492 			/*
493 			 * Five DSDs are available in the Continuation
494 			 * Type 1 IOCB.
495 			 */
496 
497 			/* Adjust ring index */
498 			req->ring_index++;
499 			if (req->ring_index == req->length) {
500 				req->ring_index = 0;
501 				req->ring_ptr = req->ring;
502 			} else {
503 				req->ring_ptr++;
504 			}
505 			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
506 			put_unaligned_le32(CONTINUE_A64_TYPE,
507 					   &cont_pkt->entry_type);
508 
509 			cur_dsd = cont_pkt->dsd;
510 			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
511 		}
512 
513 		append_dsd64(&cur_dsd, sg);
514 		avail_dsds--;
515 	}
516 
517 	/* Set total entry count. */
518 	cmd_pkt->entry_count = (uint8_t)req_cnt;
519 	wmb();
520 
521 	/* Adjust ring index. */
522 	req->ring_index++;
523 	if (req->ring_index == req->length) {
524 		req->ring_index = 0;
525 		req->ring_ptr = req->ring;
526 	} else {
527 		req->ring_ptr++;
528 	}
529 
530 	/* Set chip new ring index. */
531 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
532 
533 queuing_error:
534 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
535 	return rval;
536 }
537 
538 /* Post an NVMe FCP command to the firmware (.fcp_io transport entry point). */
539 static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
540     struct nvme_fc_remote_port *rport, void *hw_queue_handle,
541     struct nvmefc_fcp_req *fd)
542 {
543 	fc_port_t *fcport;
544 	struct srb_iocb *nvme;
545 	struct scsi_qla_host *vha;
546 	int rval = -ENODEV;
547 	srb_t *sp;
548 	struct qla_qpair *qpair = hw_queue_handle;
549 	struct nvme_private *priv = fd->private;
550 	struct qla_nvme_rport *qla_rport = rport->private;
551 
552 	fcport = qla_rport->fcport;
553 
554 	if (!qpair || !fcport || !qpair->fw_started || fcport->deleted)
556 		return rval;
557 
558 	vha = fcport->vha;
559 	/*
560 	 * If we know the device is going away while the transport is still
561 	 * sending I/Os, return busy to stall the I/O queue.  This happens
562 	 * when the link goes away and the firmware hasn't notified us yet,
563 	 * but I/Os are being returned.  If the device comes back quickly we
564 	 * won't exhaust the I/O retry count at the core.
565 	 */
566 	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
567 		return -EBUSY;
568 
569 	/* Alloc SRB structure */
570 	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
571 	if (!sp)
572 		return -EBUSY;
573 
574 	init_waitqueue_head(&sp->nvme_ls_waitq);
575 	kref_init(&sp->cmd_kref);
576 	spin_lock_init(&priv->cmd_lock);
577 	sp->priv = (void *)priv;
578 	priv->sp = sp;
579 	sp->type = SRB_NVME_CMD;
580 	sp->name = "nvme_cmd";
581 	sp->done = qla_nvme_sp_done;
582 	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
583 	sp->qpair = qpair;
584 	sp->vha = vha;
585 	nvme = &sp->u.iocb_cmd;
586 	nvme->u.nvme.desc = fd;
587 
588 	rval = qla2x00_start_nvme_mq(sp);
589 	if (rval != QLA_SUCCESS) {
590 		ql_log(ql_log_warn, vha, 0x212d,
591 		    "qla2x00_start_nvme_mq failed = %d\n", rval);
592 		wake_up(&sp->nvme_ls_waitq);
593 		sp->priv = NULL;
594 		priv->sp = NULL;
595 		qla2xxx_rel_qpair_sp(sp->qpair, sp);
596 	}
597 
598 	return rval;
599 }
600 
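/*
 * Transport callbacks invoked once local/remote port deletion has
 * completed; each clears the driver's reference and signals the
 * corresponding completion so the unregister path can finish.
 */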
601 static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
602 {
603 	struct scsi_qla_host *vha = lport->private;
604 
605 	ql_log(ql_log_info, vha, 0x210f,
606 	    "localport delete of %p completed.\n", vha->nvme_local_port);
607 	vha->nvme_local_port = NULL;
608 	complete(&vha->nvme_del_done);
609 }
610 
611 static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
612 {
613 	fc_port_t *fcport;
614 	struct qla_nvme_rport *qla_rport = rport->private;
615 
616 	fcport = qla_rport->fcport;
617 	fcport->nvme_remote_port = NULL;
618 	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
619 	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
620 	ql_log(ql_log_info, fcport->vha, 0x2110,
621 	    "remoteport_delete of %p %8phN completed.\n",
622 	    fcport, fcport->port_name);
623 	complete(&fcport->nvme_del_done);
624 }
625 
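/*
 * Port template handed to nvme_fc_register_localport(); the NVMe-FC
 * transport calls back through these handlers for queue setup, LS and
 * FCP requests, and aborts.
 */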
626 static struct nvme_fc_port_template qla_nvme_fc_transport = {
627 	.localport_delete = qla_nvme_localport_delete,
628 	.remoteport_delete = qla_nvme_remoteport_delete,
629 	.create_queue   = qla_nvme_alloc_queue,
630 	.delete_queue 	= NULL,
631 	.ls_req		= qla_nvme_ls_req,
632 	.ls_abort	= qla_nvme_ls_abort,
633 	.fcp_io		= qla_nvme_post_cmd,
634 	.fcp_abort	= qla_nvme_fcp_abort,
635 	.max_hw_queues  = 8,
636 	.max_sgl_segments = 1024,
637 	.max_dif_sgl_segments = 64,
638 	.dma_boundary = 0xFFFFFFFF,
639 	.local_priv_sz  = 8,
640 	.remote_priv_sz = sizeof(struct qla_nvme_rport),
641 	.lsrqst_priv_sz = sizeof(struct nvme_private),
642 	.fcprqst_priv_sz = sizeof(struct nvme_private),
643 };
644 
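/*
 * Unregister an fcport from the NVMe-FC transport.  Dev-loss is set to
 * zero first so the transport tears the remote port down immediately,
 * then the caller blocks until remoteport_delete signals nvme_del_done.
 */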
645 void qla_nvme_unregister_remote_port(struct fc_port *fcport)
646 {
647 	int ret;
648 
649 	if (!IS_ENABLED(CONFIG_NVME_FC))
650 		return;
651 
652 	ql_log(ql_log_warn, NULL, 0x2112,
653 	    "%s: unregister remoteport on %p %8phN\n",
654 	    __func__, fcport, fcport->port_name);
655 
656 	nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
657 	init_completion(&fcport->nvme_del_done);
658 	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
659 	if (ret)
660 		ql_log(ql_log_info, fcport->vha, 0x2114,
661 			"%s: Failed to unregister nvme_remote_port (%d)\n",
662 			    __func__, ret);
663 	wait_for_completion(&fcport->nvme_del_done);
664 }
665 
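/*
 * Unregister the NVMe-FC local port during host teardown and wait for
 * localport_delete to signal completion.
 */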
666 void qla_nvme_delete(struct scsi_qla_host *vha)
667 {
668 	int nv_ret;
669 
670 	if (!IS_ENABLED(CONFIG_NVME_FC))
671 		return;
672 
673 	if (vha->nvme_local_port) {
674 		init_completion(&vha->nvme_del_done);
675 		ql_log(ql_log_info, vha, 0x2116,
676 			"unregister localport=%p\n",
677 			vha->nvme_local_port);
678 		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
679 		if (nv_ret)
680 			ql_log(ql_log_info, vha, 0x2115,
681 			    "Unregister of localport failed\n");
682 		else
683 			wait_for_completion(&vha->nvme_del_done);
684 	}
685 }
686 
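/*
 * Register this HBA as an NVMe-FC local port.  max_hw_queues is capped
 * to the number of available request queues minus two, and the host's
 * DMA boundary is propagated into the transport template before
 * registration.
 */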
687 int qla_nvme_register_hba(struct scsi_qla_host *vha)
688 {
689 	struct nvme_fc_port_template *tmpl;
690 	struct qla_hw_data *ha;
691 	struct nvme_fc_port_info pinfo;
692 	int ret = EINVAL;
693 
694 	if (!IS_ENABLED(CONFIG_NVME_FC))
695 		return ret;
696 
697 	ha = vha->hw;
698 	tmpl = &qla_nvme_fc_transport;
699 
700 	WARN_ON(vha->nvme_local_port);
701 	WARN_ON(ha->max_req_queues < 3);
702 
703 	qla_nvme_fc_transport.max_hw_queues =
704 	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
705 		(uint8_t)(ha->max_req_queues - 2));
706 
707 	pinfo.node_name = wwn_to_u64(vha->node_name);
708 	pinfo.port_name = wwn_to_u64(vha->port_name);
709 	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
710 	pinfo.port_id = vha->d_id.b24;
711 
712 	ql_log(ql_log_info, vha, 0xffff,
713 	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
714 	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
715 	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
716 
717 	ret = nvme_fc_register_localport(&pinfo, tmpl,
718 	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
719 	if (ret) {
720 		ql_log(ql_log_warn, vha, 0xffff,
721 		    "register_localport failed: ret=%x\n", ret);
722 	} else {
723 		vha->nvme_local_port->private = vha;
724 	}
725 
726 	return ret;
727 }
728