/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

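/*
 * Register an fcport with the FC-NVMe transport as a remote port. Only
 * ports whose PRLI service parameters advertise NVMe target or discovery
 * support are registered, and only once host NVMe is enabled.
 */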
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!qidx)
		qidx++;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}

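/*
 * Final kref release for an NVMe FCP command: detach the srb from the
 * transport request, report completion status via fd->done() and return
 * the srb to its queue pair.
 */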
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

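/*
 * Final kref release for an NVMe LS request: detach the srb, complete the
 * request back to the transport with its completion status and free the srb.
 */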
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;
	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

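/*
 * Deferred abort handler: issue the abort for the srb from process context
 * and drop the kref that was taken when the work was scheduled.
 */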
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	       "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
	       __func__, sp, sp->handle, fcport, fcport->deleted);

	if (!ha->flags.fw_started && fcport->deleted)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);

out:
	/* kref_get was done before the work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}

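/*
 * Transport .ls_abort entry point: take a kref on the srb if it is still
 * live and defer the actual abort to qla_nvme_abort_work().
 */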
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

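/*
 * Transport .ls_req entry point: wrap the LS payload in an srb, DMA-map the
 * request buffer and queue the IOCB to the firmware.
 */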
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb   *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int     rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t           *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		/*
		 * Nothing waits on sp->nvme_ls_waitq in this path (it is
		 * never initialized here), so no wake_up() is needed.
		 */
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

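/*
 * Transport .fcp_abort entry point: same deferral scheme as the LS abort.
 */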
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

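/*
 * Build a Command Type NVME IOCB for the request, appending Continuation
 * Type 1 IOCBs when more data segments are needed, and post it on the
 * queue pair's request ring.
 */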
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long   flags;
	uint32_t        *clr_ptr;
	uint32_t        handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t        cnt, i;
	uint16_t        req_cnt;
	uint16_t        tot_dsds;
	uint16_t        avail_dsds;
	struct dsd64    *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	uint32_t        rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    rd_reg_dword_relaxed(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer; how do we check for buffer length == 0? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += fd->payload_length;
		vha->qla_stats.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
		     NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			     sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		vha->qla_stats.output_bytes += fd->payload_length;
		vha->qla_stats.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
					   &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return rval;
}

/* Transport .fcp_io entry point: post an NVMe FCP command. */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = -ENODEV;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv) {
		/* nvme association has been torn down */
		return rval;
	}

	fcport = qla_rport->fcport;

	if (!qpair || !fcport || !qpair->fw_started || fcport->deleted)
		return rval;

	vha = fcport->vha;
	/*
	 * If we know the device is going away while the transport is still
	 * sending I/O, return busy to stall the I/O queue. This happens when
	 * the link goes away and the firmware hasn't notified us yet, but
	 * I/Os are still being returned. If the device comes back quickly we
	 * won't exhaust the I/O retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		/* No waiter on sp->nvme_ls_waitq, so no wake_up() needed. */
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

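/* Transport callback: local port deletion has completed. */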
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

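/* Transport callback: remote port deletion has completed. */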
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}

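/* FC-NVMe port template wiring the qla2xxx entry points into the transport. */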
static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue   = qla_nvme_alloc_queue,
	.delete_queue   = NULL,
	.ls_req         = qla_nvme_ls_req,
	.ls_abort       = qla_nvme_ls_abort,
	.fcp_io         = qla_nvme_post_cmd,
	.fcp_abort      = qla_nvme_fcp_abort,
	.max_hw_queues  = 8,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz  = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

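/*
 * Unregister an fcport from the FC-NVMe transport and wait for the
 * remoteport delete callback to complete.
 */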
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

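/*
 * Tear down the NVMe local port for this host, waiting for the transport's
 * localport delete callback unless unregistration fails outright.
 */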
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

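/*
 * Register this host with the FC-NVMe transport as a local port. FC-NVMe is
 * disabled when fewer than three request queues are available, since queue
 * pairs beyond the base queues are needed for NVMe traffic.
 */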
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);

	if (ha->max_req_queues < 3) {
		if (!ha->flags.max_req_queue_warned)
			ql_log(ql_log_info, vha, 0x2120,
			       "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n",
			       __func__, ha->max_req_queues);
		ha->flags.max_req_queue_warned = 1;
		return ret;
	}

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}