/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

static void qla_nvme_unregister_remote_port(struct work_struct *);

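/*
 * Register a discovered fcport with the FC-NVMe transport.  The PRLI
 * service parameters advertised by the remote port are translated into
 * FC_PORT_ROLE_NVME_* role bits before calling
 * nvme_fc_register_remoteport().  Returns 0 when registration is skipped
 * (NVMe disabled, no local port, or already registered) as well as on
 * success; only a transport registration failure is reported to the
 * caller.
 */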
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;
	list_add_tail(&rport->list, &vha->nvme_rport_list);

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

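/*
 * Allocate a queue for NVMe traffic.  Queue index 0 (the NVMe admin
 * queue) is folded onto hardware queue 1, since qpair index 0 appears to
 * be reserved for the driver's default queue.  An existing qpair at the
 * requested index is reused; otherwise a new qpair is created on demand.
 */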
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!qidx)
		qidx++;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx=%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (!qpair) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}

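/*
 * Completion callback for an NVMe LS request.  On the final reference
 * drop the completion status is recorded and delivery of ->done() to the
 * transport is deferred to process context via priv->ls_work; the SRB
 * itself is released here since the scheduled work does not need it.
 */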
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_ls_req *fd;
	struct nvme_private *priv;

	if (atomic_read(&sp->ref_count) == 0) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
		    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
		return;
	}

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (res)
		res = -EINVAL;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;
	priv = fd->private;
	priv->comp_status = res;
	schedule_work(&priv->ls_work);
	/* work schedule doesn't need the sp */
	qla2x00_rel_sp(sp);
}

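/*
 * Completion callback for an NVMe FCP command.  Once the last SRB
 * reference is dropped, the driver status is translated into an NVMe
 * status code (0 or NVME_SC_INTERNAL), the received response length is
 * propagated, and the request is completed back to the transport.
 */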
static void qla_nvme_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_fcp_req *fd;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (res == QLA_SUCCESS)
		fd->status = 0;
	else
		fd->status = NVME_SC_INTERNAL;

	fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
	fd->done(fd);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

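/*
 * Worker that issues the firmware abort for an SRB on behalf of
 * qla_nvme_ls_abort()/qla_nvme_fcp_abort().  Running from a workqueue
 * keeps the abort, which may block, out of the transport's calling
 * context.
 */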
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);
}

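/*
 * Transport entry point to abort an outstanding LS request.  The actual
 * abort is deferred to qla_nvme_abort_work() so it runs in process
 * context rather than in the caller's context.
 */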
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
	    container_of(work, struct nvme_private, ls_work);
	struct nvmefc_ls_req *fd = priv->fd;

	fd->done(fd, priv->comp_status);
}

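/*
 * Transport entry point to issue an NVMe LS request.  An SRB is
 * allocated, the request payload is DMA-mapped for the device, and the
 * resulting SRB_NVME_LS IOCB is queued via qla2x00_start_sp().
 * Completion is delivered asynchronously through qla_nvme_sp_ls_done()
 * and qla_nvme_ls_complete().
 */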
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	vha = fcport->vha;
	ha = vha->hw;
	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	atomic_set(&sp->ref_count, 1);
	nvme = &sp->u.iocb_cmd;
	priv->sp = sp;
	priv->fd = fd;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
		return rval;
	}

	return rval;
}

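/*
 * Transport entry point to abort an outstanding FCP command; like
 * qla_nvme_ls_abort(), the abort itself is performed from
 * qla_nvme_abort_work().
 */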
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

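/*
 * Transport poll hook: drain the qpair's response queue under the qpair
 * lock so completions can be reaped without waiting for an interrupt.
 */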
static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
{
	struct qla_qpair *qpair = hw_queue_handle;
	unsigned long flags;
	struct scsi_qla_host *vha = lport->private;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

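/*
 * Build and ring an NVMe Command Type IOCB on the qpair's request queue:
 * claim a free outstanding-command handle, verify ring space, fill in
 * the command packet (transfer direction, N_Port ID, command and
 * response IU descriptors), then chain the data scatterlist through the
 * single in-packet DSD and as many Continuation Type 1 IOCBs (five DSDs
 * each) as needed before updating the chip's request-queue in-pointer.
 */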
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* Set the transfer direction; an io_dir of 0 means no data transfer. */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
		vha->qla_stats.input_bytes += fd->payload_length;
		vha->qla_stats.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
		vha->qla_stats.output_bytes += fd->payload_length;
		vha->qla_stats.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
	cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
	cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			*((uint32_t *)(&cont_pkt->entry_type)) =
			    cpu_to_le32(CONTINUE_A64_TYPE);

			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return rval;
}

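/*
 * Post an FCP command.  Validates the request and queue handle, rejects
 * I/O while an ISP abort is active or while the fcport is resetting
 * (returning -EBUSY so the transport stalls and retries), then allocates
 * an SRB from the qpair and hands it to qla2x00_start_nvme_mq().
 */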
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = -ENODEV;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!fd || !qpair) {
		ql_log(ql_log_warn, NULL, 0x2134,
		    "NO NVMe request or Queue Handle\n");
		return rval;
	}

	priv = fd->private;
	fcport = qla_rport->fcport;
	if (!fcport) {
		ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
		return rval;
	}

	vha = fcport->vha;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return rval;

	/*
	 * If we know the dev is going away while the transport is still
	 * sending IOs, return busy back to stall the IO queue.  This
	 * happens when the link goes away and fw hasn't notified us yet,
	 * but IOs are being returned.  If the dev comes back quickly we
	 * won't exhaust the IO retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	atomic_set(&sp->ref_count, 1);
	init_waitqueue_head(&sp->nvme_ls_waitq);
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->qpair = qpair;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
	}

	return rval;
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

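/*
 * Transport callback invoked once the remote port is fully deleted.
 * Drops the driver's bookkeeping for the rport, signals any waiter in
 * the unregister path, and (unless the host is unloading) schedules the
 * session teardown via qlt_free_session_done().
 */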
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private, *trport;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;

	list_for_each_entry_safe(qla_rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (qla_rport->fcport == fcport) {
			list_del(&qla_rport->list);
			break;
		}
	}
	complete(&fcport->nvme_del_done);

	if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
		INIT_WORK(&fcport->free_work, qlt_free_session_done);
		schedule_work(&fcport->free_work);
	}

	fcport->nvme_flag &= ~(NVME_FLAG_REGISTERED | NVME_FLAG_DELETING);
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p completed.\n", fcport);
}

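/*
 * FC-NVMe transport template binding the qla2xxx entry points.  The
 * max_hw_queues and dma_boundary values here are defaults; both are
 * clamped/overridden at registration time in qla_nvme_register_hba().
 */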
static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue	= qla_nvme_alloc_queue,
	.delete_queue	= NULL,
	.ls_req		= qla_nvme_ls_req,
	.ls_abort	= qla_nvme_ls_abort,
	.fcp_io		= qla_nvme_post_cmd,
	.fcp_abort	= qla_nvme_fcp_abort,
	.poll_queue	= qla_nvme_poll,
	.max_hw_queues	= 8,
	.max_sgl_segments = 128,
	.max_dif_sgl_segments = 64,
	.dma_boundary	= 0xFFFFFFFF,
	.local_priv_sz	= 8,
	.remote_priv_sz	= sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

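/*
 * Wait up to NVME_ABORT_POLLING_PERIOD seconds after an abort for the
 * SRB reference count to settle; the return value tells the caller
 * whether outstanding references remain on the command.
 */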
#define NVME_ABORT_POLLING_PERIOD    2
static int qla_nvme_wait_on_command(srb_t *sp)
{
	int ret = QLA_SUCCESS;

	wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
	    NVME_ABORT_POLLING_PERIOD * HZ);

	if (atomic_read(&sp->ref_count) > 1)
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

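/*
 * Abort an NVMe SRB from the driver side.  While an ISP abort is in
 * progress the firmware cannot service the abort, so the SRB is simply
 * completed with the supplied result instead.
 */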
void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
{
	int rval;

	if (!test_bit(ABORT_ISP_ACTIVE, &sp->vha->dpc_flags)) {
		rval = ha->isp_ops->abort_command(sp);
		if (!rval && !qla_nvme_wait_on_command(sp))
			ql_log(ql_log_warn, NULL, 0x2112,
			    "timed out waiting on sp=%p\n", sp);
	} else {
		sp->done(sp, res);
	}
}

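/*
 * Deferred work to unregister an fcport's remote port from the FC-NVMe
 * transport.  Blocks on nvme_del_done until the transport's
 * remoteport_delete callback has run.
 */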
static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, struct fc_port,
	    nvme_del_work);
	struct qla_nvme_rport *qla_rport, *trport;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p\n", __func__, fcport);

	list_for_each_entry_safe(qla_rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (qla_rport->fcport == fcport) {
			ql_log(ql_log_info, fcport->vha, 0x2113,
			    "%s: fcport=%p\n", __func__, fcport);
			init_completion(&fcport->nvme_del_done);
			nvme_fc_unregister_remoteport(
			    fcport->nvme_remote_port);
			wait_for_completion(&fcport->nvme_del_done);
			break;
		}
	}
}

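/*
 * Tear down all FC-NVMe state for a host: unregister every remote port
 * (with dev_loss_tmo forced to 0 so the transport deletes immediately),
 * then unregister the local port, waiting for each deletion callback to
 * complete.
 */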
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	struct qla_nvme_rport *qla_rport, *trport;
	fc_port_t *fcport;
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	list_for_each_entry_safe(qla_rport, trport,
	    &vha->nvme_rport_list, list) {
		fcport = qla_rport->fcport;

		ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
		    __func__, fcport);

		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
		init_completion(&fcport->nvme_del_done);
		nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
		wait_for_completion(&fcport->nvme_del_done);
	}

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

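/*
 * Register this HBA as an FC-NVMe local port.  max_hw_queues is clamped
 * to the hardware's available request queues (two appear to be reserved
 * for driver use) and the template's dma_boundary is taken from the SCSI
 * host before calling nvme_fc_register_localport().
 */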
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);
	WARN_ON(ha->max_req_queues < 3);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}
723