xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_nvme.c (revision f6be298c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic Fibre Channel HBA Driver
4  * Copyright (c)  2003-2017 QLogic Corporation
5  */
6 #include "qla_nvme.h"
7 #include <linux/scatterlist.h>
8 #include <linux/delay.h>
9 #include <linux/nvme.h>
10 #include <linux/nvme-fc.h>
11 #include <linux/blk-mq-pci.h>
12 #include <linux/blk-mq.h>
13 
14 static struct nvme_fc_port_template qla_nvme_fc_transport;
15 static int qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha,
16 				   struct qla_qpair *qp,
17 				   struct qla_nvme_lsrjt_pt_arg *a,
18 				   bool is_xchg_terminate);
19 
20 struct qla_nvme_unsol_ctx {
21 	struct list_head elem;
22 	struct scsi_qla_host *vha;
23 	struct fc_port *fcport;
24 	struct srb *sp;
25 	struct nvmefc_ls_rsp lsrsp;
26 	struct nvmefc_ls_rsp *fd_rsp;
27 	struct work_struct lsrsp_work;
28 	struct work_struct abort_work;
29 	__le32 exchange_address;
30 	__le16 nport_handle;
31 	__le16 ox_id;
32 	int comp_status;
33 	spinlock_t cmd_lock;
34 };
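/*
 * Illustrative sketch (editor's addition, not driver code): the NVMe-FC
 * transport hands back only a pointer to the embedded lsrsp member, and
 * qla_nvme_xmt_ls_rsp() below recovers the wrapping context with
 * container_of(). The example_ helper name is hypothetical.
 */
static inline struct qla_nvme_unsol_ctx *
example_uctx_from_lsrsp(struct nvmefc_ls_rsp *lsrsp)
{
	/* Subtracts offsetof(struct qla_nvme_unsol_ctx, lsrsp). */
	return container_of(lsrsp, struct qla_nvme_unsol_ctx, lsrsp);
}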
35 
36 int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
37 {
38 	struct qla_nvme_rport *rport;
39 	struct nvme_fc_port_info req;
40 	int ret;
41 
42 	if (!IS_ENABLED(CONFIG_NVME_FC))
43 		return 0;
44 
45 	if (!vha->flags.nvme_enabled) {
46 		ql_log(ql_log_info, vha, 0x2100,
47 		    "%s: Not registering target since Host NVME is not enabled\n",
48 		    __func__);
49 		return 0;
50 	}
51 
52 	if (qla_nvme_register_hba(vha))
53 		return 0;
54 
55 	if (!vha->nvme_local_port)
56 		return 0;
57 
58 	if (!(fcport->nvme_prli_service_param &
59 	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
60 		(fcport->nvme_flag & NVME_FLAG_REGISTERED))
61 		return 0;
62 
63 	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
64 
65 	memset(&req, 0, sizeof(struct nvme_fc_port_info));
66 	req.port_name = wwn_to_u64(fcport->port_name);
67 	req.node_name = wwn_to_u64(fcport->node_name);
68 	req.port_role = 0;
69 	req.dev_loss_tmo = fcport->dev_loss_tmo;
70 
71 	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
72 		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
73 
74 	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
75 		req.port_role |= FC_PORT_ROLE_NVME_TARGET;
76 
77 	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
78 		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
79 
80 	req.port_id = fcport->d_id.b24;
81 
82 	ql_log(ql_log_info, vha, 0x2102,
83 	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
84 	    __func__, req.node_name, req.port_name,
85 	    req.port_id);
86 
87 	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
88 	    &fcport->nvme_remote_port);
89 	if (ret) {
90 		ql_log(ql_log_warn, vha, 0x212e,
91 		    "Failed to register remote port. Transport returned %d\n",
92 		    ret);
93 		return ret;
94 	}
95 
96 	nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
97 				       fcport->dev_loss_tmo);
98 
99 	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
100 		ql_log(ql_log_info, vha, 0x212a,
101 		       "PortID:%06x Supports SLER\n", req.port_id);
102 
103 	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
104 		ql_log(ql_log_info, vha, 0x212b,
105 		       "PortID:%06x Supports PI control\n", req.port_id);
106 
107 	rport = fcport->nvme_remote_port->private;
108 	rport->fcport = fcport;
109 
110 	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
111 	return 0;
112 }
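/*
 * Illustrative sketch (editor's addition, not driver code): the PRLI
 * service-parameter bits map one-to-one onto the FC port roles built in
 * qla_nvme_register_remote() above; example_prli_to_role() is a
 * hypothetical standalone version of that mapping.
 */
static inline u32 example_prli_to_role(u32 prli_sp)
{
	u32 role = 0;

	if (prli_sp & NVME_PRLI_SP_INITIATOR)
		role |= FC_PORT_ROLE_NVME_INITIATOR;
	if (prli_sp & NVME_PRLI_SP_TARGET)
		role |= FC_PORT_ROLE_NVME_TARGET;
	if (prli_sp & NVME_PRLI_SP_DISCOVERY)
		role |= FC_PORT_ROLE_NVME_DISCOVERY;

	return role;
}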
113 
114 /* Allocate a queue for NVMe traffic */
115 static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
116     unsigned int qidx, u16 qsize, void **handle)
117 {
118 	struct scsi_qla_host *vha;
119 	struct qla_hw_data *ha;
120 	struct qla_qpair *qpair;
121 
122 	/* Map admin queue and 1st IO queue to index 0 */
123 	if (qidx)
124 		qidx--;
125 
126 	vha = (struct scsi_qla_host *)lport->private;
127 	ha = vha->hw;
128 
129 	ql_log(ql_log_info, vha, 0x2104,
130 	    "%s: handle %p, idx=%d, qsize %d\n",
131 	    __func__, handle, qidx, qsize);
132 
133 	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
134 		ql_log(ql_log_warn, vha, 0x212f,
135 		    "%s: Illegal qidx=%d. Max=%d\n",
136 		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
137 		return -EINVAL;
138 	}
139 
140 	/* Use base qpair if max_qpairs is 0 */
141 	if (!ha->max_qpairs) {
142 		qpair = ha->base_qpair;
143 	} else {
144 		if (ha->queue_pair_map[qidx]) {
145 			*handle = ha->queue_pair_map[qidx];
146 			ql_log(ql_log_info, vha, 0x2121,
147 			       "Returning existing qpair of %p for idx=%x\n",
148 			       *handle, qidx);
149 			return 0;
150 		}
151 
152 		qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
153 		if (!qpair) {
154 			ql_log(ql_log_warn, vha, 0x2122,
155 			       "Failed to allocate qpair\n");
156 			return -EINVAL;
157 		}
158 		qla_adjust_iocb_limit(vha);
159 	}
160 	*handle = qpair;
161 
162 	return 0;
163 }
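/*
 * Illustrative sketch (editor's addition, not driver code): the transport
 * numbers the admin queue 0 and I/O queues 1..N, while the driver folds
 * the admin queue and the first I/O queue onto qpair index 0, as done in
 * qla_nvme_alloc_queue() above. The example_ helper is hypothetical.
 */
static inline unsigned int example_nvme_qidx_to_qpair_idx(unsigned int qidx)
{
	/* Admin queue (0) and first I/O queue (1) both land on index 0. */
	return qidx ? qidx - 1 : 0;
}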
164 
165 static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
166 {
167 	struct srb *sp = container_of(kref, struct srb, cmd_kref);
168 	struct nvme_private *priv = (struct nvme_private *)sp->priv;
169 	struct nvmefc_fcp_req *fd;
170 	struct srb_iocb *nvme;
171 	unsigned long flags;
172 
173 	if (!priv)
174 		goto out;
175 
176 	nvme = &sp->u.iocb_cmd;
177 	fd = nvme->u.nvme.desc;
178 
179 	spin_lock_irqsave(&priv->cmd_lock, flags);
180 	priv->sp = NULL;
181 	sp->priv = NULL;
182 	if (priv->comp_status == QLA_SUCCESS) {
183 		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
184 		fd->status = NVME_SC_SUCCESS;
185 	} else {
186 		fd->rcv_rsplen = 0;
187 		fd->transferred_length = 0;
188 		fd->status = NVME_SC_INTERNAL;
189 	}
190 	spin_unlock_irqrestore(&priv->cmd_lock, flags);
191 
192 	fd->done(fd);
193 out:
194 	qla2xxx_rel_qpair_sp(sp->qpair, sp);
195 }
196 
197 static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
198 {
199 	struct srb *sp = container_of(kref, struct srb, cmd_kref);
200 	struct nvme_private *priv = (struct nvme_private *)sp->priv;
201 	struct nvmefc_ls_req *fd;
202 	unsigned long flags;
203 
204 	if (!priv)
205 		goto out;
206 
207 	spin_lock_irqsave(&priv->cmd_lock, flags);
208 	priv->sp = NULL;
209 	sp->priv = NULL;
210 	spin_unlock_irqrestore(&priv->cmd_lock, flags);
211 
212 	fd = priv->fd;
213 
214 	fd->done(fd, priv->comp_status);
215 out:
216 	qla2x00_rel_sp(sp);
217 }
218 
219 static void qla_nvme_ls_complete(struct work_struct *work)
220 {
221 	struct nvme_private *priv =
222 		container_of(work, struct nvme_private, ls_work);
223 
224 	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
225 }
226 
227 static void qla_nvme_sp_ls_done(srb_t *sp, int res)
228 {
229 	struct nvme_private *priv = sp->priv;
230 
231 	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
232 		return;
233 
234 	if (res)
235 		res = -EINVAL;
236 
237 	priv->comp_status = res;
238 	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
239 	schedule_work(&priv->ls_work);
240 }
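/*
 * Illustrative sketch (editor's addition, not driver code): the deferral
 * pattern used by qla_nvme_sp_ls_done() above - the done() callback runs
 * from response-queue processing, so the final kref_put() (whose release
 * function calls back into the transport) is pushed to a workqueue.
 * example_ names are hypothetical; kref/workqueue support is assumed to
 * be pulled in via qla_nvme.h.
 */
struct example_deferred_put {
	struct work_struct work;
	struct kref *kref;
	void (*release)(struct kref *kref);
};

static void example_deferred_put_fn(struct work_struct *work)
{
	struct example_deferred_put *d =
		container_of(work, struct example_deferred_put, work);

	/* May invoke d->release() in process context. */
	kref_put(d->kref, d->release);
}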
241 
242 static void qla_nvme_release_lsrsp_cmd_kref(struct kref *kref)
243 {
244 	struct srb *sp = container_of(kref, struct srb, cmd_kref);
245 	struct qla_nvme_unsol_ctx *uctx = sp->priv;
246 	struct nvmefc_ls_rsp *fd_rsp;
247 	unsigned long flags;
248 
249 	if (!uctx) {
250 		qla2x00_rel_sp(sp);
251 		return;
252 	}
253 
254 	spin_lock_irqsave(&uctx->cmd_lock, flags);
255 	uctx->sp = NULL;
256 	sp->priv = NULL;
257 	spin_unlock_irqrestore(&uctx->cmd_lock, flags);
258 
259 	fd_rsp = uctx->fd_rsp;
260 
261 	list_del(&uctx->elem);
262 
263 	fd_rsp->done(fd_rsp);
264 	kfree(uctx);
265 	qla2x00_rel_sp(sp);
266 }
267 
268 static void qla_nvme_lsrsp_complete(struct work_struct *work)
269 {
270 	struct qla_nvme_unsol_ctx *uctx =
271 		container_of(work, struct qla_nvme_unsol_ctx, lsrsp_work);
272 
273 	kref_put(&uctx->sp->cmd_kref, qla_nvme_release_lsrsp_cmd_kref);
274 }
275 
276 static void qla_nvme_sp_lsrsp_done(srb_t *sp, int res)
277 {
278 	struct qla_nvme_unsol_ctx *uctx = sp->priv;
279 
280 	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
281 		return;
282 
283 	if (res)
284 		res = -EINVAL;
285 
286 	uctx->comp_status = res;
287 	INIT_WORK(&uctx->lsrsp_work, qla_nvme_lsrsp_complete);
288 	schedule_work(&uctx->lsrsp_work);
289 }
290 
291 /* It is assumed that the qpair lock is held. */
292 static void qla_nvme_sp_done(srb_t *sp, int res)
293 {
294 	struct nvme_private *priv = sp->priv;
295 
296 	priv->comp_status = res;
297 	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
298 
299 	return;
300 }
301 
302 static void qla_nvme_abort_work(struct work_struct *work)
303 {
304 	struct nvme_private *priv =
305 		container_of(work, struct nvme_private, abort_work);
306 	srb_t *sp = priv->sp;
307 	fc_port_t *fcport = sp->fcport;
308 	struct qla_hw_data *ha = fcport->vha->hw;
309 	int rval, abts_done_called = 1;
310 	bool io_wait_for_abort_done;
311 	uint32_t handle;
312 
313 	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
314 	       "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
315 	       __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted);
316 
317 	if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
318 		goto out;
319 
320 	if (ha->flags.host_shutting_down) {
321 		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
322 		    "%s Calling done on sp: %p, type: 0x%x\n",
323 		    __func__, sp, sp->type);
324 		sp->done(sp, 0);
325 		goto out;
326 	}
327 
328 	/*
329 	 * sp may not be valid after abort_command() if the return code is
330 	 * either SUCCESS or ERR_FROM_FW, so cache the values needed here.
331 	 */
332 	io_wait_for_abort_done = ql2xabts_wait_nvme &&
333 					QLA_ABTS_WAIT_ENABLED(sp);
334 	handle = sp->handle;
335 
336 	rval = ha->isp_ops->abort_command(sp);
337 
338 	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
339 	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
340 	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
341 	    sp, handle, fcport, rval);
342 
343 	/*
344 	 * If async tmf is enabled, the abort callback is called only on
345 	 * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
346 	 */
347 	if (ql2xasynctmfenable &&
348 	    rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
349 		abts_done_called = 0;
350 
351 	/*
352 	 * Return before decreasing the kref so that I/O requests are
353 	 * held until the ABTS completes. This kref is decreased in the
354 	 * qla24xx_abort_sp_done() function.
355 	 */
356 	if (abts_done_called && io_wait_for_abort_done)
357 		return;
358 out:
359 	/* kref_get was done before the work was scheduled. */
360 	kref_put(&sp->cmd_kref, sp->put_fn);
361 }
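/*
 * Illustrative sketch (editor's addition, not driver code): the
 * use-after-free guard in qla_nvme_abort_work() above - anything needed
 * after abort_command() must be copied out first, because sp may already
 * have been freed by the time it returns. example_ names are
 * hypothetical.
 */
struct example_cmd {
	u32 handle;
	bool wait_for_abort_done;
};

static void example_abort_cmd(struct example_cmd *cmd,
			      int (*abort_fn)(struct example_cmd *cmd))
{
	/* Cache before the call; cmd may be invalid afterwards. */
	u32 handle = cmd->handle;
	bool wait = cmd->wait_for_abort_done;
	int rval = abort_fn(cmd);

	pr_debug("abort handle=%x rval=%d wait=%d\n", handle, rval, wait);
}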
362 
363 static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport,
364 			       struct nvme_fc_remote_port *rport,
365 			       struct nvmefc_ls_rsp *fd_resp)
366 {
367 	struct qla_nvme_unsol_ctx *uctx = container_of(fd_resp,
368 				struct qla_nvme_unsol_ctx, lsrsp);
369 	struct qla_nvme_rport *qla_rport = rport->private;
370 	fc_port_t *fcport = qla_rport->fcport;
371 	struct scsi_qla_host *vha = uctx->vha;
372 	struct qla_hw_data *ha = vha->hw;
373 	struct qla_nvme_lsrjt_pt_arg a;
374 	struct srb_iocb *nvme;
375 	srb_t *sp;
376 	int rval = QLA_FUNCTION_FAILED;
377 	uint8_t cnt = 0;
378 
379 	if (!fcport || fcport->deleted)
380 		goto out;
381 
382 	if (!ha->flags.fw_started)
383 		goto out;
384 
385 	/* Alloc SRB structure */
386 	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
387 	if (!sp)
388 		goto out;
389 
390 	sp->type = SRB_NVME_LS;
391 	sp->name = "nvme_ls";
392 	sp->done = qla_nvme_sp_lsrsp_done;
393 	sp->put_fn = qla_nvme_release_lsrsp_cmd_kref;
394 	sp->priv = (void *)uctx;
395 	sp->unsol_rsp = 1;
396 	uctx->sp = sp;
397 	spin_lock_init(&uctx->cmd_lock);
398 	nvme = &sp->u.iocb_cmd;
399 	uctx->fd_rsp = fd_resp;
400 	nvme->u.nvme.desc = fd_resp;
401 	nvme->u.nvme.dir = 0;
402 	nvme->u.nvme.dl = 0;
403 	nvme->u.nvme.timeout_sec = 0;
404 	nvme->u.nvme.cmd_dma = fd_resp->rspdma;
405 	nvme->u.nvme.cmd_len = cpu_to_le32(fd_resp->rsplen);
406 	nvme->u.nvme.rsp_len = 0;
407 	nvme->u.nvme.rsp_dma = 0;
408 	nvme->u.nvme.exchange_address = uctx->exchange_address;
409 	nvme->u.nvme.nport_handle = uctx->nport_handle;
410 	nvme->u.nvme.ox_id = uctx->ox_id;
411 	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
412 				   fd_resp->rsplen, DMA_TO_DEVICE);
413 
414 	ql_dbg(ql_dbg_unsol, vha, 0x2122,
415 	       "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
416 	       fcport->d_id.b24, fcport->port_name, uctx->exchange_address,
417 	       uctx->ox_id, uctx->nport_handle);
418 retry:
419 	rval = qla2x00_start_sp(sp);
420 	switch (rval) {
421 	case QLA_SUCCESS:
422 		break;
423 	case EAGAIN:
424 		msleep(PURLS_MSLEEP_INTERVAL);
425 		cnt++;
426 		if (cnt < PURLS_RETRY_COUNT)
427 			goto retry;
428 
429 		fallthrough;
430 	default:
431 		ql_log(ql_log_warn, vha, 0x2123,
432 		       "Failed to xmit Unsol ls response = %d\n", rval);
433 		rval = -EIO;
434 		qla2x00_rel_sp(sp);
435 		goto out;
436 	}
437 
438 	return 0;
439 out:
440 	memset((void *)&a, 0, sizeof(a));
441 	a.vp_idx = vha->vp_idx;
442 	a.nport_handle = uctx->nport_handle;
443 	a.xchg_address = uctx->exchange_address;
444 	qla_nvme_ls_reject_iocb(vha, ha->base_qpair, &a, true);
445 	kfree(uctx);
446 	return rval;
447 }
448 
449 static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
450     struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
451 {
452 	struct nvme_private *priv = fd->private;
453 	unsigned long flags;
454 
455 	spin_lock_irqsave(&priv->cmd_lock, flags);
456 	if (!priv->sp) {
457 		spin_unlock_irqrestore(&priv->cmd_lock, flags);
458 		return;
459 	}
460 
461 	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
462 		spin_unlock_irqrestore(&priv->cmd_lock, flags);
463 		return;
464 	}
465 	spin_unlock_irqrestore(&priv->cmd_lock, flags);
466 
467 	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
468 	schedule_work(&priv->abort_work);
469 }
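/*
 * Illustrative sketch (editor's addition, not driver code): the race-safe
 * takeover shared by both abort entry points above - priv->sp is only
 * dereferenced under cmd_lock, and kref_get_unless_zero() refuses a new
 * reference once teardown has already dropped the last one. The example_
 * name is hypothetical.
 */
static bool example_try_take_ref(spinlock_t *lock, struct srb **spp)
{
	unsigned long flags;
	bool got = false;

	spin_lock_irqsave(lock, flags);
	if (*spp)
		got = kref_get_unless_zero(&(*spp)->cmd_kref);
	spin_unlock_irqrestore(lock, flags);

	return got;	/* caller owns a reference only when true */
}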
470 
471 static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
472     struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
473 {
474 	struct qla_nvme_rport *qla_rport = rport->private;
475 	fc_port_t *fcport = qla_rport->fcport;
476 	struct srb_iocb   *nvme;
477 	struct nvme_private *priv = fd->private;
478 	struct scsi_qla_host *vha;
479 	int     rval = QLA_FUNCTION_FAILED;
480 	struct qla_hw_data *ha;
481 	srb_t           *sp;
482 
483 	if (!fcport || fcport->deleted)
484 		return rval;
485 
486 	vha = fcport->vha;
487 	ha = vha->hw;
488 
489 	if (!ha->flags.fw_started)
490 		return rval;
491 
492 	/* Alloc SRB structure */
493 	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
494 	if (!sp)
495 		return rval;
496 
497 	sp->type = SRB_NVME_LS;
498 	sp->name = "nvme_ls";
499 	sp->done = qla_nvme_sp_ls_done;
500 	sp->put_fn = qla_nvme_release_ls_cmd_kref;
501 	sp->priv = priv;
502 	priv->sp = sp;
503 	kref_init(&sp->cmd_kref);
504 	spin_lock_init(&priv->cmd_lock);
505 	nvme = &sp->u.iocb_cmd;
506 	priv->fd = fd;
507 	nvme->u.nvme.desc = fd;
508 	nvme->u.nvme.dir = 0;
509 	nvme->u.nvme.dl = 0;
510 	nvme->u.nvme.cmd_len = cpu_to_le32(fd->rqstlen);
511 	nvme->u.nvme.rsp_len = cpu_to_le32(fd->rsplen);
512 	nvme->u.nvme.rsp_dma = fd->rspdma;
513 	nvme->u.nvme.timeout_sec = fd->timeout;
514 	nvme->u.nvme.cmd_dma = fd->rqstdma;
515 	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
516 	    fd->rqstlen, DMA_TO_DEVICE);
517 
518 	rval = qla2x00_start_sp(sp);
519 	if (rval != QLA_SUCCESS) {
520 		ql_log(ql_log_warn, vha, 0x700e,
521 		    "qla2x00_start_sp failed = %d\n", rval);
522 		sp->priv = NULL;
523 		priv->sp = NULL;
524 		qla2x00_rel_sp(sp);
525 		return rval;
526 	}
527 
528 	return rval;
529 }
530 
531 static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
532     struct nvme_fc_remote_port *rport, void *hw_queue_handle,
533     struct nvmefc_fcp_req *fd)
534 {
535 	struct nvme_private *priv = fd->private;
536 	unsigned long flags;
537 
538 	spin_lock_irqsave(&priv->cmd_lock, flags);
539 	if (!priv->sp) {
540 		spin_unlock_irqrestore(&priv->cmd_lock, flags);
541 		return;
542 	}
543 	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
544 		spin_unlock_irqrestore(&priv->cmd_lock, flags);
545 		return;
546 	}
547 	spin_unlock_irqrestore(&priv->cmd_lock, flags);
548 
549 	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
550 	schedule_work(&priv->abort_work);
551 }
552 
553 static inline int qla2x00_start_nvme_mq(srb_t *sp)
554 {
555 	unsigned long   flags;
556 	uint32_t        *clr_ptr;
557 	uint32_t        handle;
558 	struct cmd_nvme *cmd_pkt;
559 	uint16_t        cnt, i;
560 	uint16_t        req_cnt;
561 	uint16_t        tot_dsds;
562 	uint16_t	avail_dsds;
563 	struct dsd64	*cur_dsd;
564 	struct req_que *req = NULL;
565 	struct rsp_que *rsp = NULL;
566 	struct scsi_qla_host *vha = sp->fcport->vha;
567 	struct qla_hw_data *ha = vha->hw;
568 	struct qla_qpair *qpair = sp->qpair;
569 	struct srb_iocb *nvme = &sp->u.iocb_cmd;
570 	struct scatterlist *sgl, *sg;
571 	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
572 	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
573 	uint32_t        rval = QLA_SUCCESS;
574 
575 	/* Setup qpair pointers */
576 	req = qpair->req;
577 	rsp = qpair->rsp;
578 	tot_dsds = fd->sg_cnt;
579 
580 	/* Acquire qpair specific lock */
581 	spin_lock_irqsave(&qpair->qp_lock, flags);
582 
583 	handle = qla2xxx_get_next_handle(req);
584 	if (handle == 0) {
585 		rval = -EBUSY;
586 		goto queuing_error;
587 	}
588 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
589 
590 	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
591 	sp->iores.exch_cnt = 1;
592 	sp->iores.iocb_cnt = req_cnt;
593 	if (qla_get_fw_resources(sp->qpair, &sp->iores)) {
594 		rval = -EBUSY;
595 		goto queuing_error;
596 	}
597 
598 	if (req->cnt < (req_cnt + 2)) {
599 		if (IS_SHADOW_REG_CAPABLE(ha)) {
600 			cnt = *req->out_ptr;
601 		} else {
602 			cnt = rd_reg_dword_relaxed(req->req_q_out);
603 			if (qla2x00_check_reg16_for_disconnect(vha, cnt)) {
604 				rval = -EBUSY;
605 				goto queuing_error;
606 			}
607 		}
608 
609 		if (req->ring_index < cnt)
610 			req->cnt = cnt - req->ring_index;
611 		else
612 			req->cnt = req->length - (req->ring_index - cnt);
613 
614 		if (req->cnt < (req_cnt + 2)) {
615 			rval = -EBUSY;
616 			goto queuing_error;
617 		}
618 	}
619 
620 	if (unlikely(!fd->sqid)) {
621 		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
622 			nvme->u.nvme.aen_op = 1;
623 			atomic_inc(&ha->nvme_active_aen_cnt);
624 		}
625 	}
626 
627 	/* Build command packet. */
628 	req->current_outstanding_cmd = handle;
629 	req->outstanding_cmds[handle] = sp;
630 	sp->handle = handle;
631 	req->cnt -= req_cnt;
632 
633 	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
634 	cmd_pkt->handle = make_handle(req->id, handle);
635 
636 	/* Zero out remaining portion of packet. */
637 	clr_ptr = (uint32_t *)cmd_pkt + 2;
638 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
639 
640 	cmd_pkt->entry_status = 0;
641 
642 	/* Update entry type to indicate Command NVME IOCB */
643 	cmd_pkt->entry_type = COMMAND_NVME;
644 
645 	/* No data transfer; how do we check for buffer len == 0? */
646 	if (fd->io_dir == NVMEFC_FCP_READ) {
647 		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
648 		qpair->counters.input_bytes += fd->payload_length;
649 		qpair->counters.input_requests++;
650 	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
651 		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
652 		if ((vha->flags.nvme_first_burst) &&
653 		    (sp->fcport->nvme_prli_service_param &
654 			NVME_PRLI_SP_FIRST_BURST)) {
655 			if ((fd->payload_length <=
656 			    sp->fcport->nvme_first_burst_size) ||
657 				(sp->fcport->nvme_first_burst_size == 0))
658 				cmd_pkt->control_flags |=
659 					cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
660 		}
661 		qpair->counters.output_bytes += fd->payload_length;
662 		qpair->counters.output_requests++;
663 	} else if (fd->io_dir == 0) {
664 		cmd_pkt->control_flags = 0;
665 	}
666 
667 	if (sp->fcport->edif.enable && fd->io_dir != 0)
668 		cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
669 
670 	/* Set BIT_13 of control flags for Async event */
671 	if (vha->flags.nvme2_enabled &&
672 	    cmd->sqe.common.opcode == nvme_admin_async_event) {
673 		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
674 	}
675 
676 	/* Set NPORT-ID */
677 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
678 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
679 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
680 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
681 	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
682 
683 	/* NVME RSP IU */
684 	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
685 	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);
686 
687 	/* NVME CMND IU */
688 	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
689 	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);
690 
691 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
692 	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
693 
694 	/* One DSD is available in the Command Type NVME IOCB */
695 	avail_dsds = 1;
696 	cur_dsd = &cmd_pkt->nvme_dsd;
697 	sgl = fd->first_sgl;
698 
699 	/* Load data segments */
700 	for_each_sg(sgl, sg, tot_dsds, i) {
701 		cont_a64_entry_t *cont_pkt;
702 
703 		/* Allocate additional continuation packets? */
704 		if (avail_dsds == 0) {
705 			/*
706 			 * Five DSDs are available in the Continuation
707 			 * Type 1 IOCB.
708 			 */
709 
710 			/* Adjust ring index */
711 			req->ring_index++;
712 			if (req->ring_index == req->length) {
713 				req->ring_index = 0;
714 				req->ring_ptr = req->ring;
715 			} else {
716 				req->ring_ptr++;
717 			}
718 			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
719 			put_unaligned_le32(CONTINUE_A64_TYPE,
720 					   &cont_pkt->entry_type);
721 
722 			cur_dsd = cont_pkt->dsd;
723 			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
724 		}
725 
726 		append_dsd64(&cur_dsd, sg);
727 		avail_dsds--;
728 	}
729 
730 	/* Set total entry count. */
731 	cmd_pkt->entry_count = (uint8_t)req_cnt;
732 	wmb();
733 
734 	/* Adjust ring index. */
735 	req->ring_index++;
736 	if (req->ring_index == req->length) {
737 		req->ring_index = 0;
738 		req->ring_ptr = req->ring;
739 	} else {
740 		req->ring_ptr++;
741 	}
742 
743 	/* ignore nvme async cmd due to long timeout */
744 	if (!nvme->u.nvme.aen_op)
745 		sp->qpair->cmd_cnt++;
746 
747 	/* Set chip new ring index. */
748 	wrt_reg_dword(req->req_q_in, req->ring_index);
749 
750 	if (vha->flags.process_response_queue &&
751 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
752 		qla24xx_process_response_queue(vha, rsp);
753 
754 queuing_error:
755 	if (rval)
756 		qla_put_fw_resources(sp->qpair, &sp->iores);
757 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
758 
759 	return rval;
760 }
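/*
 * Illustrative sketch (editor's addition, not driver code): the two bits
 * of ring accounting inside qla2x00_start_nvme_mq() above. The command
 * IOCB carries one DSD and each Continuation Type 1 IOCB carries five
 * more, so the IOCB count grows with the scatter/gather count; free ring
 * slots are derived from the producer index (ring_index) and the
 * consumer index read back from the chip. example_ names are
 * hypothetical.
 */
static inline u16 example_nvme_iocb_count(u16 tot_dsds)
{
	/* 1 DSD in the command IOCB, 5 per continuation IOCB. */
	if (tot_dsds <= 1)
		return 1;
	return 1 + DIV_ROUND_UP(tot_dsds - 1, 5);
}

static inline u16 example_ring_free(u16 length, u16 ring_index, u16 out)
{
	if (ring_index < out)
		return out - ring_index;	/* e.g. 2048,80,100 -> 20 */
	/* free region wraps past the end of the ring */
	return length - (ring_index - out);
}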
761 
762 /* Post a command */
763 static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
764     struct nvme_fc_remote_port *rport, void *hw_queue_handle,
765     struct nvmefc_fcp_req *fd)
766 {
767 	fc_port_t *fcport;
768 	struct srb_iocb *nvme;
769 	struct scsi_qla_host *vha;
770 	struct qla_hw_data *ha;
771 	int rval;
772 	srb_t *sp;
773 	struct qla_qpair *qpair = hw_queue_handle;
774 	struct nvme_private *priv = fd->private;
775 	struct qla_nvme_rport *qla_rport = rport->private;
776 
777 	if (!priv) {
778 		/* nvme association has been torn down */
779 		return -ENODEV;
780 	}
781 
782 	fcport = qla_rport->fcport;
783 
784 	if (unlikely(!qpair || !fcport || fcport->deleted))
785 		return -EBUSY;
786 
787 	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
788 		return -ENODEV;
789 
790 	vha = fcport->vha;
791 	ha = vha->hw;
792 
793 	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
794 		return -EBUSY;
795 
796 	/*
797 	 * If we know the dev is going away while the transport is still
798 	 * sending I/Os, return busy back to stall the I/O queue. This
799 	 * happens when the link goes away and fw hasn't notified us yet,
800 	 * but I/Os are being returned. If the dev comes back quickly we
801 	 * won't exhaust the I/O retry count at the core.
802 	 */
803 	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
804 		return -EBUSY;
805 
806 	qpair = qla_mapq_nvme_select_qpair(ha, qpair);
807 
808 	/* Alloc SRB structure */
809 	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
810 	if (!sp)
811 		return -EBUSY;
812 
813 	kref_init(&sp->cmd_kref);
814 	spin_lock_init(&priv->cmd_lock);
815 	sp->priv = priv;
816 	priv->sp = sp;
817 	sp->type = SRB_NVME_CMD;
818 	sp->name = "nvme_cmd";
819 	sp->done = qla_nvme_sp_done;
820 	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
821 	sp->qpair = qpair;
822 	sp->vha = vha;
823 	sp->cmd_sp = sp;
824 	nvme = &sp->u.iocb_cmd;
825 	nvme->u.nvme.desc = fd;
826 
827 	rval = qla2x00_start_nvme_mq(sp);
828 	if (rval != QLA_SUCCESS) {
829 		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
830 		    "qla2x00_start_nvme_mq failed = %d\n", rval);
831 		sp->priv = NULL;
832 		priv->sp = NULL;
833 		qla2xxx_rel_qpair_sp(sp->qpair, sp);
834 	}
835 
836 	return rval;
837 }
838 
839 static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
840 		struct blk_mq_queue_map *map)
841 {
842 	struct scsi_qla_host *vha = lport->private;
843 
844 	blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
845 }
846 
847 static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
848 {
849 	struct scsi_qla_host *vha = lport->private;
850 
851 	ql_log(ql_log_info, vha, 0x210f,
852 	    "localport delete of %p completed.\n", vha->nvme_local_port);
853 	vha->nvme_local_port = NULL;
854 	complete(&vha->nvme_del_done);
855 }
856 
857 static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
858 {
859 	fc_port_t *fcport;
860 	struct qla_nvme_rport *qla_rport = rport->private;
861 
862 	fcport = qla_rport->fcport;
863 	fcport->nvme_remote_port = NULL;
864 	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
865 	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
866 	ql_log(ql_log_info, fcport->vha, 0x2110,
867 	    "remoteport_delete of %p %8phN completed.\n",
868 	    fcport, fcport->port_name);
869 	complete(&fcport->nvme_del_done);
870 }
871 
872 static struct nvme_fc_port_template qla_nvme_fc_transport = {
873 	.localport_delete = qla_nvme_localport_delete,
874 	.remoteport_delete = qla_nvme_remoteport_delete,
875 	.create_queue   = qla_nvme_alloc_queue,
876 	.delete_queue	= NULL,
877 	.ls_req		= qla_nvme_ls_req,
878 	.ls_abort	= qla_nvme_ls_abort,
879 	.fcp_io		= qla_nvme_post_cmd,
880 	.fcp_abort	= qla_nvme_fcp_abort,
881 	.xmt_ls_rsp	= qla_nvme_xmt_ls_rsp,
882 	.map_queues	= qla_nvme_map_queues,
883 	.max_hw_queues  = DEF_NVME_HW_QUEUES,
884 	.max_sgl_segments = 1024,
885 	.max_dif_sgl_segments = 64,
886 	.dma_boundary = 0xFFFFFFFF,
887 	.local_priv_sz  = 8,
888 	.remote_priv_sz = sizeof(struct qla_nvme_rport),
889 	.lsrqst_priv_sz = sizeof(struct nvme_private),
890 	.fcprqst_priv_sz = sizeof(struct nvme_private),
891 };
892 
893 void qla_nvme_unregister_remote_port(struct fc_port *fcport)
894 {
895 	int ret;
896 
897 	if (!IS_ENABLED(CONFIG_NVME_FC))
898 		return;
899 
900 	ql_log(ql_log_warn, fcport->vha, 0x2112,
901 	    "%s: unregister remoteport on %p %8phN\n",
902 	    __func__, fcport, fcport->port_name);
903 
904 	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
905 		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
906 
907 	init_completion(&fcport->nvme_del_done);
908 	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
909 	if (ret)
910 		ql_log(ql_log_info, fcport->vha, 0x2114,
911 			"%s: Failed to unregister nvme_remote_port (%d)\n",
912 			    __func__, ret);
913 	wait_for_completion(&fcport->nvme_del_done);
914 }
915 
916 void qla_nvme_delete(struct scsi_qla_host *vha)
917 {
918 	int nv_ret;
919 
920 	if (!IS_ENABLED(CONFIG_NVME_FC))
921 		return;
922 
923 	if (vha->nvme_local_port) {
924 		init_completion(&vha->nvme_del_done);
925 		ql_log(ql_log_info, vha, 0x2116,
926 			"unregister localport=%p\n",
927 			vha->nvme_local_port);
928 		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
929 		if (nv_ret)
930 			ql_log(ql_log_info, vha, 0x2115,
931 			    "Unregister of localport failed\n");
932 		else
933 			wait_for_completion(&vha->nvme_del_done);
934 	}
935 }
936 
937 int qla_nvme_register_hba(struct scsi_qla_host *vha)
938 {
939 	struct nvme_fc_port_template *tmpl;
940 	struct qla_hw_data *ha;
941 	struct nvme_fc_port_info pinfo;
942 	int ret = -EINVAL;
943 
944 	if (!IS_ENABLED(CONFIG_NVME_FC))
945 		return ret;
946 
947 	ha = vha->hw;
948 	tmpl = &qla_nvme_fc_transport;
949 
950 	if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
951 		ql_log(ql_log_warn, vha, 0xfffd,
952 		    "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to: %d\n",
953 		    ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
954 		ql2xnvme_queues = DEF_NVME_HW_QUEUES;
955 	} else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
956 		ql_log(ql_log_warn, vha, 0xfffd,
957 		       "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
958 		       ql2xnvme_queues, (ha->max_qpairs - 1),
959 		       (ha->max_qpairs - 1));
960 		ql2xnvme_queues = ((ha->max_qpairs - 1));
961 	}
962 
963 	qla_nvme_fc_transport.max_hw_queues =
964 	    min((uint8_t)(ql2xnvme_queues),
965 		(uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));
966 
967 	ql_log(ql_log_info, vha, 0xfffb,
968 	       "Number of NVME queues used for this port: %d\n",
969 	    qla_nvme_fc_transport.max_hw_queues);
970 
971 	pinfo.node_name = wwn_to_u64(vha->node_name);
972 	pinfo.port_name = wwn_to_u64(vha->port_name);
973 	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
974 	pinfo.port_id = vha->d_id.b24;
975 
976 	mutex_lock(&ha->vport_lock);
977 	/*
978 	 * Check again for nvme_local_port to see if any other thread raced
979 	 * with this one and finished registration.
980 	 */
981 	if (!vha->nvme_local_port) {
982 		ql_log(ql_log_info, vha, 0xffff,
983 		    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
984 		    pinfo.node_name, pinfo.port_name, pinfo.port_id);
985 		qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
986 
987 		ret = nvme_fc_register_localport(&pinfo, tmpl,
988 						 get_device(&ha->pdev->dev),
989 						 &vha->nvme_local_port);
990 		mutex_unlock(&ha->vport_lock);
991 	} else {
992 		mutex_unlock(&ha->vport_lock);
993 		return 0;
994 	}
995 	if (ret) {
996 		ql_log(ql_log_warn, vha, 0xffff,
997 		    "register_localport failed: ret=%x\n", ret);
998 	} else {
999 		vha->nvme_local_port->private = vha;
1000 	}
1001 
1002 	return ret;
1003 }
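/*
 * Illustrative sketch (editor's addition, not driver code): the clamping
 * applied to ql2xnvme_queues in qla_nvme_register_hba() above. One qpair
 * is held back, so at most max_qpairs - 1 NVMe hardware queues are
 * exposed, with a floor of one. example_ names are hypothetical.
 */
static inline u8 example_nvme_hw_queues(int requested, int min_q,
					int def_q, int max_qpairs)
{
	int avail = max_qpairs - 1;

	if (requested < min_q)
		requested = def_q;	/* reset to the default */
	else if (requested > avail)
		requested = avail;	/* cap at available qpairs */

	return (u8)(avail > 0 ? min(requested, avail) : 1);
}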
1004 
1005 void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
1006 {
1007 	struct qla_hw_data *ha;
1008 
1009 	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
1010 		return;
1011 
1012 	ha = orig_sp->fcport->vha->hw;
1013 
1014 	WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
1015 	/* Use Driver Specified Retry Count */
1016 	abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
1017 	abt->drv.abts_rty_cnt = cpu_to_le16(2);
1018 	/* Use specified response timeout */
1019 	abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
1020 	/* set it to 2 * r_a_tov in secs */
1021 	abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
1022 }
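/*
 * Illustrative sketch (editor's addition, not driver code): the timeout
 * conversion in qla_nvme_abort_set_option() above. The divide-by-10 step
 * suggests ha->r_a_tov is kept in 100 ms units (an assumption here): a
 * stored value of 100 is R_A_TOV = 10 s, giving a 2 * 10 = 20 second
 * ABTS response timeout.
 */
static inline u16 example_abts_rsp_timeout_secs(u16 r_a_tov_100ms)
{
	return 2 * (r_a_tov_100ms / 10);	/* e.g. 100 -> 20 s */
}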
1023 
1024 void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
1025 {
1026 	u16	comp_status;
1027 	struct scsi_qla_host *vha;
1028 
1029 	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
1030 		return;
1031 
1032 	vha = orig_sp->fcport->vha;
1033 
1034 	comp_status = le16_to_cpu(abt->comp_status);
1035 	switch (comp_status) {
1036 	case CS_RESET:		/* reset event aborted */
1037 	case CS_ABORTED:	/* IOCB was cleaned */
1038 	/* N_Port handle is not currently logged in */
1039 	case CS_TIMEOUT:
1040 	/* N_Port handle was logged out while waiting for ABTS to complete */
1041 	case CS_PORT_UNAVAILABLE:
1042 	/* Firmware found that the port name changed */
1043 	case CS_PORT_LOGGED_OUT:
1044 	/* BA_RJT was received for the ABTS */
1045 	case CS_PORT_CONFIG_CHG:
1046 		ql_dbg(ql_dbg_async, vha, 0xf09d,
1047 		       "Abort I/O IOCB completed with error, comp_status=%x\n",
1048 		       comp_status);
1049 		break;
1050 
1051 	/* BA_RJT was received for the ABTS */
1052 	case CS_REJECT_RECEIVED:
1053 		ql_dbg(ql_dbg_async, vha, 0xf09e,
1054 		       "BA_RJT was received for the ABTS, rjt_vendorUnique = %u\n",
1055 			abt->fw.ba_rjt_vendorUnique);
1056 		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
1057 		       "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
1058 		       abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
1059 		break;
1060 
1061 	case CS_COMPLETE:
1062 		ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
1063 		       "IOCB request is completed successfully comp_status=%x\n",
1064 		       comp_status);
1065 		break;
1066 
1067 	case CS_IOCB_ERROR:
1068 		ql_dbg(ql_dbg_async, vha, 0xf0a0,
1069 		       "IOCB request is failed, comp_status=%x\n", comp_status);
1070 		break;
1071 
1072 	default:
1073 		ql_dbg(ql_dbg_async, vha, 0xf0a1,
1074 		       "Invalid Abort IO IOCB Completion Status %x\n",
1075 		       comp_status);
1076 		break;
1077 	}
1078 }
1079 
1080 inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
1081 {
1082 	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
1083 		return;
1084 	kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
1085 }
1086 
1087 static void qla_nvme_fc_format_rjt(void *buf, u8 ls_cmd, u8 reason,
1088 				   u8 explanation, u8 vendor)
1089 {
1090 	struct fcnvme_ls_rjt *rjt = buf;
1091 
1092 	rjt->w0.ls_cmd = FCNVME_LSDESC_RQST;
1093 	rjt->desc_list_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt));
1094 	rjt->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1095 	rjt->rqst.desc_len =
1096 		fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1097 	rjt->rqst.w0.ls_cmd = ls_cmd;
1098 	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1099 	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1100 	rjt->rjt.reason_code = reason;
1101 	rjt->rjt.reason_explanation = explanation;
1102 	rjt->rjt.vendor = vendor;
1103 }
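/*
 * Illustrative sketch (editor's addition, not driver code): how the
 * descriptor length words above are derived. fcnvme_lsdesc_len() (from
 * <linux/nvme-fc.h>, included at the top of this file) converts a
 * structure size into the on-the-wire descriptor length, which excludes
 * the leading tag and length words. The example_ name is hypothetical.
 */
static inline __be32 example_rjt_desc_len(void)
{
	/* sizeof(struct fcnvme_lsdesc_rjt) minus the 8-byte tag/len pair */
	return fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
}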
1104 
1105 static void qla_nvme_lsrjt_pt_iocb(struct scsi_qla_host *vha,
1106 				   struct pt_ls4_request *lsrjt_iocb,
1107 				   struct qla_nvme_lsrjt_pt_arg *a)
1108 {
1109 	lsrjt_iocb->entry_type = PT_LS4_REQUEST;
1110 	lsrjt_iocb->entry_count = 1;
1111 	lsrjt_iocb->sys_define = 0;
1112 	lsrjt_iocb->entry_status = 0;
1113 	lsrjt_iocb->handle = QLA_SKIP_HANDLE;
1114 	lsrjt_iocb->nport_handle = a->nport_handle;
1115 	lsrjt_iocb->exchange_address = a->xchg_address;
1116 	lsrjt_iocb->vp_index = a->vp_idx;
1117 
1118 	lsrjt_iocb->control_flags = cpu_to_le16(a->control_flags);
1119 
1120 	put_unaligned_le64(a->tx_addr, &lsrjt_iocb->dsd[0].address);
1121 	lsrjt_iocb->dsd[0].length = cpu_to_le32(a->tx_byte_count);
1122 	lsrjt_iocb->tx_dseg_count = cpu_to_le16(1);
1123 	lsrjt_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
1124 
1125 	put_unaligned_le64(a->rx_addr, &lsrjt_iocb->dsd[1].address);
1126 	lsrjt_iocb->dsd[1].length = 0;
1127 	lsrjt_iocb->rx_dseg_count = 0;
1128 	lsrjt_iocb->rx_byte_count = 0;
1129 }
1130 
1131 static int
1132 qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, struct qla_qpair *qp,
1133 			struct qla_nvme_lsrjt_pt_arg *a, bool is_xchg_terminate)
1134 {
1135 	struct pt_ls4_request *lsrjt_iocb;
1136 
1137 	lsrjt_iocb = __qla2x00_alloc_iocbs(qp, NULL);
1138 	if (!lsrjt_iocb) {
1139 		ql_log(ql_log_warn, vha, 0x210e,
1140 		       "__qla2x00_alloc_iocbs failed.\n");
1141 		return QLA_FUNCTION_FAILED;
1142 	}
1143 
1144 	if (!is_xchg_terminate) {
1145 		qla_nvme_fc_format_rjt((void *)vha->hw->lsrjt.c, a->opcode,
1146 				       a->reason, a->explanation, 0);
1147 
1148 		a->tx_byte_count = sizeof(struct fcnvme_ls_rjt);
1149 		a->tx_addr = vha->hw->lsrjt.cdma;
1150 		a->control_flags = CF_LS4_RESPONDER << CF_LS4_SHIFT;
1151 
1152 		ql_dbg(ql_dbg_unsol, vha, 0x211f,
1153 		       "Sending nvme fc ls reject ox_id %04x op %04x\n",
1154 		       a->ox_id, a->opcode);
1155 		ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x210f,
1156 			       vha->hw->lsrjt.c, sizeof(*vha->hw->lsrjt.c));
1157 	} else {
1158 		a->tx_byte_count = 0;
1159 		a->control_flags = CF_LS4_RESPONDER_TERM << CF_LS4_SHIFT;
1160 		ql_dbg(ql_dbg_unsol, vha, 0x2110,
1161 		       "Terminate nvme ls xchg 0x%x\n", a->xchg_address);
1162 	}
1163 
1164 	qla_nvme_lsrjt_pt_iocb(vha, lsrjt_iocb, a);
1165 	/* flush iocb to mem before notifying hw doorbell */
1166 	wmb();
1167 	qla2x00_start_iocbs(vha, qp->req);
1168 	return 0;
1169 }
1170 
1171 /*
1172  * qla2xxx_process_purls_pkt() - Pass up an unsolicited received
1173  * FC-NVMe Link Service pkt to nvme_fc_rcv_ls_req().
1174  * The LLDD needs to provide memory for the response buffer, which
1175  * will be used to reference the exchange corresponding
1176  * to the LS when issuing an LS response. The LLDD will have to free
1177  * the response buffer in lport->ops->xmt_ls_rsp().
1178  *
1179  * @vha: SCSI qla host
1180  * @item: ptr to purex_item
1181  */
1182 static void
1183 qla2xxx_process_purls_pkt(struct scsi_qla_host *vha, struct purex_item *item)
1184 {
1185 	struct qla_nvme_unsol_ctx *uctx = item->purls_context;
1186 	struct qla_nvme_lsrjt_pt_arg a;
1187 	int ret = 1;
1188 
1189 #if (IS_ENABLED(CONFIG_NVME_FC))
1190 	ret = nvme_fc_rcv_ls_req(uctx->fcport->nvme_remote_port, &uctx->lsrsp,
1191 				 &item->iocb, item->size);
1192 #endif
1193 	if (ret) {
1194 		ql_dbg(ql_dbg_unsol, vha, 0x2125, "NVMe transport ls_req failed\n");
1195 		memset((void *)&a, 0, sizeof(a));
1196 		a.vp_idx = vha->vp_idx;
1197 		a.nport_handle = uctx->nport_handle;
1198 		a.xchg_address = uctx->exchange_address;
1199 		qla_nvme_ls_reject_iocb(vha, vha->hw->base_qpair, &a, true);
1200 		list_del(&uctx->elem);
1201 		kfree(uctx);
1202 	}
1203 }
1204 
1205 static scsi_qla_host_t *
1206 qla2xxx_get_vha_from_vp_idx(struct qla_hw_data *ha, uint16_t vp_index)
1207 {
1208 	scsi_qla_host_t *base_vha, *vha, *tvp;
1209 	unsigned long flags;
1210 
1211 	base_vha = pci_get_drvdata(ha->pdev);
1212 
1213 	if (!vp_index && !ha->num_vhosts)
1214 		return base_vha;
1215 
1216 	spin_lock_irqsave(&ha->vport_slock, flags);
1217 	list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
1218 		if (vha->vp_idx == vp_index) {
1219 			spin_unlock_irqrestore(&ha->vport_slock, flags);
1220 			return vha;
1221 		}
1222 	}
1223 	spin_unlock_irqrestore(&ha->vport_slock, flags);
1224 
1225 	return NULL;
1226 }
1227 
1228 void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
1229 {
1230 	struct nvme_fc_remote_port *rport;
1231 	struct qla_nvme_rport *qla_rport;
1232 	struct qla_nvme_lsrjt_pt_arg a;
1233 	struct pt_ls4_rx_unsol *p = *pkt;
1234 	struct qla_nvme_unsol_ctx *uctx;
1235 	struct rsp_que *rsp_q = *rsp;
1236 	struct qla_hw_data *ha;
1237 	scsi_qla_host_t	*vha;
1238 	fc_port_t *fcport = NULL;
1239 	struct purex_item *item;
1240 	port_id_t d_id = {0};
1241 	port_id_t id = {0};
1242 	u8 *opcode;
1243 	bool xmt_reject = false;
1244 
1245 	ha = rsp_q->hw;
1246 
1247 	vha = qla2xxx_get_vha_from_vp_idx(ha, p->vp_index);
1248 	if (!vha) {
1249 		ql_log(ql_log_warn, NULL, 0x2110, "Invalid vp index %d\n", p->vp_index);
1250 		WARN_ON_ONCE(1);
1251 		return;
1252 	}
1253 
1254 	memset((void *)&a, 0, sizeof(a));
1255 	opcode = (u8 *)&p->payload[0];
1256 	a.opcode = opcode[3];
1257 	a.vp_idx = p->vp_index;
1258 	a.nport_handle = p->nport_handle;
1259 	a.ox_id = p->ox_id;
1260 	a.xchg_address = p->exchange_address;
1261 
1262 	id.b.domain = p->s_id.domain;
1263 	id.b.area   = p->s_id.area;
1264 	id.b.al_pa  = p->s_id.al_pa;
1265 	d_id.b.domain = p->d_id[2];
1266 	d_id.b.area   = p->d_id[1];
1267 	d_id.b.al_pa  = p->d_id[0];
1268 
1269 	fcport = qla2x00_find_fcport_by_nportid(vha, &id, 0);
1270 	if (!fcport) {
1271 		ql_dbg(ql_dbg_unsol, vha, 0x211e,
1272 		       "Failed to find sid=%06x did=%06x\n",
1273 		       id.b24, d_id.b24);
1274 		a.reason = FCNVME_RJT_RC_INV_ASSOC;
1275 		a.explanation = FCNVME_RJT_EXP_NONE;
1276 		xmt_reject = true;
1277 		goto out;
1278 	}
1279 	rport = fcport->nvme_remote_port;
1280 	qla_rport = rport->private;
1281 
1282 	item = qla27xx_copy_multiple_pkt(vha, pkt, rsp, true, false);
1283 	if (!item) {
1284 		a.reason = FCNVME_RJT_RC_LOGIC;
1285 		a.explanation = FCNVME_RJT_EXP_NONE;
1286 		xmt_reject = true;
1287 		goto out;
1288 	}
1289 
1290 	uctx = kzalloc(sizeof(*uctx), GFP_ATOMIC);
1291 	if (!uctx) {
1292 		ql_log(ql_log_info, vha, 0x2126, "Failed to allocate memory\n");
1293 		a.reason = FCNVME_RJT_RC_LOGIC;
1294 		a.explanation = FCNVME_RJT_EXP_NONE;
1295 		xmt_reject = true;
1296 		kfree(item);
1297 		goto out;
1298 	}
1299 
1300 	uctx->vha = vha;
1301 	uctx->fcport = fcport;
1302 	uctx->exchange_address = p->exchange_address;
1303 	uctx->nport_handle = p->nport_handle;
1304 	uctx->ox_id = p->ox_id;
1305 	qla_rport->uctx = uctx;
1306 	INIT_LIST_HEAD(&uctx->elem);
1307 	list_add_tail(&uctx->elem, &fcport->unsol_ctx_head);
1308 	item->purls_context = (void *)uctx;
1309 
1310 	ql_dbg(ql_dbg_unsol, vha, 0x2121,
1311 	       "PURLS OP[%01x] size %d xchg addr 0x%x portid %06x\n",
1312 	       item->iocb.iocb[3], item->size, uctx->exchange_address,
1313 	       fcport->d_id.b24);
1314 	/* +48    0  1  2  3  4  5  6  7  8  9  A  B  C  D  E  F
1315 	 * ----- -----------------------------------------------
1316 	 * 0000: 00 00 00 05 28 00 00 00 07 00 00 00 08 00 00 00
1317 	 * 0010: ab ec 0f cc 00 00 8d 7d 05 00 00 00 10 00 00 00
1318 	 * 0020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
1319 	 */
1320 	ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x2120,
1321 		       &item->iocb, item->size);
1322 
1323 	qla24xx_queue_purex_item(vha, item, qla2xxx_process_purls_pkt);
1324 out:
1325 	if (xmt_reject) {
1326 		qla_nvme_ls_reject_iocb(vha, (*rsp)->qpair, &a, false);
1327 		__qla_consume_iocb(vha, pkt, rsp);
1328 	}
1329 }
1330