1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2017 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_nvme.h" 8 #include <linux/scatterlist.h> 9 #include <linux/delay.h> 10 #include <linux/nvme.h> 11 #include <linux/nvme-fc.h> 12 13 static struct nvme_fc_port_template qla_nvme_fc_transport; 14 15 static void qla_nvme_unregister_remote_port(struct work_struct *); 16 17 int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) 18 { 19 struct qla_nvme_rport *rport; 20 struct nvme_fc_port_info req; 21 int ret; 22 23 if (!IS_ENABLED(CONFIG_NVME_FC)) 24 return 0; 25 26 if (!vha->flags.nvme_enabled) { 27 ql_log(ql_log_info, vha, 0x2100, 28 "%s: Not registering target since Host NVME is not enabled\n", 29 __func__); 30 return 0; 31 } 32 33 if (!vha->nvme_local_port && qla_nvme_register_hba(vha)) 34 return 0; 35 36 if (!(fcport->nvme_prli_service_param & 37 (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) || 38 (fcport->nvme_flag & NVME_FLAG_REGISTERED)) 39 return 0; 40 41 INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port); 42 fcport->nvme_flag &= ~NVME_FLAG_RESETTING; 43 44 memset(&req, 0, sizeof(struct nvme_fc_port_info)); 45 req.port_name = wwn_to_u64(fcport->port_name); 46 req.node_name = wwn_to_u64(fcport->node_name); 47 req.port_role = 0; 48 req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO; 49 50 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR) 51 req.port_role = FC_PORT_ROLE_NVME_INITIATOR; 52 53 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET) 54 req.port_role |= FC_PORT_ROLE_NVME_TARGET; 55 56 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY) 57 req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY; 58 59 req.port_id = fcport->d_id.b24; 60 61 ql_log(ql_log_info, vha, 0x2102, 62 "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n", 63 __func__, req.node_name, req.port_name, 64 req.port_id); 65 66 ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req, 67 &fcport->nvme_remote_port); 68 if (ret) { 69 ql_log(ql_log_warn, vha, 0x212e, 70 "Failed to register remote port. Transport returned %d\n", 71 ret); 72 return ret; 73 } 74 75 rport = fcport->nvme_remote_port->private; 76 rport->fcport = fcport; 77 list_add_tail(&rport->list, &vha->nvme_rport_list); 78 79 fcport->nvme_flag |= NVME_FLAG_REGISTERED; 80 return 0; 81 } 82 83 /* Allocate a queue for NVMe traffic */ 84 static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, 85 unsigned int qidx, u16 qsize, void **handle) 86 { 87 struct scsi_qla_host *vha; 88 struct qla_hw_data *ha; 89 struct qla_qpair *qpair; 90 91 if (!qidx) 92 qidx++; 93 94 vha = (struct scsi_qla_host *)lport->private; 95 ha = vha->hw; 96 97 ql_log(ql_log_info, vha, 0x2104, 98 "%s: handle %p, idx =%d, qsize %d\n", 99 __func__, handle, qidx, qsize); 100 101 if (qidx > qla_nvme_fc_transport.max_hw_queues) { 102 ql_log(ql_log_warn, vha, 0x212f, 103 "%s: Illegal qidx=%d. Max=%d\n", 104 __func__, qidx, qla_nvme_fc_transport.max_hw_queues); 105 return -EINVAL; 106 } 107 108 if (ha->queue_pair_map[qidx]) { 109 *handle = ha->queue_pair_map[qidx]; 110 ql_log(ql_log_info, vha, 0x2121, 111 "Returning existing qpair of %p for idx=%x\n", 112 *handle, qidx); 113 return 0; 114 } 115 116 qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true); 117 if (qpair == NULL) { 118 ql_log(ql_log_warn, vha, 0x2122, 119 "Failed to allocate qpair\n"); 120 return -EINVAL; 121 } 122 *handle = qpair; 123 124 return 0; 125 } 126 127 static void qla_nvme_sp_ls_done(void *ptr, int res) 128 { 129 srb_t *sp = ptr; 130 struct srb_iocb *nvme; 131 struct nvmefc_ls_req *fd; 132 struct nvme_private *priv; 133 134 if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0)) 135 return; 136 137 atomic_dec(&sp->ref_count); 138 139 if (res) 140 res = -EINVAL; 141 142 nvme = &sp->u.iocb_cmd; 143 fd = nvme->u.nvme.desc; 144 priv = fd->private; 145 priv->comp_status = res; 146 schedule_work(&priv->ls_work); 147 /* work schedule doesn't need the sp */ 148 qla2x00_rel_sp(sp); 149 } 150 151 static void qla_nvme_sp_done(void *ptr, int res) 152 { 153 srb_t *sp = ptr; 154 struct srb_iocb *nvme; 155 struct nvmefc_fcp_req *fd; 156 157 nvme = &sp->u.iocb_cmd; 158 fd = nvme->u.nvme.desc; 159 160 if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0)) 161 return; 162 163 atomic_dec(&sp->ref_count); 164 165 if (res == QLA_SUCCESS) { 166 fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len; 167 } else { 168 fd->rcv_rsplen = 0; 169 fd->transferred_length = 0; 170 } 171 fd->status = 0; 172 fd->done(fd); 173 qla2xxx_rel_qpair_sp(sp->qpair, sp); 174 175 return; 176 } 177 178 static void qla_nvme_abort_work(struct work_struct *work) 179 { 180 struct nvme_private *priv = 181 container_of(work, struct nvme_private, abort_work); 182 srb_t *sp = priv->sp; 183 fc_port_t *fcport = sp->fcport; 184 struct qla_hw_data *ha = fcport->vha->hw; 185 int rval; 186 187 ql_dbg(ql_dbg_io, fcport->vha, 0xffff, 188 "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n", 189 __func__, sp, sp->handle, fcport, fcport->deleted); 190 191 if (!ha->flags.fw_started && (fcport && fcport->deleted)) 192 return; 193 194 if (ha->flags.host_shutting_down) { 195 ql_log(ql_log_info, sp->fcport->vha, 0xffff, 196 "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n", 197 __func__, sp, sp->type, atomic_read(&sp->ref_count)); 198 sp->done(sp, 0); 199 return; 200 } 201 202 if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0)) 203 return; 204 205 rval = ha->isp_ops->abort_command(sp); 206 207 ql_dbg(ql_dbg_io, fcport->vha, 0x212b, 208 "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n", 209 __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted", 210 sp, sp->handle, fcport, rval); 211 } 212 213 static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport, 214 struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd) 215 { 216 struct nvme_private *priv = fd->private; 217 218 INIT_WORK(&priv->abort_work, qla_nvme_abort_work); 219 schedule_work(&priv->abort_work); 220 } 221 222 static void qla_nvme_ls_complete(struct work_struct *work) 223 { 224 struct nvme_private *priv = 225 container_of(work, struct nvme_private, ls_work); 226 struct nvmefc_ls_req *fd = priv->fd; 227 228 fd->done(fd, priv->comp_status); 229 } 230 231 static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, 232 struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd) 233 { 234 struct qla_nvme_rport *qla_rport = rport->private; 235 fc_port_t *fcport = qla_rport->fcport; 236 struct srb_iocb *nvme; 237 struct nvme_private *priv = fd->private; 238 struct scsi_qla_host *vha; 239 int rval = QLA_FUNCTION_FAILED; 240 struct qla_hw_data *ha; 241 srb_t *sp; 242 243 vha = fcport->vha; 244 ha = vha->hw; 245 /* Alloc SRB structure */ 246 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); 247 if (!sp) 248 return rval; 249 250 sp->type = SRB_NVME_LS; 251 sp->name = "nvme_ls"; 252 sp->done = qla_nvme_sp_ls_done; 253 atomic_set(&sp->ref_count, 1); 254 nvme = &sp->u.iocb_cmd; 255 priv->sp = sp; 256 priv->fd = fd; 257 INIT_WORK(&priv->ls_work, qla_nvme_ls_complete); 258 nvme->u.nvme.desc = fd; 259 nvme->u.nvme.dir = 0; 260 nvme->u.nvme.dl = 0; 261 nvme->u.nvme.cmd_len = fd->rqstlen; 262 nvme->u.nvme.rsp_len = fd->rsplen; 263 nvme->u.nvme.rsp_dma = fd->rspdma; 264 nvme->u.nvme.timeout_sec = fd->timeout; 265 nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr, 266 fd->rqstlen, DMA_TO_DEVICE); 267 dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, 268 fd->rqstlen, DMA_TO_DEVICE); 269 270 rval = qla2x00_start_sp(sp); 271 if (rval != QLA_SUCCESS) { 272 ql_log(ql_log_warn, vha, 0x700e, 273 "qla2x00_start_sp failed = %d\n", rval); 274 atomic_dec(&sp->ref_count); 275 wake_up(&sp->nvme_ls_waitq); 276 return rval; 277 } 278 279 return rval; 280 } 281 282 static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport, 283 struct nvme_fc_remote_port *rport, void *hw_queue_handle, 284 struct nvmefc_fcp_req *fd) 285 { 286 struct nvme_private *priv = fd->private; 287 288 INIT_WORK(&priv->abort_work, qla_nvme_abort_work); 289 schedule_work(&priv->abort_work); 290 } 291 292 static inline int qla2x00_start_nvme_mq(srb_t *sp) 293 { 294 unsigned long flags; 295 uint32_t *clr_ptr; 296 uint32_t index; 297 uint32_t handle; 298 struct cmd_nvme *cmd_pkt; 299 uint16_t cnt, i; 300 uint16_t req_cnt; 301 uint16_t tot_dsds; 302 uint16_t avail_dsds; 303 struct dsd64 *cur_dsd; 304 struct req_que *req = NULL; 305 struct scsi_qla_host *vha = sp->fcport->vha; 306 struct qla_hw_data *ha = vha->hw; 307 struct qla_qpair *qpair = sp->qpair; 308 struct srb_iocb *nvme = &sp->u.iocb_cmd; 309 struct scatterlist *sgl, *sg; 310 struct nvmefc_fcp_req *fd = nvme->u.nvme.desc; 311 uint32_t rval = QLA_SUCCESS; 312 313 /* Setup qpair pointers */ 314 req = qpair->req; 315 tot_dsds = fd->sg_cnt; 316 317 /* Acquire qpair specific lock */ 318 spin_lock_irqsave(&qpair->qp_lock, flags); 319 320 /* Check for room in outstanding command list. */ 321 handle = req->current_outstanding_cmd; 322 for (index = 1; index < req->num_outstanding_cmds; index++) { 323 handle++; 324 if (handle == req->num_outstanding_cmds) 325 handle = 1; 326 if (!req->outstanding_cmds[handle]) 327 break; 328 } 329 330 if (index == req->num_outstanding_cmds) { 331 rval = -EBUSY; 332 goto queuing_error; 333 } 334 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 335 if (req->cnt < (req_cnt + 2)) { 336 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : 337 RD_REG_DWORD_RELAXED(req->req_q_out); 338 339 if (req->ring_index < cnt) 340 req->cnt = cnt - req->ring_index; 341 else 342 req->cnt = req->length - (req->ring_index - cnt); 343 344 if (req->cnt < (req_cnt + 2)){ 345 rval = -EBUSY; 346 goto queuing_error; 347 } 348 } 349 350 if (unlikely(!fd->sqid)) { 351 struct nvme_fc_cmd_iu *cmd = fd->cmdaddr; 352 353 if (cmd->sqe.common.opcode == nvme_admin_async_event) { 354 nvme->u.nvme.aen_op = 1; 355 atomic_inc(&ha->nvme_active_aen_cnt); 356 } 357 } 358 359 /* Build command packet. */ 360 req->current_outstanding_cmd = handle; 361 req->outstanding_cmds[handle] = sp; 362 sp->handle = handle; 363 req->cnt -= req_cnt; 364 365 cmd_pkt = (struct cmd_nvme *)req->ring_ptr; 366 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 367 368 /* Zero out remaining portion of packet. */ 369 clr_ptr = (uint32_t *)cmd_pkt + 2; 370 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 371 372 cmd_pkt->entry_status = 0; 373 374 /* Update entry type to indicate Command NVME IOCB */ 375 cmd_pkt->entry_type = COMMAND_NVME; 376 377 /* No data transfer how do we check buffer len == 0?? */ 378 if (fd->io_dir == NVMEFC_FCP_READ) { 379 cmd_pkt->control_flags = CF_READ_DATA; 380 vha->qla_stats.input_bytes += fd->payload_length; 381 vha->qla_stats.input_requests++; 382 } else if (fd->io_dir == NVMEFC_FCP_WRITE) { 383 cmd_pkt->control_flags = CF_WRITE_DATA; 384 if ((vha->flags.nvme_first_burst) && 385 (sp->fcport->nvme_prli_service_param & 386 NVME_PRLI_SP_FIRST_BURST)) { 387 if ((fd->payload_length <= 388 sp->fcport->nvme_first_burst_size) || 389 (sp->fcport->nvme_first_burst_size == 0)) 390 cmd_pkt->control_flags |= 391 CF_NVME_FIRST_BURST_ENABLE; 392 } 393 vha->qla_stats.output_bytes += fd->payload_length; 394 vha->qla_stats.output_requests++; 395 } else if (fd->io_dir == 0) { 396 cmd_pkt->control_flags = 0; 397 } 398 399 /* Set NPORT-ID */ 400 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 401 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 402 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 403 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 404 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 405 406 /* NVME RSP IU */ 407 cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen); 408 put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address); 409 410 /* NVME CNMD IU */ 411 cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen); 412 cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma); 413 414 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 415 cmd_pkt->byte_count = cpu_to_le32(fd->payload_length); 416 417 /* One DSD is available in the Command Type NVME IOCB */ 418 avail_dsds = 1; 419 cur_dsd = &cmd_pkt->nvme_dsd; 420 sgl = fd->first_sgl; 421 422 /* Load data segments */ 423 for_each_sg(sgl, sg, tot_dsds, i) { 424 cont_a64_entry_t *cont_pkt; 425 426 /* Allocate additional continuation packets? */ 427 if (avail_dsds == 0) { 428 /* 429 * Five DSDs are available in the Continuation 430 * Type 1 IOCB. 431 */ 432 433 /* Adjust ring index */ 434 req->ring_index++; 435 if (req->ring_index == req->length) { 436 req->ring_index = 0; 437 req->ring_ptr = req->ring; 438 } else { 439 req->ring_ptr++; 440 } 441 cont_pkt = (cont_a64_entry_t *)req->ring_ptr; 442 put_unaligned_le32(CONTINUE_A64_TYPE, 443 &cont_pkt->entry_type); 444 445 cur_dsd = cont_pkt->dsd; 446 avail_dsds = ARRAY_SIZE(cont_pkt->dsd); 447 } 448 449 append_dsd64(&cur_dsd, sg); 450 avail_dsds--; 451 } 452 453 /* Set total entry count. */ 454 cmd_pkt->entry_count = (uint8_t)req_cnt; 455 wmb(); 456 457 /* Adjust ring index. */ 458 req->ring_index++; 459 if (req->ring_index == req->length) { 460 req->ring_index = 0; 461 req->ring_ptr = req->ring; 462 } else { 463 req->ring_ptr++; 464 } 465 466 /* Set chip new ring index. */ 467 WRT_REG_DWORD(req->req_q_in, req->ring_index); 468 469 queuing_error: 470 spin_unlock_irqrestore(&qpair->qp_lock, flags); 471 return rval; 472 } 473 474 /* Post a command */ 475 static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, 476 struct nvme_fc_remote_port *rport, void *hw_queue_handle, 477 struct nvmefc_fcp_req *fd) 478 { 479 fc_port_t *fcport; 480 struct srb_iocb *nvme; 481 struct scsi_qla_host *vha; 482 int rval = -ENODEV; 483 srb_t *sp; 484 struct qla_qpair *qpair = hw_queue_handle; 485 struct nvme_private *priv = fd->private; 486 struct qla_nvme_rport *qla_rport = rport->private; 487 488 fcport = qla_rport->fcport; 489 490 vha = fcport->vha; 491 492 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 493 return rval; 494 495 /* 496 * If we know the dev is going away while the transport is still sending 497 * IO's return busy back to stall the IO Q. This happens when the 498 * link goes away and fw hasn't notified us yet, but IO's are being 499 * returned. If the dev comes back quickly we won't exhaust the IO 500 * retry count at the core. 501 */ 502 if (fcport->nvme_flag & NVME_FLAG_RESETTING) 503 return -EBUSY; 504 505 /* Alloc SRB structure */ 506 sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC); 507 if (!sp) 508 return -EBUSY; 509 510 atomic_set(&sp->ref_count, 1); 511 init_waitqueue_head(&sp->nvme_ls_waitq); 512 priv->sp = sp; 513 sp->type = SRB_NVME_CMD; 514 sp->name = "nvme_cmd"; 515 sp->done = qla_nvme_sp_done; 516 sp->qpair = qpair; 517 sp->vha = vha; 518 nvme = &sp->u.iocb_cmd; 519 nvme->u.nvme.desc = fd; 520 521 rval = qla2x00_start_nvme_mq(sp); 522 if (rval != QLA_SUCCESS) { 523 ql_log(ql_log_warn, vha, 0x212d, 524 "qla2x00_start_nvme_mq failed = %d\n", rval); 525 atomic_dec(&sp->ref_count); 526 wake_up(&sp->nvme_ls_waitq); 527 } 528 529 return rval; 530 } 531 532 static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport) 533 { 534 struct scsi_qla_host *vha = lport->private; 535 536 ql_log(ql_log_info, vha, 0x210f, 537 "localport delete of %p completed.\n", vha->nvme_local_port); 538 vha->nvme_local_port = NULL; 539 complete(&vha->nvme_del_done); 540 } 541 542 static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) 543 { 544 fc_port_t *fcport; 545 struct qla_nvme_rport *qla_rport = rport->private, *trport; 546 547 fcport = qla_rport->fcport; 548 fcport->nvme_remote_port = NULL; 549 fcport->nvme_flag &= ~NVME_FLAG_REGISTERED; 550 551 list_for_each_entry_safe(qla_rport, trport, 552 &fcport->vha->nvme_rport_list, list) { 553 if (qla_rport->fcport == fcport) { 554 list_del(&qla_rport->list); 555 break; 556 } 557 } 558 complete(&fcport->nvme_del_done); 559 560 if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) { 561 INIT_WORK(&fcport->free_work, qlt_free_session_done); 562 schedule_work(&fcport->free_work); 563 } 564 565 fcport->nvme_flag &= ~NVME_FLAG_DELETING; 566 ql_log(ql_log_info, fcport->vha, 0x2110, 567 "remoteport_delete of %p completed.\n", fcport); 568 } 569 570 static struct nvme_fc_port_template qla_nvme_fc_transport = { 571 .localport_delete = qla_nvme_localport_delete, 572 .remoteport_delete = qla_nvme_remoteport_delete, 573 .create_queue = qla_nvme_alloc_queue, 574 .delete_queue = NULL, 575 .ls_req = qla_nvme_ls_req, 576 .ls_abort = qla_nvme_ls_abort, 577 .fcp_io = qla_nvme_post_cmd, 578 .fcp_abort = qla_nvme_fcp_abort, 579 .max_hw_queues = 8, 580 .max_sgl_segments = 1024, 581 .max_dif_sgl_segments = 64, 582 .dma_boundary = 0xFFFFFFFF, 583 .local_priv_sz = 8, 584 .remote_priv_sz = sizeof(struct qla_nvme_rport), 585 .lsrqst_priv_sz = sizeof(struct nvme_private), 586 .fcprqst_priv_sz = sizeof(struct nvme_private), 587 }; 588 589 static void qla_nvme_unregister_remote_port(struct work_struct *work) 590 { 591 struct fc_port *fcport = container_of(work, struct fc_port, 592 nvme_del_work); 593 struct qla_nvme_rport *qla_rport, *trport; 594 595 if (!IS_ENABLED(CONFIG_NVME_FC)) 596 return; 597 598 ql_log(ql_log_warn, NULL, 0x2112, 599 "%s: unregister remoteport on %p\n",__func__, fcport); 600 601 list_for_each_entry_safe(qla_rport, trport, 602 &fcport->vha->nvme_rport_list, list) { 603 if (qla_rport->fcport == fcport) { 604 ql_log(ql_log_info, fcport->vha, 0x2113, 605 "%s: fcport=%p\n", __func__, fcport); 606 nvme_fc_set_remoteport_devloss 607 (fcport->nvme_remote_port, 0); 608 init_completion(&fcport->nvme_del_done); 609 if (nvme_fc_unregister_remoteport 610 (fcport->nvme_remote_port)) 611 ql_log(ql_log_info, fcport->vha, 0x2114, 612 "%s: Failed to unregister nvme_remote_port\n", 613 __func__); 614 wait_for_completion(&fcport->nvme_del_done); 615 break; 616 } 617 } 618 } 619 620 void qla_nvme_delete(struct scsi_qla_host *vha) 621 { 622 int nv_ret; 623 624 if (!IS_ENABLED(CONFIG_NVME_FC)) 625 return; 626 627 if (vha->nvme_local_port) { 628 init_completion(&vha->nvme_del_done); 629 ql_log(ql_log_info, vha, 0x2116, 630 "unregister localport=%p\n", 631 vha->nvme_local_port); 632 nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port); 633 if (nv_ret) 634 ql_log(ql_log_info, vha, 0x2115, 635 "Unregister of localport failed\n"); 636 else 637 wait_for_completion(&vha->nvme_del_done); 638 } 639 } 640 641 int qla_nvme_register_hba(struct scsi_qla_host *vha) 642 { 643 struct nvme_fc_port_template *tmpl; 644 struct qla_hw_data *ha; 645 struct nvme_fc_port_info pinfo; 646 int ret = EINVAL; 647 648 if (!IS_ENABLED(CONFIG_NVME_FC)) 649 return ret; 650 651 ha = vha->hw; 652 tmpl = &qla_nvme_fc_transport; 653 654 WARN_ON(vha->nvme_local_port); 655 WARN_ON(ha->max_req_queues < 3); 656 657 qla_nvme_fc_transport.max_hw_queues = 658 min((uint8_t)(qla_nvme_fc_transport.max_hw_queues), 659 (uint8_t)(ha->max_req_queues - 2)); 660 661 pinfo.node_name = wwn_to_u64(vha->node_name); 662 pinfo.port_name = wwn_to_u64(vha->port_name); 663 pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR; 664 pinfo.port_id = vha->d_id.b24; 665 666 ql_log(ql_log_info, vha, 0xffff, 667 "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n", 668 pinfo.node_name, pinfo.port_name, pinfo.port_id); 669 qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary; 670 671 ret = nvme_fc_register_localport(&pinfo, tmpl, 672 get_device(&ha->pdev->dev), &vha->nvme_local_port); 673 if (ret) { 674 ql_log(ql_log_warn, vha, 0xffff, 675 "register_localport failed: ret=%x\n", ret); 676 } else { 677 vha->nvme_local_port->private = vha; 678 } 679 680 return ret; 681 } 682