// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = 0;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
		ql_log(ql_log_info, vha, 0x212a,
		    "PortID:%06x Supports SLER\n", req.port_id);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
		ql_log(ql_log_info, vha, 0x212b,
		    "PortID:%06x Supports PI control\n", req.port_id);

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	/* Map the admin queue (qidx 0) onto hardware queue index 1. */
	if (!qidx)
		qidx++;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

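	/*
	 * One qpair per hardware queue index: hand back the qpair already
	 * bound to this index if one exists, otherwise create a new one.
	 */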
	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}

static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;
	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
	    container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

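/*
 * Completion callback for NVMe FCP commands.
 * It is assumed that the qpair lock is held when this is called.
 */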
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
	    container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	    "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
	    __func__, sp, sp->handle, fcport, fcport->deleted);

	if (!ha->flags.fw_started || fcport->deleted)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);

	/*
	 * Return before decreasing the kref so that outstanding I/O
	 * requests are waited on until the ABTS completes. The kref is
	 * decreased in qla24xx_abort_sp_done().
	 */
	if (ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(sp))
		return;
out:
	/* kref_get was done before the work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}

static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
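	/*
	 * Make the CPU-built LS request payload visible to the device
	 * before the IOCB referencing it is queued.
	 */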
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    rd_reg_dword_relaxed(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

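	/*
	 * Set the data-direction control flags. No data transfer when
	 * io_dir is 0; how do we check buffer len == 0?
	 */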
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += fd->payload_length;
		qpair->counters.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
		     NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			    sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
				    cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		qpair->counters.output_bytes += fd->payload_length;
		qpair->counters.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	/* Set BIT_13 of control flags for Async event */
	if (vha->flags.nvme2_enabled &&
	    cmd->sqe.common.opcode == nvme_admin_async_event) {
		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
			    &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

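	/*
	 * Set chip new ring index: writing the request-queue in pointer
	 * tells the firmware that new IOCBs are ready to be fetched.
	 */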
	wrt_reg_dword(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return rval;
}

/* Post a command */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv) {
		/* nvme association has been torn down */
		return -ENODEV;
	}

	fcport = qla_rport->fcport;

	if (unlikely(!qpair || !fcport || fcport->deleted))
		return -EBUSY;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return -ENODEV;

	vha = fcport->vha;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return -EBUSY;

	/*
	 * If we know the dev is going away while the transport is still
	 * sending IO's, return busy back to stall the IO Q. This happens
	 * when the link goes away and fw hasn't notified us yet, but IO's
	 * are being returned. If the dev comes back quickly we won't
	 * exhaust the IO retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	init_waitqueue_head(&sp->nvme_ls_waitq);
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	sp->cmd_sp = sp;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue = qla_nvme_alloc_queue,
	.delete_queue = NULL,
	.ls_req = qla_nvme_ls_req,
	.ls_abort = qla_nvme_ls_abort,
	.fcp_io = qla_nvme_post_cmd,
	.fcp_abort = qla_nvme_fcp_abort,
	.max_hw_queues = 8,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

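/*
 * Tear down the FC-NVMe remote port association for an fcport and wait
 * for the transport's remoteport_delete callback to signal completion.
 */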
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);

	if (ha->max_req_queues < 3) {
		if (!ha->flags.max_req_queue_warned)
			ql_log(ql_log_info, vha, 0x2120,
			    "%s: Disabling FC-NVME due to lack of free queue pairs (%d).\n",
			    __func__, ha->max_req_queues);
		ha->flags.max_req_queue_warned = 1;
		return ret;
	}

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}

void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	struct qla_hw_data *ha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	ha = orig_sp->fcport->vha->hw;

	WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
	/* Use Driver Specified Retry Count */
	abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
	abt->drv.abts_rty_cnt = cpu_to_le16(2);
	/* Use specified response timeout */
	abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
	/* Set it to 2 * r_a_tov in secs */
	abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
}

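/*
 * Log the firmware completion status of an Abort IOCB that was issued
 * with the driver-specified ABTS options set above.
 */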
void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt,
    srb_t *orig_sp)
{
	u16 comp_status;
	struct scsi_qla_host *vha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	vha = orig_sp->fcport->vha;

	comp_status = le16_to_cpu(abt->comp_status);
	switch (comp_status) {
	case CS_RESET:		/* reset event aborted */
	case CS_ABORTED:	/* IOCB was cleaned */
	case CS_TIMEOUT:
	case CS_PORT_UNAVAILABLE: /* N_Port handle is not currently logged in */
	case CS_PORT_LOGGED_OUT: /* N_Port handle was logged out while waiting for ABTS to complete */
	case CS_PORT_CONFIG_CHG: /* firmware found that the port name changed */
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09d,
		    "Abort I/O IOCB completed with error, comp_status=%x\n",
		    comp_status);
		break;

	/* BA_RJT was received for the ABTS */
	case CS_REJECT_RECEIVED:
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
		    "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
		    abt->fw.ba_rjt_vendorUnique);
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
		    "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
		    abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
		break;

	case CS_COMPLETE:
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09f,
		    "IOCB request completed successfully, comp_status=%x\n",
		    comp_status);
		break;

	case CS_IOCB_ERROR:
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf0a0,
		    "IOCB request failed, comp_status=%x\n", comp_status);
		break;

	default:
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf0a1,
		    "Invalid Abort IO IOCB Completion Status %x\n",
		    comp_status);
		break;
	}
}

inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
{
	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;
	kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}