1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * QLogic Fibre Channel HBA Driver 4 * Copyright (c) 2003-2017 QLogic Corporation 5 */ 6 #include "qla_nvme.h" 7 #include <linux/scatterlist.h> 8 #include <linux/delay.h> 9 #include <linux/nvme.h> 10 #include <linux/nvme-fc.h> 11 #include <linux/blk-mq-pci.h> 12 #include <linux/blk-mq.h> 13 14 static struct nvme_fc_port_template qla_nvme_fc_transport; 15 16 int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) 17 { 18 struct qla_nvme_rport *rport; 19 struct nvme_fc_port_info req; 20 int ret; 21 22 if (!IS_ENABLED(CONFIG_NVME_FC)) 23 return 0; 24 25 if (!vha->flags.nvme_enabled) { 26 ql_log(ql_log_info, vha, 0x2100, 27 "%s: Not registering target since Host NVME is not enabled\n", 28 __func__); 29 return 0; 30 } 31 32 if (!vha->nvme_local_port && qla_nvme_register_hba(vha)) 33 return 0; 34 35 if (!(fcport->nvme_prli_service_param & 36 (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) || 37 (fcport->nvme_flag & NVME_FLAG_REGISTERED)) 38 return 0; 39 40 fcport->nvme_flag &= ~NVME_FLAG_RESETTING; 41 42 memset(&req, 0, sizeof(struct nvme_fc_port_info)); 43 req.port_name = wwn_to_u64(fcport->port_name); 44 req.node_name = wwn_to_u64(fcport->node_name); 45 req.port_role = 0; 46 req.dev_loss_tmo = fcport->dev_loss_tmo; 47 48 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR) 49 req.port_role = FC_PORT_ROLE_NVME_INITIATOR; 50 51 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET) 52 req.port_role |= FC_PORT_ROLE_NVME_TARGET; 53 54 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY) 55 req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY; 56 57 req.port_id = fcport->d_id.b24; 58 59 ql_log(ql_log_info, vha, 0x2102, 60 "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n", 61 __func__, req.node_name, req.port_name, 62 req.port_id); 63 64 ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req, 65 &fcport->nvme_remote_port); 66 if (ret) { 67 ql_log(ql_log_warn, vha, 0x212e, 68 "Failed to register remote port. Transport returned %d\n", 69 ret); 70 return ret; 71 } 72 73 nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 74 fcport->dev_loss_tmo); 75 76 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER) 77 ql_log(ql_log_info, vha, 0x212a, 78 "PortID:%06x Supports SLER\n", req.port_id); 79 80 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL) 81 ql_log(ql_log_info, vha, 0x212b, 82 "PortID:%06x Supports PI control\n", req.port_id); 83 84 rport = fcport->nvme_remote_port->private; 85 rport->fcport = fcport; 86 87 fcport->nvme_flag |= NVME_FLAG_REGISTERED; 88 return 0; 89 } 90 91 /* Allocate a queue for NVMe traffic */ 92 static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, 93 unsigned int qidx, u16 qsize, void **handle) 94 { 95 struct scsi_qla_host *vha; 96 struct qla_hw_data *ha; 97 struct qla_qpair *qpair; 98 99 /* Map admin queue and 1st IO queue to index 0 */ 100 if (qidx) 101 qidx--; 102 103 vha = (struct scsi_qla_host *)lport->private; 104 ha = vha->hw; 105 106 ql_log(ql_log_info, vha, 0x2104, 107 "%s: handle %p, idx =%d, qsize %d\n", 108 __func__, handle, qidx, qsize); 109 110 if (qidx > qla_nvme_fc_transport.max_hw_queues) { 111 ql_log(ql_log_warn, vha, 0x212f, 112 "%s: Illegal qidx=%d. Max=%d\n", 113 __func__, qidx, qla_nvme_fc_transport.max_hw_queues); 114 return -EINVAL; 115 } 116 117 /* Use base qpair if max_qpairs is 0 */ 118 if (!ha->max_qpairs) { 119 qpair = ha->base_qpair; 120 } else { 121 if (ha->queue_pair_map[qidx]) { 122 *handle = ha->queue_pair_map[qidx]; 123 ql_log(ql_log_info, vha, 0x2121, 124 "Returning existing qpair of %p for idx=%x\n", 125 *handle, qidx); 126 return 0; 127 } 128 129 qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true); 130 if (!qpair) { 131 ql_log(ql_log_warn, vha, 0x2122, 132 "Failed to allocate qpair\n"); 133 return -EINVAL; 134 } 135 } 136 *handle = qpair; 137 138 return 0; 139 } 140 141 static void qla_nvme_release_fcp_cmd_kref(struct kref *kref) 142 { 143 struct srb *sp = container_of(kref, struct srb, cmd_kref); 144 struct nvme_private *priv = (struct nvme_private *)sp->priv; 145 struct nvmefc_fcp_req *fd; 146 struct srb_iocb *nvme; 147 unsigned long flags; 148 149 if (!priv) 150 goto out; 151 152 nvme = &sp->u.iocb_cmd; 153 fd = nvme->u.nvme.desc; 154 155 spin_lock_irqsave(&priv->cmd_lock, flags); 156 priv->sp = NULL; 157 sp->priv = NULL; 158 if (priv->comp_status == QLA_SUCCESS) { 159 fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len); 160 fd->status = NVME_SC_SUCCESS; 161 } else { 162 fd->rcv_rsplen = 0; 163 fd->transferred_length = 0; 164 fd->status = NVME_SC_INTERNAL; 165 } 166 spin_unlock_irqrestore(&priv->cmd_lock, flags); 167 168 fd->done(fd); 169 out: 170 qla2xxx_rel_qpair_sp(sp->qpair, sp); 171 } 172 173 static void qla_nvme_release_ls_cmd_kref(struct kref *kref) 174 { 175 struct srb *sp = container_of(kref, struct srb, cmd_kref); 176 struct nvme_private *priv = (struct nvme_private *)sp->priv; 177 struct nvmefc_ls_req *fd; 178 unsigned long flags; 179 180 if (!priv) 181 goto out; 182 183 spin_lock_irqsave(&priv->cmd_lock, flags); 184 priv->sp = NULL; 185 sp->priv = NULL; 186 spin_unlock_irqrestore(&priv->cmd_lock, flags); 187 188 fd = priv->fd; 189 fd->done(fd, priv->comp_status); 190 out: 191 qla2x00_rel_sp(sp); 192 } 193 194 static void qla_nvme_ls_complete(struct work_struct *work) 195 { 196 struct nvme_private *priv = 197 container_of(work, struct nvme_private, ls_work); 198 199 kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref); 200 } 201 202 static void qla_nvme_sp_ls_done(srb_t *sp, int res) 203 { 204 struct nvme_private *priv = sp->priv; 205 206 if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0)) 207 return; 208 209 if (res) 210 res = -EINVAL; 211 212 priv->comp_status = res; 213 INIT_WORK(&priv->ls_work, qla_nvme_ls_complete); 214 schedule_work(&priv->ls_work); 215 } 216 217 /* it assumed that QPair lock is held. */ 218 static void qla_nvme_sp_done(srb_t *sp, int res) 219 { 220 struct nvme_private *priv = sp->priv; 221 222 priv->comp_status = res; 223 kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref); 224 225 return; 226 } 227 228 static void qla_nvme_abort_work(struct work_struct *work) 229 { 230 struct nvme_private *priv = 231 container_of(work, struct nvme_private, abort_work); 232 srb_t *sp = priv->sp; 233 fc_port_t *fcport = sp->fcport; 234 struct qla_hw_data *ha = fcport->vha->hw; 235 int rval, abts_done_called = 1; 236 bool io_wait_for_abort_done; 237 uint32_t handle; 238 239 ql_dbg(ql_dbg_io, fcport->vha, 0xffff, 240 "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n", 241 __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted); 242 243 if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED) 244 goto out; 245 246 if (ha->flags.host_shutting_down) { 247 ql_log(ql_log_info, sp->fcport->vha, 0xffff, 248 "%s Calling done on sp: %p, type: 0x%x\n", 249 __func__, sp, sp->type); 250 sp->done(sp, 0); 251 goto out; 252 } 253 254 /* 255 * sp may not be valid after abort_command if return code is either 256 * SUCCESS or ERR_FROM_FW codes, so cache the value here. 257 */ 258 io_wait_for_abort_done = ql2xabts_wait_nvme && 259 QLA_ABTS_WAIT_ENABLED(sp); 260 handle = sp->handle; 261 262 rval = ha->isp_ops->abort_command(sp); 263 264 ql_dbg(ql_dbg_io, fcport->vha, 0x212b, 265 "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n", 266 __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted", 267 sp, handle, fcport, rval); 268 269 /* 270 * If async tmf is enabled, the abort callback is called only on 271 * return codes QLA_SUCCESS and QLA_ERR_FROM_FW. 272 */ 273 if (ql2xasynctmfenable && 274 rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW) 275 abts_done_called = 0; 276 277 /* 278 * Returned before decreasing kref so that I/O requests 279 * are waited until ABTS complete. This kref is decreased 280 * at qla24xx_abort_sp_done function. 281 */ 282 if (abts_done_called && io_wait_for_abort_done) 283 return; 284 out: 285 /* kref_get was done before work was schedule. */ 286 kref_put(&sp->cmd_kref, sp->put_fn); 287 } 288 289 static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport, 290 struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd) 291 { 292 struct nvme_private *priv = fd->private; 293 unsigned long flags; 294 295 spin_lock_irqsave(&priv->cmd_lock, flags); 296 if (!priv->sp) { 297 spin_unlock_irqrestore(&priv->cmd_lock, flags); 298 return; 299 } 300 301 if (!kref_get_unless_zero(&priv->sp->cmd_kref)) { 302 spin_unlock_irqrestore(&priv->cmd_lock, flags); 303 return; 304 } 305 spin_unlock_irqrestore(&priv->cmd_lock, flags); 306 307 INIT_WORK(&priv->abort_work, qla_nvme_abort_work); 308 schedule_work(&priv->abort_work); 309 } 310 311 static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, 312 struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd) 313 { 314 struct qla_nvme_rport *qla_rport = rport->private; 315 fc_port_t *fcport = qla_rport->fcport; 316 struct srb_iocb *nvme; 317 struct nvme_private *priv = fd->private; 318 struct scsi_qla_host *vha; 319 int rval = QLA_FUNCTION_FAILED; 320 struct qla_hw_data *ha; 321 srb_t *sp; 322 323 if (!fcport || fcport->deleted) 324 return rval; 325 326 vha = fcport->vha; 327 ha = vha->hw; 328 329 if (!ha->flags.fw_started) 330 return rval; 331 332 /* Alloc SRB structure */ 333 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); 334 if (!sp) 335 return rval; 336 337 sp->type = SRB_NVME_LS; 338 sp->name = "nvme_ls"; 339 sp->done = qla_nvme_sp_ls_done; 340 sp->put_fn = qla_nvme_release_ls_cmd_kref; 341 sp->priv = priv; 342 priv->sp = sp; 343 kref_init(&sp->cmd_kref); 344 spin_lock_init(&priv->cmd_lock); 345 nvme = &sp->u.iocb_cmd; 346 priv->fd = fd; 347 nvme->u.nvme.desc = fd; 348 nvme->u.nvme.dir = 0; 349 nvme->u.nvme.dl = 0; 350 nvme->u.nvme.cmd_len = fd->rqstlen; 351 nvme->u.nvme.rsp_len = fd->rsplen; 352 nvme->u.nvme.rsp_dma = fd->rspdma; 353 nvme->u.nvme.timeout_sec = fd->timeout; 354 nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr, 355 fd->rqstlen, DMA_TO_DEVICE); 356 dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, 357 fd->rqstlen, DMA_TO_DEVICE); 358 359 rval = qla2x00_start_sp(sp); 360 if (rval != QLA_SUCCESS) { 361 ql_log(ql_log_warn, vha, 0x700e, 362 "qla2x00_start_sp failed = %d\n", rval); 363 wake_up(&sp->nvme_ls_waitq); 364 sp->priv = NULL; 365 priv->sp = NULL; 366 qla2x00_rel_sp(sp); 367 return rval; 368 } 369 370 return rval; 371 } 372 373 static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport, 374 struct nvme_fc_remote_port *rport, void *hw_queue_handle, 375 struct nvmefc_fcp_req *fd) 376 { 377 struct nvme_private *priv = fd->private; 378 unsigned long flags; 379 380 spin_lock_irqsave(&priv->cmd_lock, flags); 381 if (!priv->sp) { 382 spin_unlock_irqrestore(&priv->cmd_lock, flags); 383 return; 384 } 385 if (!kref_get_unless_zero(&priv->sp->cmd_kref)) { 386 spin_unlock_irqrestore(&priv->cmd_lock, flags); 387 return; 388 } 389 spin_unlock_irqrestore(&priv->cmd_lock, flags); 390 391 INIT_WORK(&priv->abort_work, qla_nvme_abort_work); 392 schedule_work(&priv->abort_work); 393 } 394 395 static inline int qla2x00_start_nvme_mq(srb_t *sp) 396 { 397 unsigned long flags; 398 uint32_t *clr_ptr; 399 uint32_t handle; 400 struct cmd_nvme *cmd_pkt; 401 uint16_t cnt, i; 402 uint16_t req_cnt; 403 uint16_t tot_dsds; 404 uint16_t avail_dsds; 405 struct dsd64 *cur_dsd; 406 struct req_que *req = NULL; 407 struct rsp_que *rsp = NULL; 408 struct scsi_qla_host *vha = sp->fcport->vha; 409 struct qla_hw_data *ha = vha->hw; 410 struct qla_qpair *qpair = sp->qpair; 411 struct srb_iocb *nvme = &sp->u.iocb_cmd; 412 struct scatterlist *sgl, *sg; 413 struct nvmefc_fcp_req *fd = nvme->u.nvme.desc; 414 struct nvme_fc_cmd_iu *cmd = fd->cmdaddr; 415 uint32_t rval = QLA_SUCCESS; 416 417 /* Setup qpair pointers */ 418 req = qpair->req; 419 rsp = qpair->rsp; 420 tot_dsds = fd->sg_cnt; 421 422 /* Acquire qpair specific lock */ 423 spin_lock_irqsave(&qpair->qp_lock, flags); 424 425 handle = qla2xxx_get_next_handle(req); 426 if (handle == 0) { 427 rval = -EBUSY; 428 goto queuing_error; 429 } 430 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 431 if (req->cnt < (req_cnt + 2)) { 432 if (IS_SHADOW_REG_CAPABLE(ha)) { 433 cnt = *req->out_ptr; 434 } else { 435 cnt = rd_reg_dword_relaxed(req->req_q_out); 436 if (qla2x00_check_reg16_for_disconnect(vha, cnt)) 437 goto queuing_error; 438 } 439 440 if (req->ring_index < cnt) 441 req->cnt = cnt - req->ring_index; 442 else 443 req->cnt = req->length - (req->ring_index - cnt); 444 445 if (req->cnt < (req_cnt + 2)){ 446 rval = -EBUSY; 447 goto queuing_error; 448 } 449 } 450 451 if (unlikely(!fd->sqid)) { 452 if (cmd->sqe.common.opcode == nvme_admin_async_event) { 453 nvme->u.nvme.aen_op = 1; 454 atomic_inc(&ha->nvme_active_aen_cnt); 455 } 456 } 457 458 /* Build command packet. */ 459 req->current_outstanding_cmd = handle; 460 req->outstanding_cmds[handle] = sp; 461 sp->handle = handle; 462 req->cnt -= req_cnt; 463 464 cmd_pkt = (struct cmd_nvme *)req->ring_ptr; 465 cmd_pkt->handle = make_handle(req->id, handle); 466 467 /* Zero out remaining portion of packet. */ 468 clr_ptr = (uint32_t *)cmd_pkt + 2; 469 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 470 471 cmd_pkt->entry_status = 0; 472 473 /* Update entry type to indicate Command NVME IOCB */ 474 cmd_pkt->entry_type = COMMAND_NVME; 475 476 /* No data transfer how do we check buffer len == 0?? */ 477 if (fd->io_dir == NVMEFC_FCP_READ) { 478 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); 479 qpair->counters.input_bytes += fd->payload_length; 480 qpair->counters.input_requests++; 481 } else if (fd->io_dir == NVMEFC_FCP_WRITE) { 482 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); 483 if ((vha->flags.nvme_first_burst) && 484 (sp->fcport->nvme_prli_service_param & 485 NVME_PRLI_SP_FIRST_BURST)) { 486 if ((fd->payload_length <= 487 sp->fcport->nvme_first_burst_size) || 488 (sp->fcport->nvme_first_burst_size == 0)) 489 cmd_pkt->control_flags |= 490 cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE); 491 } 492 qpair->counters.output_bytes += fd->payload_length; 493 qpair->counters.output_requests++; 494 } else if (fd->io_dir == 0) { 495 cmd_pkt->control_flags = 0; 496 } 497 498 if (sp->fcport->edif.enable && fd->io_dir != 0) 499 cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF); 500 501 /* Set BIT_13 of control flags for Async event */ 502 if (vha->flags.nvme2_enabled && 503 cmd->sqe.common.opcode == nvme_admin_async_event) { 504 cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT); 505 } 506 507 /* Set NPORT-ID */ 508 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 509 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 510 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 511 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 512 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 513 514 /* NVME RSP IU */ 515 cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen); 516 put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address); 517 518 /* NVME CNMD IU */ 519 cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen); 520 cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma); 521 522 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 523 cmd_pkt->byte_count = cpu_to_le32(fd->payload_length); 524 525 /* One DSD is available in the Command Type NVME IOCB */ 526 avail_dsds = 1; 527 cur_dsd = &cmd_pkt->nvme_dsd; 528 sgl = fd->first_sgl; 529 530 /* Load data segments */ 531 for_each_sg(sgl, sg, tot_dsds, i) { 532 cont_a64_entry_t *cont_pkt; 533 534 /* Allocate additional continuation packets? */ 535 if (avail_dsds == 0) { 536 /* 537 * Five DSDs are available in the Continuation 538 * Type 1 IOCB. 539 */ 540 541 /* Adjust ring index */ 542 req->ring_index++; 543 if (req->ring_index == req->length) { 544 req->ring_index = 0; 545 req->ring_ptr = req->ring; 546 } else { 547 req->ring_ptr++; 548 } 549 cont_pkt = (cont_a64_entry_t *)req->ring_ptr; 550 put_unaligned_le32(CONTINUE_A64_TYPE, 551 &cont_pkt->entry_type); 552 553 cur_dsd = cont_pkt->dsd; 554 avail_dsds = ARRAY_SIZE(cont_pkt->dsd); 555 } 556 557 append_dsd64(&cur_dsd, sg); 558 avail_dsds--; 559 } 560 561 /* Set total entry count. */ 562 cmd_pkt->entry_count = (uint8_t)req_cnt; 563 wmb(); 564 565 /* Adjust ring index. */ 566 req->ring_index++; 567 if (req->ring_index == req->length) { 568 req->ring_index = 0; 569 req->ring_ptr = req->ring; 570 } else { 571 req->ring_ptr++; 572 } 573 574 /* ignore nvme async cmd due to long timeout */ 575 if (!nvme->u.nvme.aen_op) 576 sp->qpair->cmd_cnt++; 577 578 /* Set chip new ring index. */ 579 wrt_reg_dword(req->req_q_in, req->ring_index); 580 581 if (vha->flags.process_response_queue && 582 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 583 qla24xx_process_response_queue(vha, rsp); 584 585 queuing_error: 586 spin_unlock_irqrestore(&qpair->qp_lock, flags); 587 588 return rval; 589 } 590 591 /* Post a command */ 592 static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, 593 struct nvme_fc_remote_port *rport, void *hw_queue_handle, 594 struct nvmefc_fcp_req *fd) 595 { 596 fc_port_t *fcport; 597 struct srb_iocb *nvme; 598 struct scsi_qla_host *vha; 599 int rval; 600 srb_t *sp; 601 struct qla_qpair *qpair = hw_queue_handle; 602 struct nvme_private *priv = fd->private; 603 struct qla_nvme_rport *qla_rport = rport->private; 604 605 if (!priv) { 606 /* nvme association has been torn down */ 607 return -ENODEV; 608 } 609 610 fcport = qla_rport->fcport; 611 612 if (unlikely(!qpair || !fcport || fcport->deleted)) 613 return -EBUSY; 614 615 if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED)) 616 return -ENODEV; 617 618 vha = fcport->vha; 619 620 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 621 return -EBUSY; 622 623 /* 624 * If we know the dev is going away while the transport is still sending 625 * IO's return busy back to stall the IO Q. This happens when the 626 * link goes away and fw hasn't notified us yet, but IO's are being 627 * returned. If the dev comes back quickly we won't exhaust the IO 628 * retry count at the core. 629 */ 630 if (fcport->nvme_flag & NVME_FLAG_RESETTING) 631 return -EBUSY; 632 633 /* Alloc SRB structure */ 634 sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC); 635 if (!sp) 636 return -EBUSY; 637 638 init_waitqueue_head(&sp->nvme_ls_waitq); 639 kref_init(&sp->cmd_kref); 640 spin_lock_init(&priv->cmd_lock); 641 sp->priv = priv; 642 priv->sp = sp; 643 sp->type = SRB_NVME_CMD; 644 sp->name = "nvme_cmd"; 645 sp->done = qla_nvme_sp_done; 646 sp->put_fn = qla_nvme_release_fcp_cmd_kref; 647 sp->qpair = qpair; 648 sp->vha = vha; 649 sp->cmd_sp = sp; 650 nvme = &sp->u.iocb_cmd; 651 nvme->u.nvme.desc = fd; 652 653 rval = qla2x00_start_nvme_mq(sp); 654 if (rval != QLA_SUCCESS) { 655 ql_log(ql_log_warn, vha, 0x212d, 656 "qla2x00_start_nvme_mq failed = %d\n", rval); 657 wake_up(&sp->nvme_ls_waitq); 658 sp->priv = NULL; 659 priv->sp = NULL; 660 qla2xxx_rel_qpair_sp(sp->qpair, sp); 661 } 662 663 return rval; 664 } 665 666 static void qla_nvme_map_queues(struct nvme_fc_local_port *lport, 667 struct blk_mq_queue_map *map) 668 { 669 struct scsi_qla_host *vha = lport->private; 670 int rc; 671 672 rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset); 673 if (rc) 674 ql_log(ql_log_warn, vha, 0x21de, 675 "pci map queue failed 0x%x", rc); 676 } 677 678 static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport) 679 { 680 struct scsi_qla_host *vha = lport->private; 681 682 ql_log(ql_log_info, vha, 0x210f, 683 "localport delete of %p completed.\n", vha->nvme_local_port); 684 vha->nvme_local_port = NULL; 685 complete(&vha->nvme_del_done); 686 } 687 688 static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) 689 { 690 fc_port_t *fcport; 691 struct qla_nvme_rport *qla_rport = rport->private; 692 693 fcport = qla_rport->fcport; 694 fcport->nvme_remote_port = NULL; 695 fcport->nvme_flag &= ~NVME_FLAG_REGISTERED; 696 fcport->nvme_flag &= ~NVME_FLAG_DELETING; 697 ql_log(ql_log_info, fcport->vha, 0x2110, 698 "remoteport_delete of %p %8phN completed.\n", 699 fcport, fcport->port_name); 700 complete(&fcport->nvme_del_done); 701 } 702 703 static struct nvme_fc_port_template qla_nvme_fc_transport = { 704 .localport_delete = qla_nvme_localport_delete, 705 .remoteport_delete = qla_nvme_remoteport_delete, 706 .create_queue = qla_nvme_alloc_queue, 707 .delete_queue = NULL, 708 .ls_req = qla_nvme_ls_req, 709 .ls_abort = qla_nvme_ls_abort, 710 .fcp_io = qla_nvme_post_cmd, 711 .fcp_abort = qla_nvme_fcp_abort, 712 .map_queues = qla_nvme_map_queues, 713 .max_hw_queues = 8, 714 .max_sgl_segments = 1024, 715 .max_dif_sgl_segments = 64, 716 .dma_boundary = 0xFFFFFFFF, 717 .local_priv_sz = 8, 718 .remote_priv_sz = sizeof(struct qla_nvme_rport), 719 .lsrqst_priv_sz = sizeof(struct nvme_private), 720 .fcprqst_priv_sz = sizeof(struct nvme_private), 721 }; 722 723 void qla_nvme_unregister_remote_port(struct fc_port *fcport) 724 { 725 int ret; 726 727 if (!IS_ENABLED(CONFIG_NVME_FC)) 728 return; 729 730 ql_log(ql_log_warn, fcport->vha, 0x2112, 731 "%s: unregister remoteport on %p %8phN\n", 732 __func__, fcport, fcport->port_name); 733 734 if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags)) 735 nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0); 736 737 init_completion(&fcport->nvme_del_done); 738 ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port); 739 if (ret) 740 ql_log(ql_log_info, fcport->vha, 0x2114, 741 "%s: Failed to unregister nvme_remote_port (%d)\n", 742 __func__, ret); 743 wait_for_completion(&fcport->nvme_del_done); 744 } 745 746 void qla_nvme_delete(struct scsi_qla_host *vha) 747 { 748 int nv_ret; 749 750 if (!IS_ENABLED(CONFIG_NVME_FC)) 751 return; 752 753 if (vha->nvme_local_port) { 754 init_completion(&vha->nvme_del_done); 755 ql_log(ql_log_info, vha, 0x2116, 756 "unregister localport=%p\n", 757 vha->nvme_local_port); 758 nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port); 759 if (nv_ret) 760 ql_log(ql_log_info, vha, 0x2115, 761 "Unregister of localport failed\n"); 762 else 763 wait_for_completion(&vha->nvme_del_done); 764 } 765 } 766 767 int qla_nvme_register_hba(struct scsi_qla_host *vha) 768 { 769 struct nvme_fc_port_template *tmpl; 770 struct qla_hw_data *ha; 771 struct nvme_fc_port_info pinfo; 772 int ret = -EINVAL; 773 774 if (!IS_ENABLED(CONFIG_NVME_FC)) 775 return ret; 776 777 ha = vha->hw; 778 tmpl = &qla_nvme_fc_transport; 779 780 WARN_ON(vha->nvme_local_port); 781 782 qla_nvme_fc_transport.max_hw_queues = 783 min((uint8_t)(qla_nvme_fc_transport.max_hw_queues), 784 (uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1)); 785 786 pinfo.node_name = wwn_to_u64(vha->node_name); 787 pinfo.port_name = wwn_to_u64(vha->port_name); 788 pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR; 789 pinfo.port_id = vha->d_id.b24; 790 791 ql_log(ql_log_info, vha, 0xffff, 792 "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n", 793 pinfo.node_name, pinfo.port_name, pinfo.port_id); 794 qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary; 795 796 ret = nvme_fc_register_localport(&pinfo, tmpl, 797 get_device(&ha->pdev->dev), &vha->nvme_local_port); 798 if (ret) { 799 ql_log(ql_log_warn, vha, 0xffff, 800 "register_localport failed: ret=%x\n", ret); 801 } else { 802 vha->nvme_local_port->private = vha; 803 } 804 805 return ret; 806 } 807 808 void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp) 809 { 810 struct qla_hw_data *ha; 811 812 if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp))) 813 return; 814 815 ha = orig_sp->fcport->vha->hw; 816 817 WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0)); 818 /* Use Driver Specified Retry Count */ 819 abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT); 820 abt->drv.abts_rty_cnt = cpu_to_le16(2); 821 /* Use specified response timeout */ 822 abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT); 823 /* set it to 2 * r_a_tov in secs */ 824 abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10)); 825 } 826 827 void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp) 828 { 829 u16 comp_status; 830 struct scsi_qla_host *vha; 831 832 if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp))) 833 return; 834 835 vha = orig_sp->fcport->vha; 836 837 comp_status = le16_to_cpu(abt->comp_status); 838 switch (comp_status) { 839 case CS_RESET: /* reset event aborted */ 840 case CS_ABORTED: /* IOCB was cleaned */ 841 /* N_Port handle is not currently logged in */ 842 case CS_TIMEOUT: 843 /* N_Port handle was logged out while waiting for ABTS to complete */ 844 case CS_PORT_UNAVAILABLE: 845 /* Firmware found that the port name changed */ 846 case CS_PORT_LOGGED_OUT: 847 /* BA_RJT was received for the ABTS */ 848 case CS_PORT_CONFIG_CHG: 849 ql_dbg(ql_dbg_async, vha, 0xf09d, 850 "Abort I/O IOCB completed with error, comp_status=%x\n", 851 comp_status); 852 break; 853 854 /* BA_RJT was received for the ABTS */ 855 case CS_REJECT_RECEIVED: 856 ql_dbg(ql_dbg_async, vha, 0xf09e, 857 "BA_RJT was received for the ABTS rjt_vendorUnique = %u", 858 abt->fw.ba_rjt_vendorUnique); 859 ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e, 860 "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n", 861 abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode); 862 break; 863 864 case CS_COMPLETE: 865 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f, 866 "IOCB request is completed successfully comp_status=%x\n", 867 comp_status); 868 break; 869 870 case CS_IOCB_ERROR: 871 ql_dbg(ql_dbg_async, vha, 0xf0a0, 872 "IOCB request is failed, comp_status=%x\n", comp_status); 873 break; 874 875 default: 876 ql_dbg(ql_dbg_async, vha, 0xf0a1, 877 "Invalid Abort IO IOCB Completion Status %x\n", 878 comp_status); 879 break; 880 } 881 } 882 883 inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp) 884 { 885 if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp))) 886 return; 887 kref_put(&orig_sp->cmd_kref, orig_sp->put_fn); 888 } 889