1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * QLogic Fibre Channel HBA Driver 4 * Copyright (c) 2003-2017 QLogic Corporation 5 */ 6 #include "qla_nvme.h" 7 #include <linux/scatterlist.h> 8 #include <linux/delay.h> 9 #include <linux/nvme.h> 10 #include <linux/nvme-fc.h> 11 12 static struct nvme_fc_port_template qla_nvme_fc_transport; 13 14 int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) 15 { 16 struct qla_nvme_rport *rport; 17 struct nvme_fc_port_info req; 18 int ret; 19 20 if (!IS_ENABLED(CONFIG_NVME_FC)) 21 return 0; 22 23 if (!vha->flags.nvme_enabled) { 24 ql_log(ql_log_info, vha, 0x2100, 25 "%s: Not registering target since Host NVME is not enabled\n", 26 __func__); 27 return 0; 28 } 29 30 if (!vha->nvme_local_port && qla_nvme_register_hba(vha)) 31 return 0; 32 33 if (!(fcport->nvme_prli_service_param & 34 (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) || 35 (fcport->nvme_flag & NVME_FLAG_REGISTERED)) 36 return 0; 37 38 fcport->nvme_flag &= ~NVME_FLAG_RESETTING; 39 40 memset(&req, 0, sizeof(struct nvme_fc_port_info)); 41 req.port_name = wwn_to_u64(fcport->port_name); 42 req.node_name = wwn_to_u64(fcport->node_name); 43 req.port_role = 0; 44 req.dev_loss_tmo = 0; 45 46 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR) 47 req.port_role = FC_PORT_ROLE_NVME_INITIATOR; 48 49 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET) 50 req.port_role |= FC_PORT_ROLE_NVME_TARGET; 51 52 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY) 53 req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY; 54 55 req.port_id = fcport->d_id.b24; 56 57 ql_log(ql_log_info, vha, 0x2102, 58 "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n", 59 __func__, req.node_name, req.port_name, 60 req.port_id); 61 62 ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req, 63 &fcport->nvme_remote_port); 64 if (ret) { 65 ql_log(ql_log_warn, vha, 0x212e, 66 "Failed to register remote port. Transport returned %d\n", 67 ret); 68 return ret; 69 } 70 71 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER) 72 ql_log(ql_log_info, vha, 0x212a, 73 "PortID:%06x Supports SLER\n", req.port_id); 74 75 if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL) 76 ql_log(ql_log_info, vha, 0x212b, 77 "PortID:%06x Supports PI control\n", req.port_id); 78 79 rport = fcport->nvme_remote_port->private; 80 rport->fcport = fcport; 81 82 fcport->nvme_flag |= NVME_FLAG_REGISTERED; 83 return 0; 84 } 85 86 /* Allocate a queue for NVMe traffic */ 87 static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, 88 unsigned int qidx, u16 qsize, void **handle) 89 { 90 struct scsi_qla_host *vha; 91 struct qla_hw_data *ha; 92 struct qla_qpair *qpair; 93 94 /* Map admin queue and 1st IO queue to index 0 */ 95 if (qidx) 96 qidx--; 97 98 vha = (struct scsi_qla_host *)lport->private; 99 ha = vha->hw; 100 101 ql_log(ql_log_info, vha, 0x2104, 102 "%s: handle %p, idx =%d, qsize %d\n", 103 __func__, handle, qidx, qsize); 104 105 if (qidx > qla_nvme_fc_transport.max_hw_queues) { 106 ql_log(ql_log_warn, vha, 0x212f, 107 "%s: Illegal qidx=%d. Max=%d\n", 108 __func__, qidx, qla_nvme_fc_transport.max_hw_queues); 109 return -EINVAL; 110 } 111 112 /* Use base qpair if max_qpairs is 0 */ 113 if (!ha->max_qpairs) { 114 qpair = ha->base_qpair; 115 } else { 116 if (ha->queue_pair_map[qidx]) { 117 *handle = ha->queue_pair_map[qidx]; 118 ql_log(ql_log_info, vha, 0x2121, 119 "Returning existing qpair of %p for idx=%x\n", 120 *handle, qidx); 121 return 0; 122 } 123 124 qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true); 125 if (!qpair) { 126 ql_log(ql_log_warn, vha, 0x2122, 127 "Failed to allocate qpair\n"); 128 return -EINVAL; 129 } 130 } 131 *handle = qpair; 132 133 return 0; 134 } 135 136 static void qla_nvme_release_fcp_cmd_kref(struct kref *kref) 137 { 138 struct srb *sp = container_of(kref, struct srb, cmd_kref); 139 struct nvme_private *priv = (struct nvme_private *)sp->priv; 140 struct nvmefc_fcp_req *fd; 141 struct srb_iocb *nvme; 142 unsigned long flags; 143 144 if (!priv) 145 goto out; 146 147 nvme = &sp->u.iocb_cmd; 148 fd = nvme->u.nvme.desc; 149 150 spin_lock_irqsave(&priv->cmd_lock, flags); 151 priv->sp = NULL; 152 sp->priv = NULL; 153 if (priv->comp_status == QLA_SUCCESS) { 154 fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len); 155 fd->status = NVME_SC_SUCCESS; 156 } else { 157 fd->rcv_rsplen = 0; 158 fd->transferred_length = 0; 159 fd->status = NVME_SC_INTERNAL; 160 } 161 spin_unlock_irqrestore(&priv->cmd_lock, flags); 162 163 fd->done(fd); 164 out: 165 qla2xxx_rel_qpair_sp(sp->qpair, sp); 166 } 167 168 static void qla_nvme_release_ls_cmd_kref(struct kref *kref) 169 { 170 struct srb *sp = container_of(kref, struct srb, cmd_kref); 171 struct nvme_private *priv = (struct nvme_private *)sp->priv; 172 struct nvmefc_ls_req *fd; 173 unsigned long flags; 174 175 if (!priv) 176 goto out; 177 178 spin_lock_irqsave(&priv->cmd_lock, flags); 179 priv->sp = NULL; 180 sp->priv = NULL; 181 spin_unlock_irqrestore(&priv->cmd_lock, flags); 182 183 fd = priv->fd; 184 fd->done(fd, priv->comp_status); 185 out: 186 qla2x00_rel_sp(sp); 187 } 188 189 static void qla_nvme_ls_complete(struct work_struct *work) 190 { 191 struct nvme_private *priv = 192 container_of(work, struct nvme_private, ls_work); 193 194 kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref); 195 } 196 197 static void qla_nvme_sp_ls_done(srb_t *sp, int res) 198 { 199 struct nvme_private *priv = sp->priv; 200 201 if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0)) 202 return; 203 204 if (res) 205 res = -EINVAL; 206 207 priv->comp_status = res; 208 INIT_WORK(&priv->ls_work, qla_nvme_ls_complete); 209 schedule_work(&priv->ls_work); 210 } 211 212 /* it assumed that QPair lock is held. */ 213 static void qla_nvme_sp_done(srb_t *sp, int res) 214 { 215 struct nvme_private *priv = sp->priv; 216 217 priv->comp_status = res; 218 kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref); 219 220 return; 221 } 222 223 static void qla_nvme_abort_work(struct work_struct *work) 224 { 225 struct nvme_private *priv = 226 container_of(work, struct nvme_private, abort_work); 227 srb_t *sp = priv->sp; 228 fc_port_t *fcport = sp->fcport; 229 struct qla_hw_data *ha = fcport->vha->hw; 230 int rval, abts_done_called = 1; 231 bool io_wait_for_abort_done; 232 uint32_t handle; 233 234 ql_dbg(ql_dbg_io, fcport->vha, 0xffff, 235 "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n", 236 __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted); 237 238 if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED) 239 goto out; 240 241 if (ha->flags.host_shutting_down) { 242 ql_log(ql_log_info, sp->fcport->vha, 0xffff, 243 "%s Calling done on sp: %p, type: 0x%x\n", 244 __func__, sp, sp->type); 245 sp->done(sp, 0); 246 goto out; 247 } 248 249 /* 250 * sp may not be valid after abort_command if return code is either 251 * SUCCESS or ERR_FROM_FW codes, so cache the value here. 252 */ 253 io_wait_for_abort_done = ql2xabts_wait_nvme && 254 QLA_ABTS_WAIT_ENABLED(sp); 255 handle = sp->handle; 256 257 rval = ha->isp_ops->abort_command(sp); 258 259 ql_dbg(ql_dbg_io, fcport->vha, 0x212b, 260 "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n", 261 __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted", 262 sp, handle, fcport, rval); 263 264 /* 265 * If async tmf is enabled, the abort callback is called only on 266 * return codes QLA_SUCCESS and QLA_ERR_FROM_FW. 267 */ 268 if (ql2xasynctmfenable && 269 rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW) 270 abts_done_called = 0; 271 272 /* 273 * Returned before decreasing kref so that I/O requests 274 * are waited until ABTS complete. This kref is decreased 275 * at qla24xx_abort_sp_done function. 276 */ 277 if (abts_done_called && io_wait_for_abort_done) 278 return; 279 out: 280 /* kref_get was done before work was schedule. */ 281 kref_put(&sp->cmd_kref, sp->put_fn); 282 } 283 284 static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport, 285 struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd) 286 { 287 struct nvme_private *priv = fd->private; 288 unsigned long flags; 289 290 spin_lock_irqsave(&priv->cmd_lock, flags); 291 if (!priv->sp) { 292 spin_unlock_irqrestore(&priv->cmd_lock, flags); 293 return; 294 } 295 296 if (!kref_get_unless_zero(&priv->sp->cmd_kref)) { 297 spin_unlock_irqrestore(&priv->cmd_lock, flags); 298 return; 299 } 300 spin_unlock_irqrestore(&priv->cmd_lock, flags); 301 302 INIT_WORK(&priv->abort_work, qla_nvme_abort_work); 303 schedule_work(&priv->abort_work); 304 } 305 306 static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, 307 struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd) 308 { 309 struct qla_nvme_rport *qla_rport = rport->private; 310 fc_port_t *fcport = qla_rport->fcport; 311 struct srb_iocb *nvme; 312 struct nvme_private *priv = fd->private; 313 struct scsi_qla_host *vha; 314 int rval = QLA_FUNCTION_FAILED; 315 struct qla_hw_data *ha; 316 srb_t *sp; 317 318 if (!fcport || fcport->deleted) 319 return rval; 320 321 vha = fcport->vha; 322 ha = vha->hw; 323 324 if (!ha->flags.fw_started) 325 return rval; 326 327 /* Alloc SRB structure */ 328 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); 329 if (!sp) 330 return rval; 331 332 sp->type = SRB_NVME_LS; 333 sp->name = "nvme_ls"; 334 sp->done = qla_nvme_sp_ls_done; 335 sp->put_fn = qla_nvme_release_ls_cmd_kref; 336 sp->priv = priv; 337 priv->sp = sp; 338 kref_init(&sp->cmd_kref); 339 spin_lock_init(&priv->cmd_lock); 340 nvme = &sp->u.iocb_cmd; 341 priv->fd = fd; 342 nvme->u.nvme.desc = fd; 343 nvme->u.nvme.dir = 0; 344 nvme->u.nvme.dl = 0; 345 nvme->u.nvme.cmd_len = fd->rqstlen; 346 nvme->u.nvme.rsp_len = fd->rsplen; 347 nvme->u.nvme.rsp_dma = fd->rspdma; 348 nvme->u.nvme.timeout_sec = fd->timeout; 349 nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr, 350 fd->rqstlen, DMA_TO_DEVICE); 351 dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, 352 fd->rqstlen, DMA_TO_DEVICE); 353 354 rval = qla2x00_start_sp(sp); 355 if (rval != QLA_SUCCESS) { 356 ql_log(ql_log_warn, vha, 0x700e, 357 "qla2x00_start_sp failed = %d\n", rval); 358 wake_up(&sp->nvme_ls_waitq); 359 sp->priv = NULL; 360 priv->sp = NULL; 361 qla2x00_rel_sp(sp); 362 return rval; 363 } 364 365 return rval; 366 } 367 368 static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport, 369 struct nvme_fc_remote_port *rport, void *hw_queue_handle, 370 struct nvmefc_fcp_req *fd) 371 { 372 struct nvme_private *priv = fd->private; 373 unsigned long flags; 374 375 spin_lock_irqsave(&priv->cmd_lock, flags); 376 if (!priv->sp) { 377 spin_unlock_irqrestore(&priv->cmd_lock, flags); 378 return; 379 } 380 if (!kref_get_unless_zero(&priv->sp->cmd_kref)) { 381 spin_unlock_irqrestore(&priv->cmd_lock, flags); 382 return; 383 } 384 spin_unlock_irqrestore(&priv->cmd_lock, flags); 385 386 INIT_WORK(&priv->abort_work, qla_nvme_abort_work); 387 schedule_work(&priv->abort_work); 388 } 389 390 static inline int qla2x00_start_nvme_mq(srb_t *sp) 391 { 392 unsigned long flags; 393 uint32_t *clr_ptr; 394 uint32_t handle; 395 struct cmd_nvme *cmd_pkt; 396 uint16_t cnt, i; 397 uint16_t req_cnt; 398 uint16_t tot_dsds; 399 uint16_t avail_dsds; 400 struct dsd64 *cur_dsd; 401 struct req_que *req = NULL; 402 struct rsp_que *rsp = NULL; 403 struct scsi_qla_host *vha = sp->fcport->vha; 404 struct qla_hw_data *ha = vha->hw; 405 struct qla_qpair *qpair = sp->qpair; 406 struct srb_iocb *nvme = &sp->u.iocb_cmd; 407 struct scatterlist *sgl, *sg; 408 struct nvmefc_fcp_req *fd = nvme->u.nvme.desc; 409 struct nvme_fc_cmd_iu *cmd = fd->cmdaddr; 410 uint32_t rval = QLA_SUCCESS; 411 412 /* Setup qpair pointers */ 413 req = qpair->req; 414 rsp = qpair->rsp; 415 tot_dsds = fd->sg_cnt; 416 417 /* Acquire qpair specific lock */ 418 spin_lock_irqsave(&qpair->qp_lock, flags); 419 420 handle = qla2xxx_get_next_handle(req); 421 if (handle == 0) { 422 rval = -EBUSY; 423 goto queuing_error; 424 } 425 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 426 if (req->cnt < (req_cnt + 2)) { 427 if (IS_SHADOW_REG_CAPABLE(ha)) { 428 cnt = *req->out_ptr; 429 } else { 430 cnt = rd_reg_dword_relaxed(req->req_q_out); 431 if (qla2x00_check_reg16_for_disconnect(vha, cnt)) 432 goto queuing_error; 433 } 434 435 if (req->ring_index < cnt) 436 req->cnt = cnt - req->ring_index; 437 else 438 req->cnt = req->length - (req->ring_index - cnt); 439 440 if (req->cnt < (req_cnt + 2)){ 441 rval = -EBUSY; 442 goto queuing_error; 443 } 444 } 445 446 if (unlikely(!fd->sqid)) { 447 if (cmd->sqe.common.opcode == nvme_admin_async_event) { 448 nvme->u.nvme.aen_op = 1; 449 atomic_inc(&ha->nvme_active_aen_cnt); 450 } 451 } 452 453 /* Build command packet. */ 454 req->current_outstanding_cmd = handle; 455 req->outstanding_cmds[handle] = sp; 456 sp->handle = handle; 457 req->cnt -= req_cnt; 458 459 cmd_pkt = (struct cmd_nvme *)req->ring_ptr; 460 cmd_pkt->handle = make_handle(req->id, handle); 461 462 /* Zero out remaining portion of packet. */ 463 clr_ptr = (uint32_t *)cmd_pkt + 2; 464 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 465 466 cmd_pkt->entry_status = 0; 467 468 /* Update entry type to indicate Command NVME IOCB */ 469 cmd_pkt->entry_type = COMMAND_NVME; 470 471 /* No data transfer how do we check buffer len == 0?? */ 472 if (fd->io_dir == NVMEFC_FCP_READ) { 473 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); 474 qpair->counters.input_bytes += fd->payload_length; 475 qpair->counters.input_requests++; 476 } else if (fd->io_dir == NVMEFC_FCP_WRITE) { 477 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); 478 if ((vha->flags.nvme_first_burst) && 479 (sp->fcport->nvme_prli_service_param & 480 NVME_PRLI_SP_FIRST_BURST)) { 481 if ((fd->payload_length <= 482 sp->fcport->nvme_first_burst_size) || 483 (sp->fcport->nvme_first_burst_size == 0)) 484 cmd_pkt->control_flags |= 485 cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE); 486 } 487 qpair->counters.output_bytes += fd->payload_length; 488 qpair->counters.output_requests++; 489 } else if (fd->io_dir == 0) { 490 cmd_pkt->control_flags = 0; 491 } 492 493 if (sp->fcport->edif.enable && fd->io_dir != 0) 494 cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF); 495 496 /* Set BIT_13 of control flags for Async event */ 497 if (vha->flags.nvme2_enabled && 498 cmd->sqe.common.opcode == nvme_admin_async_event) { 499 cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT); 500 } 501 502 /* Set NPORT-ID */ 503 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 504 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 505 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 506 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 507 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 508 509 /* NVME RSP IU */ 510 cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen); 511 put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address); 512 513 /* NVME CNMD IU */ 514 cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen); 515 cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma); 516 517 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 518 cmd_pkt->byte_count = cpu_to_le32(fd->payload_length); 519 520 /* One DSD is available in the Command Type NVME IOCB */ 521 avail_dsds = 1; 522 cur_dsd = &cmd_pkt->nvme_dsd; 523 sgl = fd->first_sgl; 524 525 /* Load data segments */ 526 for_each_sg(sgl, sg, tot_dsds, i) { 527 cont_a64_entry_t *cont_pkt; 528 529 /* Allocate additional continuation packets? */ 530 if (avail_dsds == 0) { 531 /* 532 * Five DSDs are available in the Continuation 533 * Type 1 IOCB. 534 */ 535 536 /* Adjust ring index */ 537 req->ring_index++; 538 if (req->ring_index == req->length) { 539 req->ring_index = 0; 540 req->ring_ptr = req->ring; 541 } else { 542 req->ring_ptr++; 543 } 544 cont_pkt = (cont_a64_entry_t *)req->ring_ptr; 545 put_unaligned_le32(CONTINUE_A64_TYPE, 546 &cont_pkt->entry_type); 547 548 cur_dsd = cont_pkt->dsd; 549 avail_dsds = ARRAY_SIZE(cont_pkt->dsd); 550 } 551 552 append_dsd64(&cur_dsd, sg); 553 avail_dsds--; 554 } 555 556 /* Set total entry count. */ 557 cmd_pkt->entry_count = (uint8_t)req_cnt; 558 wmb(); 559 560 /* Adjust ring index. */ 561 req->ring_index++; 562 if (req->ring_index == req->length) { 563 req->ring_index = 0; 564 req->ring_ptr = req->ring; 565 } else { 566 req->ring_ptr++; 567 } 568 569 /* ignore nvme async cmd due to long timeout */ 570 if (!nvme->u.nvme.aen_op) 571 sp->qpair->cmd_cnt++; 572 573 /* Set chip new ring index. */ 574 wrt_reg_dword(req->req_q_in, req->ring_index); 575 576 if (vha->flags.process_response_queue && 577 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 578 qla24xx_process_response_queue(vha, rsp); 579 580 queuing_error: 581 spin_unlock_irqrestore(&qpair->qp_lock, flags); 582 583 return rval; 584 } 585 586 /* Post a command */ 587 static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, 588 struct nvme_fc_remote_port *rport, void *hw_queue_handle, 589 struct nvmefc_fcp_req *fd) 590 { 591 fc_port_t *fcport; 592 struct srb_iocb *nvme; 593 struct scsi_qla_host *vha; 594 int rval; 595 srb_t *sp; 596 struct qla_qpair *qpair = hw_queue_handle; 597 struct nvme_private *priv = fd->private; 598 struct qla_nvme_rport *qla_rport = rport->private; 599 600 if (!priv) { 601 /* nvme association has been torn down */ 602 return -ENODEV; 603 } 604 605 fcport = qla_rport->fcport; 606 607 if (unlikely(!qpair || !fcport || fcport->deleted)) 608 return -EBUSY; 609 610 if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED)) 611 return -ENODEV; 612 613 vha = fcport->vha; 614 615 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) 616 return -EBUSY; 617 618 /* 619 * If we know the dev is going away while the transport is still sending 620 * IO's return busy back to stall the IO Q. This happens when the 621 * link goes away and fw hasn't notified us yet, but IO's are being 622 * returned. If the dev comes back quickly we won't exhaust the IO 623 * retry count at the core. 624 */ 625 if (fcport->nvme_flag & NVME_FLAG_RESETTING) 626 return -EBUSY; 627 628 /* Alloc SRB structure */ 629 sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC); 630 if (!sp) 631 return -EBUSY; 632 633 init_waitqueue_head(&sp->nvme_ls_waitq); 634 kref_init(&sp->cmd_kref); 635 spin_lock_init(&priv->cmd_lock); 636 sp->priv = priv; 637 priv->sp = sp; 638 sp->type = SRB_NVME_CMD; 639 sp->name = "nvme_cmd"; 640 sp->done = qla_nvme_sp_done; 641 sp->put_fn = qla_nvme_release_fcp_cmd_kref; 642 sp->qpair = qpair; 643 sp->vha = vha; 644 sp->cmd_sp = sp; 645 nvme = &sp->u.iocb_cmd; 646 nvme->u.nvme.desc = fd; 647 648 rval = qla2x00_start_nvme_mq(sp); 649 if (rval != QLA_SUCCESS) { 650 ql_log(ql_log_warn, vha, 0x212d, 651 "qla2x00_start_nvme_mq failed = %d\n", rval); 652 wake_up(&sp->nvme_ls_waitq); 653 sp->priv = NULL; 654 priv->sp = NULL; 655 qla2xxx_rel_qpair_sp(sp->qpair, sp); 656 } 657 658 return rval; 659 } 660 661 static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport) 662 { 663 struct scsi_qla_host *vha = lport->private; 664 665 ql_log(ql_log_info, vha, 0x210f, 666 "localport delete of %p completed.\n", vha->nvme_local_port); 667 vha->nvme_local_port = NULL; 668 complete(&vha->nvme_del_done); 669 } 670 671 static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) 672 { 673 fc_port_t *fcport; 674 struct qla_nvme_rport *qla_rport = rport->private; 675 676 fcport = qla_rport->fcport; 677 fcport->nvme_remote_port = NULL; 678 fcport->nvme_flag &= ~NVME_FLAG_REGISTERED; 679 fcport->nvme_flag &= ~NVME_FLAG_DELETING; 680 ql_log(ql_log_info, fcport->vha, 0x2110, 681 "remoteport_delete of %p %8phN completed.\n", 682 fcport, fcport->port_name); 683 complete(&fcport->nvme_del_done); 684 } 685 686 static struct nvme_fc_port_template qla_nvme_fc_transport = { 687 .localport_delete = qla_nvme_localport_delete, 688 .remoteport_delete = qla_nvme_remoteport_delete, 689 .create_queue = qla_nvme_alloc_queue, 690 .delete_queue = NULL, 691 .ls_req = qla_nvme_ls_req, 692 .ls_abort = qla_nvme_ls_abort, 693 .fcp_io = qla_nvme_post_cmd, 694 .fcp_abort = qla_nvme_fcp_abort, 695 .max_hw_queues = 8, 696 .max_sgl_segments = 1024, 697 .max_dif_sgl_segments = 64, 698 .dma_boundary = 0xFFFFFFFF, 699 .local_priv_sz = 8, 700 .remote_priv_sz = sizeof(struct qla_nvme_rport), 701 .lsrqst_priv_sz = sizeof(struct nvme_private), 702 .fcprqst_priv_sz = sizeof(struct nvme_private), 703 }; 704 705 void qla_nvme_unregister_remote_port(struct fc_port *fcport) 706 { 707 int ret; 708 709 if (!IS_ENABLED(CONFIG_NVME_FC)) 710 return; 711 712 ql_log(ql_log_warn, fcport->vha, 0x2112, 713 "%s: unregister remoteport on %p %8phN\n", 714 __func__, fcport, fcport->port_name); 715 716 if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags)) 717 nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0); 718 719 init_completion(&fcport->nvme_del_done); 720 ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port); 721 if (ret) 722 ql_log(ql_log_info, fcport->vha, 0x2114, 723 "%s: Failed to unregister nvme_remote_port (%d)\n", 724 __func__, ret); 725 wait_for_completion(&fcport->nvme_del_done); 726 } 727 728 void qla_nvme_delete(struct scsi_qla_host *vha) 729 { 730 int nv_ret; 731 732 if (!IS_ENABLED(CONFIG_NVME_FC)) 733 return; 734 735 if (vha->nvme_local_port) { 736 init_completion(&vha->nvme_del_done); 737 ql_log(ql_log_info, vha, 0x2116, 738 "unregister localport=%p\n", 739 vha->nvme_local_port); 740 nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port); 741 if (nv_ret) 742 ql_log(ql_log_info, vha, 0x2115, 743 "Unregister of localport failed\n"); 744 else 745 wait_for_completion(&vha->nvme_del_done); 746 } 747 } 748 749 int qla_nvme_register_hba(struct scsi_qla_host *vha) 750 { 751 struct nvme_fc_port_template *tmpl; 752 struct qla_hw_data *ha; 753 struct nvme_fc_port_info pinfo; 754 int ret = -EINVAL; 755 756 if (!IS_ENABLED(CONFIG_NVME_FC)) 757 return ret; 758 759 ha = vha->hw; 760 tmpl = &qla_nvme_fc_transport; 761 762 WARN_ON(vha->nvme_local_port); 763 764 qla_nvme_fc_transport.max_hw_queues = 765 min((uint8_t)(qla_nvme_fc_transport.max_hw_queues), 766 (uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1)); 767 768 pinfo.node_name = wwn_to_u64(vha->node_name); 769 pinfo.port_name = wwn_to_u64(vha->port_name); 770 pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR; 771 pinfo.port_id = vha->d_id.b24; 772 773 ql_log(ql_log_info, vha, 0xffff, 774 "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n", 775 pinfo.node_name, pinfo.port_name, pinfo.port_id); 776 qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary; 777 778 ret = nvme_fc_register_localport(&pinfo, tmpl, 779 get_device(&ha->pdev->dev), &vha->nvme_local_port); 780 if (ret) { 781 ql_log(ql_log_warn, vha, 0xffff, 782 "register_localport failed: ret=%x\n", ret); 783 } else { 784 vha->nvme_local_port->private = vha; 785 } 786 787 return ret; 788 } 789 790 void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp) 791 { 792 struct qla_hw_data *ha; 793 794 if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp))) 795 return; 796 797 ha = orig_sp->fcport->vha->hw; 798 799 WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0)); 800 /* Use Driver Specified Retry Count */ 801 abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT); 802 abt->drv.abts_rty_cnt = cpu_to_le16(2); 803 /* Use specified response timeout */ 804 abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT); 805 /* set it to 2 * r_a_tov in secs */ 806 abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10)); 807 } 808 809 void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp) 810 { 811 u16 comp_status; 812 struct scsi_qla_host *vha; 813 814 if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp))) 815 return; 816 817 vha = orig_sp->fcport->vha; 818 819 comp_status = le16_to_cpu(abt->comp_status); 820 switch (comp_status) { 821 case CS_RESET: /* reset event aborted */ 822 case CS_ABORTED: /* IOCB was cleaned */ 823 /* N_Port handle is not currently logged in */ 824 case CS_TIMEOUT: 825 /* N_Port handle was logged out while waiting for ABTS to complete */ 826 case CS_PORT_UNAVAILABLE: 827 /* Firmware found that the port name changed */ 828 case CS_PORT_LOGGED_OUT: 829 /* BA_RJT was received for the ABTS */ 830 case CS_PORT_CONFIG_CHG: 831 ql_dbg(ql_dbg_async, vha, 0xf09d, 832 "Abort I/O IOCB completed with error, comp_status=%x\n", 833 comp_status); 834 break; 835 836 /* BA_RJT was received for the ABTS */ 837 case CS_REJECT_RECEIVED: 838 ql_dbg(ql_dbg_async, vha, 0xf09e, 839 "BA_RJT was received for the ABTS rjt_vendorUnique = %u", 840 abt->fw.ba_rjt_vendorUnique); 841 ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e, 842 "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n", 843 abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode); 844 break; 845 846 case CS_COMPLETE: 847 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f, 848 "IOCB request is completed successfully comp_status=%x\n", 849 comp_status); 850 break; 851 852 case CS_IOCB_ERROR: 853 ql_dbg(ql_dbg_async, vha, 0xf0a0, 854 "IOCB request is failed, comp_status=%x\n", comp_status); 855 break; 856 857 default: 858 ql_dbg(ql_dbg_async, vha, 0xf0a1, 859 "Invalid Abort IO IOCB Completion Status %x\n", 860 comp_status); 861 break; 862 } 863 } 864 865 inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp) 866 { 867 if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp))) 868 return; 869 kref_put(&orig_sp->cmd_kref, orig_sp->put_fn); 870 } 871