/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include "qla_def.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

static void qla_nvme_unregister_remote_port(struct work_struct *);

int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct nvme_rport *rport;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (fcport->nvme_flag & NVME_FLAG_REGISTERED)
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)))
		return 0;

	INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
	rport = kzalloc(sizeof(*rport), GFP_KERNEL);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2101,
		    "%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	rport->req.port_name = wwn_to_u64(fcport->port_name);
	rport->req.node_name = wwn_to_u64(fcport->node_name);
	rport->req.port_role = 0;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		rport->req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		rport->req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		rport->req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	rport->req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=pn-0x%016llx:nn-0x%016llx PortID:%06x\n",
	    __func__, rport->req.port_name, rport->req.node_name,
	    rport->req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &rport->req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		/* The rport was never added to nvme_rport_list; free it. */
		kfree(rport);
		return ret;
	}

	fcport->nvme_remote_port->private = fcport;
	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	atomic_set(&fcport->nvme_ref_count, 1);
	init_waitqueue_head(&fcport->nvme_waitQ);
	rport->fcport = fcport;
	list_add_tail(&rport->list, &vha->nvme_rport_list);
	return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!qidx)
		qidx++;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx=%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	ql_log(ql_log_warn, vha, 0xffff,
	    "allocating q for idx=%x w/o cpu mask\n", qidx);
	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}

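/*
 * Completion callback for an NVMe LS (link service) SRB: record the
 * completion status, schedule the ls_work item to hand the request back
 * to the FC-NVMe transport, and release the SRB.
 */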
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_ls_req *fd;
	struct nvme_private *priv;

	if (atomic_read(&sp->ref_count) == 0) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
		    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
		return;
	}

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (res)
		res = -EINVAL;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;
	priv = fd->private;
	priv->comp_status = res;
	schedule_work(&priv->ls_work);
	/* work schedule doesn't need the sp */
	qla2x00_rel_sp(sp);
}

static void qla_nvme_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_fcp_req *fd;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (!(sp->fcport->nvme_flag & NVME_FLAG_REGISTERED))
		goto rel;

	if (unlikely(nvme->u.nvme.comp_status || res))
		fd->status = -EINVAL;
	else
		fd->status = 0;

	fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
	fd->done(fd);
rel:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	fc_port_t *fcport = rport->private;
	srb_t *sp = priv->sp;
	int rval;
	struct qla_hw_data *ha = fcport->vha->hw;

	rval = ha->isp_ops->abort_command(sp);
	if (rval != QLA_SUCCESS)
		ql_log(ql_log_warn, fcport->vha, 0x2125,
		    "%s: failed to abort LS command for SP:%p rval=%x\n",
		    __func__, sp, rval);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
	    container_of(work, struct nvme_private, ls_work);
	struct nvmefc_ls_req *fd = priv->fd;

	fd->done(fd, priv->comp_status);
}

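/*
 * FC-NVMe transport entry point for LS requests: wrap the request in an
 * SRB_NVME_LS SRB, DMA-map the request payload, and queue the IOCB via
 * qla2x00_start_sp().
 */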
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	fc_port_t *fcport = (fc_port_t *)rport->private;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return rval;

	vha = fcport->vha;
	ha = vha->hw;
	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	atomic_set(&sp->ref_count, 1);
	init_waitqueue_head(&sp->nvme_ls_waitQ);
	nvme = &sp->u.iocb_cmd;
	priv->sp = sp;
	priv->fd = fd;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitQ);
		return rval;
	}

	return rval;
}

static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	srb_t *sp = priv->sp;
	int rval;
	fc_port_t *fcport = rport->private;
	struct qla_hw_data *ha = fcport->vha->hw;

	rval = ha->isp_ops->abort_command(sp);
	/* QLA_SUCCESS is 0, so only log when the abort actually failed. */
	if (rval != QLA_SUCCESS)
		ql_log(ql_log_warn, fcport->vha, 0x2127,
		    "%s: failed to abort command for SP:%p rval=%x\n",
		    __func__, sp, rval);

	ql_dbg(ql_dbg_io, fcport->vha, 0x2126,
	    "%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport);
}

static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
{
	struct scsi_qla_host *vha = lport->private;
	unsigned long flags;
	struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle;

	/* Acquire ring specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

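/*
 * Build and post a Command Type NVME IOCB on the qpair's request queue:
 * reserve an outstanding-command handle, fill in the command and response
 * IU descriptors, then load the data segments (one DSD fits in the command
 * IOCB itself, five more in each Continuation Type 1 IOCB) before updating
 * the request-queue in-pointer.
 */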
static int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = -1;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -1;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&vha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer; how do we check buffer len == 0? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
		vha->qla_stats.input_bytes += fd->payload_length;
		vha->qla_stats.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
		vha->qla_stats.output_bytes += fd->payload_length;
		vha->qla_stats.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
	cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
	cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			*((uint32_t *)(&cont_pkt->entry_type)) =
			    cpu_to_le32(CONTINUE_A64_TYPE);

			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return rval;
}

/* Post a command */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle;
	struct nvme_private *priv;

	if (!fd) {
		ql_log(ql_log_warn, NULL, 0x2134, "NO NVMe FCP request\n");
		return rval;
	}

	priv = fd->private;
	fcport = (fc_port_t *)rport->private;
	if (!fcport) {
		ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
		return rval;
	}

	vha = fcport->vha;
	if ((!qpair) || (!(fcport->nvme_flag & NVME_FLAG_REGISTERED)))
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EIO;

	atomic_set(&sp->ref_count, 1);
	init_waitqueue_head(&sp->nvme_ls_waitQ);
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->qpair = qpair;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitQ);
		return -EIO;
	}

	return rval;
}

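/* The FC-NVMe transport is done with the local port; drop our reference. */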
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	atomic_dec(&vha->nvme_ref_count);
	wake_up_all(&vha->nvme_waitQ);

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct nvme_rport *r_port, *trport;

	fcport = (fc_port_t *)rport->private;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	atomic_dec(&fcport->nvme_ref_count);
	wake_up_all(&fcport->nvme_waitQ);

	list_for_each_entry_safe(r_port, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (r_port->fcport == fcport) {
			list_del(&r_port->list);
			/* Only free the entry that was actually unlinked. */
			kfree(r_port);
			break;
		}
	}

	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p completed.\n", fcport);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue = qla_nvme_alloc_queue,
	.delete_queue = NULL,
	.ls_req = qla_nvme_ls_req,
	.ls_abort = qla_nvme_ls_abort,
	.fcp_io = qla_nvme_post_cmd,
	.fcp_abort = qla_nvme_fcp_abort,
	.poll_queue = qla_nvme_poll,
	.max_hw_queues = 8,
	.max_sgl_segments = 128,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz = 8,
	.remote_priv_sz = 0,
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

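/*
 * Helpers used during NVMe teardown: poll briefly for an aborted SRB or a
 * deleting remote port to finish before giving up.
 */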
#define NVME_ABORT_POLLING_PERIOD 2
static int qla_nvme_wait_on_command(srb_t *sp)
{
	int ret = QLA_SUCCESS;

	wait_event_timeout(sp->nvme_ls_waitQ, (atomic_read(&sp->ref_count) > 1),
	    NVME_ABORT_POLLING_PERIOD*HZ);

	if (atomic_read(&sp->ref_count) > 1)
		ret = QLA_FUNCTION_FAILED;

	return ret;
}

static int qla_nvme_wait_on_rport_del(fc_port_t *fcport)
{
	int ret = QLA_SUCCESS;

	wait_event_timeout(fcport->nvme_waitQ,
	    atomic_read(&fcport->nvme_ref_count),
	    NVME_ABORT_POLLING_PERIOD*HZ);

	if (atomic_read(&fcport->nvme_ref_count)) {
		ret = QLA_FUNCTION_FAILED;
		ql_log(ql_log_info, fcport->vha, 0x2111,
		    "timed out waiting for fcport=%p to delete\n", fcport);
	}

	return ret;
}

void qla_nvme_abort(struct qla_hw_data *ha, srb_t *sp)
{
	int rval;

	rval = ha->isp_ops->abort_command(sp);
	if (!rval) {
		if (!qla_nvme_wait_on_command(sp))
			ql_log(ql_log_warn, NULL, 0x2112,
			    "nvme_wait_on_command timed out waiting on sp=%p\n",
			    sp);
	}
}

static void qla_nvme_abort_all(fc_port_t *fcport)
{
	int que, cnt;
	unsigned long flags;
	srb_t *sp;
	struct qla_hw_data *ha = fcport->vha->hw;
	struct req_que *req;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;
		if (!req->outstanding_cmds)
			continue;
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if ((sp) && ((sp->type == SRB_NVME_CMD) ||
			    (sp->type == SRB_NVME_LS)) &&
			    (sp->fcport == fcport)) {
				atomic_inc(&sp->ref_count);
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				qla_nvme_abort(ha, sp);
				spin_lock_irqsave(&ha->hardware_lock, flags);
				req->outstanding_cmds[cnt] = NULL;
				sp->done(sp, 1);
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, struct fc_port,
	    nvme_del_work);
	struct nvme_rport *rport, *trport;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	list_for_each_entry_safe(rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (rport->fcport == fcport) {
			ql_log(ql_log_info, fcport->vha, 0x2113,
			    "%s: fcport=%p\n", __func__, fcport);
			nvme_fc_unregister_remoteport(
			    fcport->nvme_remote_port);
		}
	}
}

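/*
 * Tear down every NVMe remote port on this host and then unregister the
 * local port from the FC-NVMe transport.
 */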
void qla_nvme_delete(scsi_qla_host_t *vha)
{
	struct nvme_rport *rport, *trport;
	fc_port_t *fcport;
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	list_for_each_entry_safe(rport, trport, &vha->nvme_rport_list, list) {
		fcport = rport->fcport;

		ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
		    __func__, fcport);

		nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
		qla_nvme_wait_on_rport_del(fcport);
		qla_nvme_abort_all(fcport);
	}

	if (vha->nvme_local_port) {
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret == 0)
			ql_log(ql_log_info, vha, 0x2116,
			    "unregistered localport=%p\n",
			    vha->nvme_local_port);
		else
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
	}
}

void qla_nvme_register_hba(scsi_qla_host_t *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);
	WARN_ON(ha->max_req_queues < 3);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=pn-0x%llx:nn-0x%llx on portID:%x\n",
	    pinfo.port_name, pinfo.node_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
		return;
	}
	atomic_set(&vha->nvme_ref_count, 1);
	vha->nvme_local_port->private = vha;
	init_waitqueue_head(&vha->nvme_waitQ);
}