// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics RDMA host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <rdma/mr_pool.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
#include <linux/blk-integrity.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/nvme.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/nvme-rdma.h>

#include "nvme.h"
#include "fabrics.h"


#define NVME_RDMA_CONNECT_TIMEOUT_MS	3000		/* 3 seconds */

#define NVME_RDMA_MAX_SEGMENTS		256

#define NVME_RDMA_MAX_INLINE_SEGMENTS	4

#define NVME_RDMA_DATA_SGL_SIZE \
	(sizeof(struct scatterlist) * NVME_INLINE_SG_CNT)
#define NVME_RDMA_METADATA_SGL_SIZE \
	(sizeof(struct scatterlist) * NVME_INLINE_METADATA_SG_CNT)

struct nvme_rdma_device {
	struct ib_device	*dev;
	struct ib_pd		*pd;
	struct kref		ref;
	struct list_head	entry;
	unsigned int		num_inline_segments;
};

struct nvme_rdma_qe {
	struct ib_cqe		cqe;
	void			*data;
	u64			dma;
};

struct nvme_rdma_sgl {
	int			nents;
	struct sg_table		sg_table;
};

struct nvme_rdma_queue;
struct nvme_rdma_request {
	struct nvme_request	req;
	struct ib_mr		*mr;
	struct nvme_rdma_qe	sqe;
	union nvme_result	result;
	__le16			status;
	refcount_t		ref;
	struct ib_sge		sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
	u32			num_sge;
	struct ib_reg_wr	reg_wr;
	struct ib_cqe		reg_cqe;
	struct nvme_rdma_queue	*queue;
	struct nvme_rdma_sgl	data_sgl;
	struct nvme_rdma_sgl	*metadata_sgl;
	bool			use_sig_mr;
};

enum nvme_rdma_queue_flags {
	NVME_RDMA_Q_ALLOCATED		= 0,
	NVME_RDMA_Q_LIVE		= 1,
	NVME_RDMA_Q_TR_READY		= 2,
};

struct nvme_rdma_queue {
	struct nvme_rdma_qe	*rsp_ring;
	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_rdma_ctrl	*ctrl;
	struct nvme_rdma_device	*device;
	struct ib_cq		*ib_cq;
	struct ib_qp		*qp;

	unsigned long		flags;
	struct rdma_cm_id	*cm_id;
	int			cm_error;
	struct completion	cm_done;
	bool			pi_support;
	int			cq_size;
	struct mutex		queue_lock;
};

struct nvme_rdma_ctrl {
	/* read only in the hot path */
	struct nvme_rdma_queue	*queues;

	/* other member variables */
	struct blk_mq_tag_set	tag_set;
	struct work_struct	err_work;

	struct nvme_rdma_qe	async_event_sqe;

	struct delayed_work	reconnect_work;

	struct list_head	list;

	struct blk_mq_tag_set	admin_tag_set;
	struct nvme_rdma_device	*device;

	u32			max_fr_pages;

	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;

	struct nvme_ctrl	ctrl;
	bool			use_inline_data;
	u32			io_queues[HCTX_MAX_TYPES];
};

static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
}

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static LIST_HEAD(nvme_rdma_ctrl_list);
static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);

/*
 * Disabling this option makes small I/O go faster, but is fundamentally
 * unsafe.  With it turned off we will have to register a global rkey that
 * allows read and write access to all physical memory.
 */
static bool register_always = true;
module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
	 "Use memory registration even for contiguous memory regions");

static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event);
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvme_rdma_complete_rq(struct request *rq);

static const struct blk_mq_ops nvme_rdma_mq_ops;
static const struct blk_mq_ops nvme_rdma_admin_mq_ops;

static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
{
	return nvme_rdma_queue_idx(queue) >
		queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
		queue->ctrl->io_queues[HCTX_TYPE_READ];
}

static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
		size_t capsule_size, enum dma_data_direction dir)
{
	ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
	kfree(qe->data);
}

static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
		size_t capsule_size, enum dma_data_direction dir)
{
	qe->data = kzalloc(capsule_size, GFP_KERNEL);
	if (!qe->data)
		return -ENOMEM;

	qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
	if (ib_dma_mapping_error(ibdev, qe->dma)) {
		kfree(qe->data);
		qe->data = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_rdma_free_ring(struct ib_device *ibdev,
		struct nvme_rdma_qe *ring, size_t ib_queue_size,
		size_t capsule_size, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < ib_queue_size; i++)
		nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
	kfree(ring);
}

static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
		size_t ib_queue_size, size_t capsule_size,
		enum dma_data_direction dir)
{
	struct nvme_rdma_qe *ring;
	int i;

	ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
	if (!ring)
		return NULL;

	/*
	 * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue
	 * lifetime. It's safe, since any change in the underlying RDMA device
	 * will issue error recovery and queue re-creation.
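	 * (nvme_rdma_create_queue_ib() uses this to allocate queue->queue_size
	 * response entries of sizeof(struct nvme_completion), mapped
	 * DMA_FROM_DEVICE; they stay mapped until nvme_rdma_destroy_queue_ib()
	 * tears the queue down.)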
227 */ 228 for (i = 0; i < ib_queue_size; i++) { 229 if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir)) 230 goto out_free_ring; 231 } 232 233 return ring; 234 235 out_free_ring: 236 nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir); 237 return NULL; 238 } 239 240 static void nvme_rdma_qp_event(struct ib_event *event, void *context) 241 { 242 pr_debug("QP event %s (%d)\n", 243 ib_event_msg(event->event), event->event); 244 245 } 246 247 static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue) 248 { 249 int ret; 250 251 ret = wait_for_completion_interruptible_timeout(&queue->cm_done, 252 msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1); 253 if (ret < 0) 254 return ret; 255 if (ret == 0) 256 return -ETIMEDOUT; 257 WARN_ON_ONCE(queue->cm_error > 0); 258 return queue->cm_error; 259 } 260 261 static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor) 262 { 263 struct nvme_rdma_device *dev = queue->device; 264 struct ib_qp_init_attr init_attr; 265 int ret; 266 267 memset(&init_attr, 0, sizeof(init_attr)); 268 init_attr.event_handler = nvme_rdma_qp_event; 269 /* +1 for drain */ 270 init_attr.cap.max_send_wr = factor * queue->queue_size + 1; 271 /* +1 for drain */ 272 init_attr.cap.max_recv_wr = queue->queue_size + 1; 273 init_attr.cap.max_recv_sge = 1; 274 init_attr.cap.max_send_sge = 1 + dev->num_inline_segments; 275 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 276 init_attr.qp_type = IB_QPT_RC; 277 init_attr.send_cq = queue->ib_cq; 278 init_attr.recv_cq = queue->ib_cq; 279 if (queue->pi_support) 280 init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN; 281 init_attr.qp_context = queue; 282 283 ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr); 284 285 queue->qp = queue->cm_id->qp; 286 return ret; 287 } 288 289 static void nvme_rdma_exit_request(struct blk_mq_tag_set *set, 290 struct request *rq, unsigned int hctx_idx) 291 { 292 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 293 294 kfree(req->sqe.data); 295 } 296 297 static int nvme_rdma_init_request(struct blk_mq_tag_set *set, 298 struct request *rq, unsigned int hctx_idx, 299 unsigned int numa_node) 300 { 301 struct nvme_rdma_ctrl *ctrl = set->driver_data; 302 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 303 int queue_idx = (set == &ctrl->tag_set) ? 
hctx_idx + 1 : 0; 304 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; 305 306 nvme_req(rq)->ctrl = &ctrl->ctrl; 307 req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL); 308 if (!req->sqe.data) 309 return -ENOMEM; 310 311 /* metadata nvme_rdma_sgl struct is located after command's data SGL */ 312 if (queue->pi_support) 313 req->metadata_sgl = (void *)nvme_req(rq) + 314 sizeof(struct nvme_rdma_request) + 315 NVME_RDMA_DATA_SGL_SIZE; 316 317 req->queue = queue; 318 nvme_req(rq)->cmd = req->sqe.data; 319 320 return 0; 321 } 322 323 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 324 unsigned int hctx_idx) 325 { 326 struct nvme_rdma_ctrl *ctrl = data; 327 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; 328 329 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count); 330 331 hctx->driver_data = queue; 332 return 0; 333 } 334 335 static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, 336 unsigned int hctx_idx) 337 { 338 struct nvme_rdma_ctrl *ctrl = data; 339 struct nvme_rdma_queue *queue = &ctrl->queues[0]; 340 341 BUG_ON(hctx_idx != 0); 342 343 hctx->driver_data = queue; 344 return 0; 345 } 346 347 static void nvme_rdma_free_dev(struct kref *ref) 348 { 349 struct nvme_rdma_device *ndev = 350 container_of(ref, struct nvme_rdma_device, ref); 351 352 mutex_lock(&device_list_mutex); 353 list_del(&ndev->entry); 354 mutex_unlock(&device_list_mutex); 355 356 ib_dealloc_pd(ndev->pd); 357 kfree(ndev); 358 } 359 360 static void nvme_rdma_dev_put(struct nvme_rdma_device *dev) 361 { 362 kref_put(&dev->ref, nvme_rdma_free_dev); 363 } 364 365 static int nvme_rdma_dev_get(struct nvme_rdma_device *dev) 366 { 367 return kref_get_unless_zero(&dev->ref); 368 } 369 370 static struct nvme_rdma_device * 371 nvme_rdma_find_get_device(struct rdma_cm_id *cm_id) 372 { 373 struct nvme_rdma_device *ndev; 374 375 mutex_lock(&device_list_mutex); 376 list_for_each_entry(ndev, &device_list, entry) { 377 if (ndev->dev->node_guid == cm_id->device->node_guid && 378 nvme_rdma_dev_get(ndev)) 379 goto out_unlock; 380 } 381 382 ndev = kzalloc(sizeof(*ndev), GFP_KERNEL); 383 if (!ndev) 384 goto out_err; 385 386 ndev->dev = cm_id->device; 387 kref_init(&ndev->ref); 388 389 ndev->pd = ib_alloc_pd(ndev->dev, 390 register_always ? 
			0 : IB_PD_UNSAFE_GLOBAL_RKEY);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (!(ndev->dev->attrs.device_cap_flags &
	      IB_DEVICE_MEM_MGT_EXTENSIONS)) {
		dev_err(&ndev->dev->dev,
			"Memory registrations not supported.\n");
		goto out_free_pd;
	}

	ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS,
					ndev->dev->attrs.max_send_sge - 1);
	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue)
{
	if (nvme_rdma_poll_queue(queue))
		ib_free_cq(queue->ib_cq);
	else
		ib_cq_pool_put(queue->ib_cq, queue->cq_size);
}

static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
	struct nvme_rdma_device *dev;
	struct ib_device *ibdev;

	if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
		return;

	dev = queue->device;
	ibdev = dev->dev;

	if (queue->pi_support)
		ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs);
	ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);

	/*
	 * The cm_id object might have been destroyed during RDMA connection
	 * establishment error flow to avoid getting other cma events, thus
	 * the destruction of the QP shouldn't use rdma_cm API.
	 */
	ib_destroy_qp(queue->qp);
	nvme_rdma_free_cq(queue);

	nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
			    sizeof(struct nvme_completion), DMA_FROM_DEVICE);

	nvme_rdma_dev_put(dev);
}

static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
{
	u32 max_page_list_len;

	if (pi_support)
		max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
	else
		max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;

	return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
}

static int nvme_rdma_create_cq(struct ib_device *ibdev,
		struct nvme_rdma_queue *queue)
{
	int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
	enum ib_poll_context poll_ctx;

	/*
	 * Spread I/O queues completion vectors according to their queue index.
	 * Admin queues can always go on completion vector 0.
	 */
	comp_vector = (idx == 0 ?
idx : idx - 1) % ibdev->num_comp_vectors; 477 478 /* Polling queues need direct cq polling context */ 479 if (nvme_rdma_poll_queue(queue)) { 480 poll_ctx = IB_POLL_DIRECT; 481 queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size, 482 comp_vector, poll_ctx); 483 } else { 484 poll_ctx = IB_POLL_SOFTIRQ; 485 queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size, 486 comp_vector, poll_ctx); 487 } 488 489 if (IS_ERR(queue->ib_cq)) { 490 ret = PTR_ERR(queue->ib_cq); 491 return ret; 492 } 493 494 return 0; 495 } 496 497 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) 498 { 499 struct ib_device *ibdev; 500 const int send_wr_factor = 3; /* MR, SEND, INV */ 501 const int cq_factor = send_wr_factor + 1; /* + RECV */ 502 int ret, pages_per_mr; 503 504 queue->device = nvme_rdma_find_get_device(queue->cm_id); 505 if (!queue->device) { 506 dev_err(queue->cm_id->device->dev.parent, 507 "no client data found!\n"); 508 return -ECONNREFUSED; 509 } 510 ibdev = queue->device->dev; 511 512 /* +1 for ib_stop_cq */ 513 queue->cq_size = cq_factor * queue->queue_size + 1; 514 515 ret = nvme_rdma_create_cq(ibdev, queue); 516 if (ret) 517 goto out_put_dev; 518 519 ret = nvme_rdma_create_qp(queue, send_wr_factor); 520 if (ret) 521 goto out_destroy_ib_cq; 522 523 queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size, 524 sizeof(struct nvme_completion), DMA_FROM_DEVICE); 525 if (!queue->rsp_ring) { 526 ret = -ENOMEM; 527 goto out_destroy_qp; 528 } 529 530 /* 531 * Currently we don't use SG_GAPS MR's so if the first entry is 532 * misaligned we'll end up using two entries for a single data page, 533 * so one additional entry is required. 534 */ 535 pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1; 536 ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs, 537 queue->queue_size, 538 IB_MR_TYPE_MEM_REG, 539 pages_per_mr, 0); 540 if (ret) { 541 dev_err(queue->ctrl->ctrl.device, 542 "failed to initialize MR pool sized %d for QID %d\n", 543 queue->queue_size, nvme_rdma_queue_idx(queue)); 544 goto out_destroy_ring; 545 } 546 547 if (queue->pi_support) { 548 ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs, 549 queue->queue_size, IB_MR_TYPE_INTEGRITY, 550 pages_per_mr, pages_per_mr); 551 if (ret) { 552 dev_err(queue->ctrl->ctrl.device, 553 "failed to initialize PI MR pool sized %d for QID %d\n", 554 queue->queue_size, nvme_rdma_queue_idx(queue)); 555 goto out_destroy_mr_pool; 556 } 557 } 558 559 set_bit(NVME_RDMA_Q_TR_READY, &queue->flags); 560 561 return 0; 562 563 out_destroy_mr_pool: 564 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); 565 out_destroy_ring: 566 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, 567 sizeof(struct nvme_completion), DMA_FROM_DEVICE); 568 out_destroy_qp: 569 rdma_destroy_qp(queue->cm_id); 570 out_destroy_ib_cq: 571 nvme_rdma_free_cq(queue); 572 out_put_dev: 573 nvme_rdma_dev_put(queue->device); 574 return ret; 575 } 576 577 static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, 578 int idx, size_t queue_size) 579 { 580 struct nvme_rdma_queue *queue; 581 struct sockaddr *src_addr = NULL; 582 int ret; 583 584 queue = &ctrl->queues[idx]; 585 mutex_init(&queue->queue_lock); 586 queue->ctrl = ctrl; 587 if (idx && ctrl->ctrl.max_integrity_segments) 588 queue->pi_support = true; 589 else 590 queue->pi_support = false; 591 init_completion(&queue->cm_done); 592 593 if (idx > 0) 594 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; 595 else 596 queue->cmnd_capsule_len = sizeof(struct nvme_command); 597 598 queue->queue_size = 
queue_size; 599 600 queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue, 601 RDMA_PS_TCP, IB_QPT_RC); 602 if (IS_ERR(queue->cm_id)) { 603 dev_info(ctrl->ctrl.device, 604 "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id)); 605 ret = PTR_ERR(queue->cm_id); 606 goto out_destroy_mutex; 607 } 608 609 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) 610 src_addr = (struct sockaddr *)&ctrl->src_addr; 611 612 queue->cm_error = -ETIMEDOUT; 613 ret = rdma_resolve_addr(queue->cm_id, src_addr, 614 (struct sockaddr *)&ctrl->addr, 615 NVME_RDMA_CONNECT_TIMEOUT_MS); 616 if (ret) { 617 dev_info(ctrl->ctrl.device, 618 "rdma_resolve_addr failed (%d).\n", ret); 619 goto out_destroy_cm_id; 620 } 621 622 ret = nvme_rdma_wait_for_cm(queue); 623 if (ret) { 624 dev_info(ctrl->ctrl.device, 625 "rdma connection establishment failed (%d)\n", ret); 626 goto out_destroy_cm_id; 627 } 628 629 set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags); 630 631 return 0; 632 633 out_destroy_cm_id: 634 rdma_destroy_id(queue->cm_id); 635 nvme_rdma_destroy_queue_ib(queue); 636 out_destroy_mutex: 637 mutex_destroy(&queue->queue_lock); 638 return ret; 639 } 640 641 static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) 642 { 643 rdma_disconnect(queue->cm_id); 644 ib_drain_qp(queue->qp); 645 } 646 647 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) 648 { 649 mutex_lock(&queue->queue_lock); 650 if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) 651 __nvme_rdma_stop_queue(queue); 652 mutex_unlock(&queue->queue_lock); 653 } 654 655 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) 656 { 657 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) 658 return; 659 660 rdma_destroy_id(queue->cm_id); 661 nvme_rdma_destroy_queue_ib(queue); 662 mutex_destroy(&queue->queue_lock); 663 } 664 665 static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl) 666 { 667 int i; 668 669 for (i = 1; i < ctrl->ctrl.queue_count; i++) 670 nvme_rdma_free_queue(&ctrl->queues[i]); 671 } 672 673 static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl) 674 { 675 int i; 676 677 for (i = 1; i < ctrl->ctrl.queue_count; i++) 678 nvme_rdma_stop_queue(&ctrl->queues[i]); 679 } 680 681 static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx) 682 { 683 struct nvme_rdma_queue *queue = &ctrl->queues[idx]; 684 int ret; 685 686 if (idx) 687 ret = nvmf_connect_io_queue(&ctrl->ctrl, idx); 688 else 689 ret = nvmf_connect_admin_queue(&ctrl->ctrl); 690 691 if (!ret) { 692 set_bit(NVME_RDMA_Q_LIVE, &queue->flags); 693 } else { 694 if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) 695 __nvme_rdma_stop_queue(queue); 696 dev_info(ctrl->ctrl.device, 697 "failed to connect queue: %d ret=%d\n", idx, ret); 698 } 699 return ret; 700 } 701 702 static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl) 703 { 704 int i, ret = 0; 705 706 for (i = 1; i < ctrl->ctrl.queue_count; i++) { 707 ret = nvme_rdma_start_queue(ctrl, i); 708 if (ret) 709 goto out_stop_queues; 710 } 711 712 return 0; 713 714 out_stop_queues: 715 for (i--; i >= 1; i--) 716 nvme_rdma_stop_queue(&ctrl->queues[i]); 717 return ret; 718 } 719 720 static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) 721 { 722 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 723 struct ib_device *ibdev = ctrl->device->dev; 724 unsigned int nr_io_queues, nr_default_queues; 725 unsigned int nr_read_queues, nr_poll_queues; 726 int i, ret; 727 728 nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors, 729 
min(opts->nr_io_queues, num_online_cpus())); 730 nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors, 731 min(opts->nr_write_queues, num_online_cpus())); 732 nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus()); 733 nr_io_queues = nr_read_queues + nr_default_queues + nr_poll_queues; 734 735 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 736 if (ret) 737 return ret; 738 739 if (nr_io_queues == 0) { 740 dev_err(ctrl->ctrl.device, 741 "unable to set any I/O queues\n"); 742 return -ENOMEM; 743 } 744 745 ctrl->ctrl.queue_count = nr_io_queues + 1; 746 dev_info(ctrl->ctrl.device, 747 "creating %d I/O queues.\n", nr_io_queues); 748 749 if (opts->nr_write_queues && nr_read_queues < nr_io_queues) { 750 /* 751 * separate read/write queues 752 * hand out dedicated default queues only after we have 753 * sufficient read queues. 754 */ 755 ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues; 756 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; 757 ctrl->io_queues[HCTX_TYPE_DEFAULT] = 758 min(nr_default_queues, nr_io_queues); 759 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; 760 } else { 761 /* 762 * shared read/write queues 763 * either no write queues were requested, or we don't have 764 * sufficient queue count to have dedicated default queues. 765 */ 766 ctrl->io_queues[HCTX_TYPE_DEFAULT] = 767 min(nr_read_queues, nr_io_queues); 768 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; 769 } 770 771 if (opts->nr_poll_queues && nr_io_queues) { 772 /* map dedicated poll queues only if we have queues left */ 773 ctrl->io_queues[HCTX_TYPE_POLL] = 774 min(nr_poll_queues, nr_io_queues); 775 } 776 777 for (i = 1; i < ctrl->ctrl.queue_count; i++) { 778 ret = nvme_rdma_alloc_queue(ctrl, i, 779 ctrl->ctrl.sqsize + 1); 780 if (ret) 781 goto out_free_queues; 782 } 783 784 return 0; 785 786 out_free_queues: 787 for (i--; i >= 1; i--) 788 nvme_rdma_free_queue(&ctrl->queues[i]); 789 790 return ret; 791 } 792 793 static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, 794 bool admin) 795 { 796 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); 797 struct blk_mq_tag_set *set; 798 int ret; 799 800 if (admin) { 801 set = &ctrl->admin_tag_set; 802 memset(set, 0, sizeof(*set)); 803 set->ops = &nvme_rdma_admin_mq_ops; 804 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; 805 set->reserved_tags = NVMF_RESERVED_TAGS; 806 set->numa_node = nctrl->numa_node; 807 set->cmd_size = sizeof(struct nvme_rdma_request) + 808 NVME_RDMA_DATA_SGL_SIZE; 809 set->driver_data = ctrl; 810 set->nr_hw_queues = 1; 811 set->timeout = NVME_ADMIN_TIMEOUT; 812 set->flags = BLK_MQ_F_NO_SCHED; 813 } else { 814 set = &ctrl->tag_set; 815 memset(set, 0, sizeof(*set)); 816 set->ops = &nvme_rdma_mq_ops; 817 set->queue_depth = nctrl->sqsize + 1; 818 set->reserved_tags = NVMF_RESERVED_TAGS; 819 set->numa_node = nctrl->numa_node; 820 set->flags = BLK_MQ_F_SHOULD_MERGE; 821 set->cmd_size = sizeof(struct nvme_rdma_request) + 822 NVME_RDMA_DATA_SGL_SIZE; 823 if (nctrl->max_integrity_segments) 824 set->cmd_size += sizeof(struct nvme_rdma_sgl) + 825 NVME_RDMA_METADATA_SGL_SIZE; 826 set->driver_data = ctrl; 827 set->nr_hw_queues = nctrl->queue_count - 1; 828 set->timeout = NVME_IO_TIMEOUT; 829 set->nr_maps = nctrl->opts->nr_poll_queues ? 
			HCTX_MAX_TYPES : 2;
	}

	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ERR_PTR(ret);

	return set;
}

static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
		bool remove)
{
	if (remove) {
		blk_cleanup_queue(ctrl->ctrl.admin_q);
		blk_cleanup_queue(ctrl->ctrl.fabrics_q);
		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
	}
	if (ctrl->async_event_sqe.data) {
		cancel_work_sync(&ctrl->ctrl.async_event_work);
		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
				sizeof(struct nvme_command), DMA_TO_DEVICE);
		ctrl->async_event_sqe.data = NULL;
	}
	nvme_rdma_free_queue(&ctrl->queues[0]);
}

static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
		bool new)
{
	bool pi_capable = false;
	int error;

	error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
	if (error)
		return error;

	ctrl->device = ctrl->queues[0].device;
	ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev);

	/* T10-PI support */
	if (ctrl->device->dev->attrs.device_cap_flags &
	    IB_DEVICE_INTEGRITY_HANDOVER)
		pi_capable = true;

	ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
							pi_capable);

	/*
	 * Bind the async event SQE DMA mapping to the admin queue lifetime.
	 * It's safe, since any change in the underlying RDMA device will issue
	 * error recovery and queue re-creation.
	 */
	error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
			sizeof(struct nvme_command), DMA_TO_DEVICE);
	if (error)
		goto out_free_queue;

	if (new) {
		ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
		if (IS_ERR(ctrl->ctrl.admin_tagset)) {
			error = PTR_ERR(ctrl->ctrl.admin_tagset);
			goto out_free_async_qe;
		}

		ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
		if (IS_ERR(ctrl->ctrl.fabrics_q)) {
			error = PTR_ERR(ctrl->ctrl.fabrics_q);
			goto out_free_tagset;
		}

		ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
		if (IS_ERR(ctrl->ctrl.admin_q)) {
			error = PTR_ERR(ctrl->ctrl.admin_q);
			goto out_cleanup_fabrics_q;
		}
	}

	error = nvme_rdma_start_queue(ctrl, 0);
	if (error)
		goto out_cleanup_queue;

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
		goto out_stop_queue;

	ctrl->ctrl.max_segments = ctrl->max_fr_pages;
	ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
	if (pi_capable)
		ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
	else
		ctrl->ctrl.max_integrity_segments = 0;

	nvme_start_admin_queue(&ctrl->ctrl);

	error = nvme_init_ctrl_finish(&ctrl->ctrl);
	if (error)
		goto out_quiesce_queue;

	return 0;

out_quiesce_queue:
	nvme_stop_admin_queue(&ctrl->ctrl);
	blk_sync_queue(ctrl->ctrl.admin_q);
out_stop_queue:
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	nvme_cancel_admin_tagset(&ctrl->ctrl);
out_cleanup_queue:
	if (new)
		blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
	if (new)
		blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
	if (new)
		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
out_free_async_qe:
	if (ctrl->async_event_sqe.data) {
		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
				sizeof(struct nvme_command), DMA_TO_DEVICE);
		ctrl->async_event_sqe.data = NULL;
	}
out_free_queue:
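	/*
	 * The error labels above unwind in reverse order of setup; this last
	 * step drops the admin queue allocated by nvme_rdma_alloc_queue() at
	 * the top of this function.
	 */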
nvme_rdma_free_queue(&ctrl->queues[0]); 953 return error; 954 } 955 956 static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl, 957 bool remove) 958 { 959 if (remove) { 960 blk_cleanup_queue(ctrl->ctrl.connect_q); 961 blk_mq_free_tag_set(ctrl->ctrl.tagset); 962 } 963 nvme_rdma_free_io_queues(ctrl); 964 } 965 966 static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) 967 { 968 int ret; 969 970 ret = nvme_rdma_alloc_io_queues(ctrl); 971 if (ret) 972 return ret; 973 974 if (new) { 975 ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false); 976 if (IS_ERR(ctrl->ctrl.tagset)) { 977 ret = PTR_ERR(ctrl->ctrl.tagset); 978 goto out_free_io_queues; 979 } 980 981 ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl)); 982 if (ret) 983 goto out_free_tag_set; 984 } 985 986 ret = nvme_rdma_start_io_queues(ctrl); 987 if (ret) 988 goto out_cleanup_connect_q; 989 990 if (!new) { 991 nvme_start_queues(&ctrl->ctrl); 992 if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { 993 /* 994 * If we timed out waiting for freeze we are likely to 995 * be stuck. Fail the controller initialization just 996 * to be safe. 997 */ 998 ret = -ENODEV; 999 goto out_wait_freeze_timed_out; 1000 } 1001 blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, 1002 ctrl->ctrl.queue_count - 1); 1003 nvme_unfreeze(&ctrl->ctrl); 1004 } 1005 1006 return 0; 1007 1008 out_wait_freeze_timed_out: 1009 nvme_stop_queues(&ctrl->ctrl); 1010 nvme_sync_io_queues(&ctrl->ctrl); 1011 nvme_rdma_stop_io_queues(ctrl); 1012 out_cleanup_connect_q: 1013 nvme_cancel_tagset(&ctrl->ctrl); 1014 if (new) 1015 blk_cleanup_queue(ctrl->ctrl.connect_q); 1016 out_free_tag_set: 1017 if (new) 1018 blk_mq_free_tag_set(ctrl->ctrl.tagset); 1019 out_free_io_queues: 1020 nvme_rdma_free_io_queues(ctrl); 1021 return ret; 1022 } 1023 1024 static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, 1025 bool remove) 1026 { 1027 nvme_stop_admin_queue(&ctrl->ctrl); 1028 blk_sync_queue(ctrl->ctrl.admin_q); 1029 nvme_rdma_stop_queue(&ctrl->queues[0]); 1030 nvme_cancel_admin_tagset(&ctrl->ctrl); 1031 if (remove) 1032 nvme_start_admin_queue(&ctrl->ctrl); 1033 nvme_rdma_destroy_admin_queue(ctrl, remove); 1034 } 1035 1036 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, 1037 bool remove) 1038 { 1039 if (ctrl->ctrl.queue_count > 1) { 1040 nvme_start_freeze(&ctrl->ctrl); 1041 nvme_stop_queues(&ctrl->ctrl); 1042 nvme_sync_io_queues(&ctrl->ctrl); 1043 nvme_rdma_stop_io_queues(ctrl); 1044 nvme_cancel_tagset(&ctrl->ctrl); 1045 if (remove) 1046 nvme_start_queues(&ctrl->ctrl); 1047 nvme_rdma_destroy_io_queues(ctrl, remove); 1048 } 1049 } 1050 1051 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) 1052 { 1053 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); 1054 1055 if (list_empty(&ctrl->list)) 1056 goto free_ctrl; 1057 1058 mutex_lock(&nvme_rdma_ctrl_mutex); 1059 list_del(&ctrl->list); 1060 mutex_unlock(&nvme_rdma_ctrl_mutex); 1061 1062 nvmf_free_options(nctrl->opts); 1063 free_ctrl: 1064 kfree(ctrl->queues); 1065 kfree(ctrl); 1066 } 1067 1068 static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl) 1069 { 1070 /* If we are resetting/deleting then do nothing */ 1071 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) { 1072 WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW || 1073 ctrl->ctrl.state == NVME_CTRL_LIVE); 1074 return; 1075 } 1076 1077 if (nvmf_should_reconnect(&ctrl->ctrl)) { 1078 dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n", 1079 ctrl->ctrl.opts->reconnect_delay); 1080 
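		/*
		 * opts->reconnect_delay is in seconds, while
		 * queue_delayed_work() takes jiffies, hence the HZ scaling
		 * below.
		 */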
queue_delayed_work(nvme_wq, &ctrl->reconnect_work, 1081 ctrl->ctrl.opts->reconnect_delay * HZ); 1082 } else { 1083 nvme_delete_ctrl(&ctrl->ctrl); 1084 } 1085 } 1086 1087 static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) 1088 { 1089 int ret; 1090 bool changed; 1091 1092 ret = nvme_rdma_configure_admin_queue(ctrl, new); 1093 if (ret) 1094 return ret; 1095 1096 if (ctrl->ctrl.icdoff) { 1097 ret = -EOPNOTSUPP; 1098 dev_err(ctrl->ctrl.device, "icdoff is not supported!\n"); 1099 goto destroy_admin; 1100 } 1101 1102 if (!(ctrl->ctrl.sgls & (1 << 2))) { 1103 ret = -EOPNOTSUPP; 1104 dev_err(ctrl->ctrl.device, 1105 "Mandatory keyed sgls are not supported!\n"); 1106 goto destroy_admin; 1107 } 1108 1109 if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) { 1110 dev_warn(ctrl->ctrl.device, 1111 "queue_size %zu > ctrl sqsize %u, clamping down\n", 1112 ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1); 1113 } 1114 1115 if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) { 1116 dev_warn(ctrl->ctrl.device, 1117 "ctrl sqsize %u > max queue size %u, clamping down\n", 1118 ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE); 1119 ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1; 1120 } 1121 1122 if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) { 1123 dev_warn(ctrl->ctrl.device, 1124 "sqsize %u > ctrl maxcmd %u, clamping down\n", 1125 ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd); 1126 ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1; 1127 } 1128 1129 if (ctrl->ctrl.sgls & (1 << 20)) 1130 ctrl->use_inline_data = true; 1131 1132 if (ctrl->ctrl.queue_count > 1) { 1133 ret = nvme_rdma_configure_io_queues(ctrl, new); 1134 if (ret) 1135 goto destroy_admin; 1136 } 1137 1138 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 1139 if (!changed) { 1140 /* 1141 * state change failure is ok if we started ctrl delete, 1142 * unless we're during creation of a new controller to 1143 * avoid races with teardown flow. 
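	 * The WARN_ON_ONCE checks below assert exactly that: an existing
	 * controller may only lose this race to DELETING or DELETING_NOIO,
	 * and a freshly created controller is not expected to lose it at all.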
1144 */ 1145 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING && 1146 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO); 1147 WARN_ON_ONCE(new); 1148 ret = -EINVAL; 1149 goto destroy_io; 1150 } 1151 1152 nvme_start_ctrl(&ctrl->ctrl); 1153 return 0; 1154 1155 destroy_io: 1156 if (ctrl->ctrl.queue_count > 1) { 1157 nvme_stop_queues(&ctrl->ctrl); 1158 nvme_sync_io_queues(&ctrl->ctrl); 1159 nvme_rdma_stop_io_queues(ctrl); 1160 nvme_cancel_tagset(&ctrl->ctrl); 1161 nvme_rdma_destroy_io_queues(ctrl, new); 1162 } 1163 destroy_admin: 1164 nvme_stop_admin_queue(&ctrl->ctrl); 1165 blk_sync_queue(ctrl->ctrl.admin_q); 1166 nvme_rdma_stop_queue(&ctrl->queues[0]); 1167 nvme_cancel_admin_tagset(&ctrl->ctrl); 1168 nvme_rdma_destroy_admin_queue(ctrl, new); 1169 return ret; 1170 } 1171 1172 static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) 1173 { 1174 struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work), 1175 struct nvme_rdma_ctrl, reconnect_work); 1176 1177 ++ctrl->ctrl.nr_reconnects; 1178 1179 if (nvme_rdma_setup_ctrl(ctrl, false)) 1180 goto requeue; 1181 1182 dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n", 1183 ctrl->ctrl.nr_reconnects); 1184 1185 ctrl->ctrl.nr_reconnects = 0; 1186 1187 return; 1188 1189 requeue: 1190 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", 1191 ctrl->ctrl.nr_reconnects); 1192 nvme_rdma_reconnect_or_remove(ctrl); 1193 } 1194 1195 static void nvme_rdma_error_recovery_work(struct work_struct *work) 1196 { 1197 struct nvme_rdma_ctrl *ctrl = container_of(work, 1198 struct nvme_rdma_ctrl, err_work); 1199 1200 nvme_stop_keep_alive(&ctrl->ctrl); 1201 flush_work(&ctrl->ctrl.async_event_work); 1202 nvme_rdma_teardown_io_queues(ctrl, false); 1203 nvme_start_queues(&ctrl->ctrl); 1204 nvme_rdma_teardown_admin_queue(ctrl, false); 1205 nvme_start_admin_queue(&ctrl->ctrl); 1206 1207 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { 1208 /* state change failure is ok if we started ctrl delete */ 1209 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING && 1210 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO); 1211 return; 1212 } 1213 1214 nvme_rdma_reconnect_or_remove(ctrl); 1215 } 1216 1217 static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) 1218 { 1219 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) 1220 return; 1221 1222 dev_warn(ctrl->ctrl.device, "starting error recovery\n"); 1223 queue_work(nvme_reset_wq, &ctrl->err_work); 1224 } 1225 1226 static void nvme_rdma_end_request(struct nvme_rdma_request *req) 1227 { 1228 struct request *rq = blk_mq_rq_from_pdu(req); 1229 1230 if (!refcount_dec_and_test(&req->ref)) 1231 return; 1232 if (!nvme_try_complete_req(rq, req->status, req->result)) 1233 nvme_rdma_complete_rq(rq); 1234 } 1235 1236 static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc, 1237 const char *op) 1238 { 1239 struct nvme_rdma_queue *queue = wc->qp->qp_context; 1240 struct nvme_rdma_ctrl *ctrl = queue->ctrl; 1241 1242 if (ctrl->ctrl.state == NVME_CTRL_LIVE) 1243 dev_info(ctrl->ctrl.device, 1244 "%s for CQE 0x%p failed with status %s (%d)\n", 1245 op, wc->wr_cqe, 1246 ib_wc_status_msg(wc->status), wc->status); 1247 nvme_rdma_error_recovery(ctrl); 1248 } 1249 1250 static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc) 1251 { 1252 if (unlikely(wc->status != IB_WC_SUCCESS)) 1253 nvme_rdma_wr_error(cq, wc, "MEMREG"); 1254 } 1255 1256 static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) 1257 { 1258 struct nvme_rdma_request *req = 1259 
container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe); 1260 1261 if (unlikely(wc->status != IB_WC_SUCCESS)) 1262 nvme_rdma_wr_error(cq, wc, "LOCAL_INV"); 1263 else 1264 nvme_rdma_end_request(req); 1265 } 1266 1267 static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue, 1268 struct nvme_rdma_request *req) 1269 { 1270 struct ib_send_wr wr = { 1271 .opcode = IB_WR_LOCAL_INV, 1272 .next = NULL, 1273 .num_sge = 0, 1274 .send_flags = IB_SEND_SIGNALED, 1275 .ex.invalidate_rkey = req->mr->rkey, 1276 }; 1277 1278 req->reg_cqe.done = nvme_rdma_inv_rkey_done; 1279 wr.wr_cqe = &req->reg_cqe; 1280 1281 return ib_post_send(queue->qp, &wr, NULL); 1282 } 1283 1284 static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq) 1285 { 1286 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 1287 1288 if (blk_integrity_rq(rq)) { 1289 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl, 1290 req->metadata_sgl->nents, rq_dma_dir(rq)); 1291 sg_free_table_chained(&req->metadata_sgl->sg_table, 1292 NVME_INLINE_METADATA_SG_CNT); 1293 } 1294 1295 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, 1296 rq_dma_dir(rq)); 1297 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT); 1298 } 1299 1300 static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, 1301 struct request *rq) 1302 { 1303 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 1304 struct nvme_rdma_device *dev = queue->device; 1305 struct ib_device *ibdev = dev->dev; 1306 struct list_head *pool = &queue->qp->rdma_mrs; 1307 1308 if (!blk_rq_nr_phys_segments(rq)) 1309 return; 1310 1311 if (req->use_sig_mr) 1312 pool = &queue->qp->sig_mrs; 1313 1314 if (req->mr) { 1315 ib_mr_pool_put(queue->qp, pool, req->mr); 1316 req->mr = NULL; 1317 } 1318 1319 nvme_rdma_dma_unmap_req(ibdev, rq); 1320 } 1321 1322 static int nvme_rdma_set_sg_null(struct nvme_command *c) 1323 { 1324 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; 1325 1326 sg->addr = 0; 1327 put_unaligned_le24(0, sg->length); 1328 put_unaligned_le32(0, sg->key); 1329 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; 1330 return 0; 1331 } 1332 1333 static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue, 1334 struct nvme_rdma_request *req, struct nvme_command *c, 1335 int count) 1336 { 1337 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; 1338 struct ib_sge *sge = &req->sge[1]; 1339 struct scatterlist *sgl; 1340 u32 len = 0; 1341 int i; 1342 1343 for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) { 1344 sge->addr = sg_dma_address(sgl); 1345 sge->length = sg_dma_len(sgl); 1346 sge->lkey = queue->device->pd->local_dma_lkey; 1347 len += sge->length; 1348 sge++; 1349 } 1350 1351 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); 1352 sg->length = cpu_to_le32(len); 1353 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; 1354 1355 req->num_sge += count; 1356 return 0; 1357 } 1358 1359 static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue, 1360 struct nvme_rdma_request *req, struct nvme_command *c) 1361 { 1362 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; 1363 1364 sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl)); 1365 put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length); 1366 put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key); 1367 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; 1368 return 0; 1369 } 1370 1371 static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue, 1372 struct nvme_rdma_request *req, struct nvme_command *c, 1373 int count) 
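/*
 * Fast-register (FR) path: grab a per-request MR from the QP's rdma_mrs pool,
 * map the request's data SG list with it, and advertise the result to the
 * target as a single keyed SGL descriptor (filled in below).
 */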
1374 { 1375 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; 1376 int nr; 1377 1378 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs); 1379 if (WARN_ON_ONCE(!req->mr)) 1380 return -EAGAIN; 1381 1382 /* 1383 * Align the MR to a 4K page size to match the ctrl page size and 1384 * the block virtual boundary. 1385 */ 1386 nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL, 1387 SZ_4K); 1388 if (unlikely(nr < count)) { 1389 ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr); 1390 req->mr = NULL; 1391 if (nr < 0) 1392 return nr; 1393 return -EINVAL; 1394 } 1395 1396 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); 1397 1398 req->reg_cqe.done = nvme_rdma_memreg_done; 1399 memset(&req->reg_wr, 0, sizeof(req->reg_wr)); 1400 req->reg_wr.wr.opcode = IB_WR_REG_MR; 1401 req->reg_wr.wr.wr_cqe = &req->reg_cqe; 1402 req->reg_wr.wr.num_sge = 0; 1403 req->reg_wr.mr = req->mr; 1404 req->reg_wr.key = req->mr->rkey; 1405 req->reg_wr.access = IB_ACCESS_LOCAL_WRITE | 1406 IB_ACCESS_REMOTE_READ | 1407 IB_ACCESS_REMOTE_WRITE; 1408 1409 sg->addr = cpu_to_le64(req->mr->iova); 1410 put_unaligned_le24(req->mr->length, sg->length); 1411 put_unaligned_le32(req->mr->rkey, sg->key); 1412 sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) | 1413 NVME_SGL_FMT_INVALIDATE; 1414 1415 return 0; 1416 } 1417 1418 static void nvme_rdma_set_sig_domain(struct blk_integrity *bi, 1419 struct nvme_command *cmd, struct ib_sig_domain *domain, 1420 u16 control, u8 pi_type) 1421 { 1422 domain->sig_type = IB_SIG_TYPE_T10_DIF; 1423 domain->sig.dif.bg_type = IB_T10DIF_CRC; 1424 domain->sig.dif.pi_interval = 1 << bi->interval_exp; 1425 domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag); 1426 if (control & NVME_RW_PRINFO_PRCHK_REF) 1427 domain->sig.dif.ref_remap = true; 1428 1429 domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag); 1430 domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask); 1431 domain->sig.dif.app_escape = true; 1432 if (pi_type == NVME_NS_DPS_PI_TYPE3) 1433 domain->sig.dif.ref_escape = true; 1434 } 1435 1436 static void nvme_rdma_set_sig_attrs(struct blk_integrity *bi, 1437 struct nvme_command *cmd, struct ib_sig_attrs *sig_attrs, 1438 u8 pi_type) 1439 { 1440 u16 control = le16_to_cpu(cmd->rw.control); 1441 1442 memset(sig_attrs, 0, sizeof(*sig_attrs)); 1443 if (control & NVME_RW_PRINFO_PRACT) { 1444 /* for WRITE_INSERT/READ_STRIP no memory domain */ 1445 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; 1446 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, 1447 pi_type); 1448 /* Clear the PRACT bit since HCA will generate/verify the PI */ 1449 control &= ~NVME_RW_PRINFO_PRACT; 1450 cmd->rw.control = cpu_to_le16(control); 1451 } else { 1452 /* for WRITE_PASS/READ_PASS both wire/memory domains exist */ 1453 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, 1454 pi_type); 1455 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control, 1456 pi_type); 1457 } 1458 } 1459 1460 static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask) 1461 { 1462 *mask = 0; 1463 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF) 1464 *mask |= IB_SIG_CHECK_REFTAG; 1465 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD) 1466 *mask |= IB_SIG_CHECK_GUARD; 1467 } 1468 1469 static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc) 1470 { 1471 if (unlikely(wc->status != IB_WC_SUCCESS)) 1472 nvme_rdma_wr_error(cq, wc, "SIG"); 1473 } 1474 1475 static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue, 1476 struct nvme_rdma_request 
*req, struct nvme_command *c, 1477 int count, int pi_count) 1478 { 1479 struct nvme_rdma_sgl *sgl = &req->data_sgl; 1480 struct ib_reg_wr *wr = &req->reg_wr; 1481 struct request *rq = blk_mq_rq_from_pdu(req); 1482 struct nvme_ns *ns = rq->q->queuedata; 1483 struct bio *bio = rq->bio; 1484 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; 1485 int nr; 1486 1487 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs); 1488 if (WARN_ON_ONCE(!req->mr)) 1489 return -EAGAIN; 1490 1491 nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL, 1492 req->metadata_sgl->sg_table.sgl, pi_count, NULL, 1493 SZ_4K); 1494 if (unlikely(nr)) 1495 goto mr_put; 1496 1497 nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c, 1498 req->mr->sig_attrs, ns->pi_type); 1499 nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask); 1500 1501 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); 1502 1503 req->reg_cqe.done = nvme_rdma_sig_done; 1504 memset(wr, 0, sizeof(*wr)); 1505 wr->wr.opcode = IB_WR_REG_MR_INTEGRITY; 1506 wr->wr.wr_cqe = &req->reg_cqe; 1507 wr->wr.num_sge = 0; 1508 wr->wr.send_flags = 0; 1509 wr->mr = req->mr; 1510 wr->key = req->mr->rkey; 1511 wr->access = IB_ACCESS_LOCAL_WRITE | 1512 IB_ACCESS_REMOTE_READ | 1513 IB_ACCESS_REMOTE_WRITE; 1514 1515 sg->addr = cpu_to_le64(req->mr->iova); 1516 put_unaligned_le24(req->mr->length, sg->length); 1517 put_unaligned_le32(req->mr->rkey, sg->key); 1518 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; 1519 1520 return 0; 1521 1522 mr_put: 1523 ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr); 1524 req->mr = NULL; 1525 if (nr < 0) 1526 return nr; 1527 return -EINVAL; 1528 } 1529 1530 static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq, 1531 int *count, int *pi_count) 1532 { 1533 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 1534 int ret; 1535 1536 req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1); 1537 ret = sg_alloc_table_chained(&req->data_sgl.sg_table, 1538 blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl, 1539 NVME_INLINE_SG_CNT); 1540 if (ret) 1541 return -ENOMEM; 1542 1543 req->data_sgl.nents = blk_rq_map_sg(rq->q, rq, 1544 req->data_sgl.sg_table.sgl); 1545 1546 *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl, 1547 req->data_sgl.nents, rq_dma_dir(rq)); 1548 if (unlikely(*count <= 0)) { 1549 ret = -EIO; 1550 goto out_free_table; 1551 } 1552 1553 if (blk_integrity_rq(rq)) { 1554 req->metadata_sgl->sg_table.sgl = 1555 (struct scatterlist *)(req->metadata_sgl + 1); 1556 ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table, 1557 blk_rq_count_integrity_sg(rq->q, rq->bio), 1558 req->metadata_sgl->sg_table.sgl, 1559 NVME_INLINE_METADATA_SG_CNT); 1560 if (unlikely(ret)) { 1561 ret = -ENOMEM; 1562 goto out_unmap_sg; 1563 } 1564 1565 req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q, 1566 rq->bio, req->metadata_sgl->sg_table.sgl); 1567 *pi_count = ib_dma_map_sg(ibdev, 1568 req->metadata_sgl->sg_table.sgl, 1569 req->metadata_sgl->nents, 1570 rq_dma_dir(rq)); 1571 if (unlikely(*pi_count <= 0)) { 1572 ret = -EIO; 1573 goto out_free_pi_table; 1574 } 1575 } 1576 1577 return 0; 1578 1579 out_free_pi_table: 1580 sg_free_table_chained(&req->metadata_sgl->sg_table, 1581 NVME_INLINE_METADATA_SG_CNT); 1582 out_unmap_sg: 1583 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, 1584 rq_dma_dir(rq)); 1585 out_free_table: 1586 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT); 1587 return ret; 1588 } 1589 1590 static int 
nvme_rdma_map_data(struct nvme_rdma_queue *queue, 1591 struct request *rq, struct nvme_command *c) 1592 { 1593 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 1594 struct nvme_rdma_device *dev = queue->device; 1595 struct ib_device *ibdev = dev->dev; 1596 int pi_count = 0; 1597 int count, ret; 1598 1599 req->num_sge = 1; 1600 refcount_set(&req->ref, 2); /* send and recv completions */ 1601 1602 c->common.flags |= NVME_CMD_SGL_METABUF; 1603 1604 if (!blk_rq_nr_phys_segments(rq)) 1605 return nvme_rdma_set_sg_null(c); 1606 1607 ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count); 1608 if (unlikely(ret)) 1609 return ret; 1610 1611 if (req->use_sig_mr) { 1612 ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count); 1613 goto out; 1614 } 1615 1616 if (count <= dev->num_inline_segments) { 1617 if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) && 1618 queue->ctrl->use_inline_data && 1619 blk_rq_payload_bytes(rq) <= 1620 nvme_rdma_inline_data_size(queue)) { 1621 ret = nvme_rdma_map_sg_inline(queue, req, c, count); 1622 goto out; 1623 } 1624 1625 if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) { 1626 ret = nvme_rdma_map_sg_single(queue, req, c); 1627 goto out; 1628 } 1629 } 1630 1631 ret = nvme_rdma_map_sg_fr(queue, req, c, count); 1632 out: 1633 if (unlikely(ret)) 1634 goto out_dma_unmap_req; 1635 1636 return 0; 1637 1638 out_dma_unmap_req: 1639 nvme_rdma_dma_unmap_req(ibdev, rq); 1640 return ret; 1641 } 1642 1643 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) 1644 { 1645 struct nvme_rdma_qe *qe = 1646 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); 1647 struct nvme_rdma_request *req = 1648 container_of(qe, struct nvme_rdma_request, sqe); 1649 1650 if (unlikely(wc->status != IB_WC_SUCCESS)) 1651 nvme_rdma_wr_error(cq, wc, "SEND"); 1652 else 1653 nvme_rdma_end_request(req); 1654 } 1655 1656 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, 1657 struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge, 1658 struct ib_send_wr *first) 1659 { 1660 struct ib_send_wr wr; 1661 int ret; 1662 1663 sge->addr = qe->dma; 1664 sge->length = sizeof(struct nvme_command); 1665 sge->lkey = queue->device->pd->local_dma_lkey; 1666 1667 wr.next = NULL; 1668 wr.wr_cqe = &qe->cqe; 1669 wr.sg_list = sge; 1670 wr.num_sge = num_sge; 1671 wr.opcode = IB_WR_SEND; 1672 wr.send_flags = IB_SEND_SIGNALED; 1673 1674 if (first) 1675 first->next = ≀ 1676 else 1677 first = ≀ 1678 1679 ret = ib_post_send(queue->qp, first, NULL); 1680 if (unlikely(ret)) { 1681 dev_err(queue->ctrl->ctrl.device, 1682 "%s failed with error code %d\n", __func__, ret); 1683 } 1684 return ret; 1685 } 1686 1687 static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue, 1688 struct nvme_rdma_qe *qe) 1689 { 1690 struct ib_recv_wr wr; 1691 struct ib_sge list; 1692 int ret; 1693 1694 list.addr = qe->dma; 1695 list.length = sizeof(struct nvme_completion); 1696 list.lkey = queue->device->pd->local_dma_lkey; 1697 1698 qe->cqe.done = nvme_rdma_recv_done; 1699 1700 wr.next = NULL; 1701 wr.wr_cqe = &qe->cqe; 1702 wr.sg_list = &list; 1703 wr.num_sge = 1; 1704 1705 ret = ib_post_recv(queue->qp, &wr, NULL); 1706 if (unlikely(ret)) { 1707 dev_err(queue->ctrl->ctrl.device, 1708 "%s failed with error code %d\n", __func__, ret); 1709 } 1710 return ret; 1711 } 1712 1713 static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue) 1714 { 1715 u32 queue_idx = nvme_rdma_queue_idx(queue); 1716 1717 if (queue_idx == 0) 1718 return queue->ctrl->admin_tag_set.tags[queue_idx]; 1719 return 
queue->ctrl->tag_set.tags[queue_idx - 1]; 1720 } 1721 1722 static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc) 1723 { 1724 if (unlikely(wc->status != IB_WC_SUCCESS)) 1725 nvme_rdma_wr_error(cq, wc, "ASYNC"); 1726 } 1727 1728 static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg) 1729 { 1730 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg); 1731 struct nvme_rdma_queue *queue = &ctrl->queues[0]; 1732 struct ib_device *dev = queue->device->dev; 1733 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe; 1734 struct nvme_command *cmd = sqe->data; 1735 struct ib_sge sge; 1736 int ret; 1737 1738 ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE); 1739 1740 memset(cmd, 0, sizeof(*cmd)); 1741 cmd->common.opcode = nvme_admin_async_event; 1742 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; 1743 cmd->common.flags |= NVME_CMD_SGL_METABUF; 1744 nvme_rdma_set_sg_null(cmd); 1745 1746 sqe->cqe.done = nvme_rdma_async_done; 1747 1748 ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd), 1749 DMA_TO_DEVICE); 1750 1751 ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL); 1752 WARN_ON_ONCE(ret); 1753 } 1754 1755 static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, 1756 struct nvme_completion *cqe, struct ib_wc *wc) 1757 { 1758 struct request *rq; 1759 struct nvme_rdma_request *req; 1760 1761 rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id); 1762 if (!rq) { 1763 dev_err(queue->ctrl->ctrl.device, 1764 "got bad command_id %#x on QP %#x\n", 1765 cqe->command_id, queue->qp->qp_num); 1766 nvme_rdma_error_recovery(queue->ctrl); 1767 return; 1768 } 1769 req = blk_mq_rq_to_pdu(rq); 1770 1771 req->status = cqe->status; 1772 req->result = cqe->result; 1773 1774 if (wc->wc_flags & IB_WC_WITH_INVALIDATE) { 1775 if (unlikely(!req->mr || 1776 wc->ex.invalidate_rkey != req->mr->rkey)) { 1777 dev_err(queue->ctrl->ctrl.device, 1778 "Bogus remote invalidation for rkey %#x\n", 1779 req->mr ? req->mr->rkey : 0); 1780 nvme_rdma_error_recovery(queue->ctrl); 1781 } 1782 } else if (req->mr) { 1783 int ret; 1784 1785 ret = nvme_rdma_inv_rkey(queue, req); 1786 if (unlikely(ret < 0)) { 1787 dev_err(queue->ctrl->ctrl.device, 1788 "Queueing INV WR for rkey %#x failed (%d)\n", 1789 req->mr->rkey, ret); 1790 nvme_rdma_error_recovery(queue->ctrl); 1791 } 1792 /* the local invalidation completion will end the request */ 1793 return; 1794 } 1795 1796 nvme_rdma_end_request(req); 1797 } 1798 1799 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1800 { 1801 struct nvme_rdma_qe *qe = 1802 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); 1803 struct nvme_rdma_queue *queue = wc->qp->qp_context; 1804 struct ib_device *ibdev = queue->device->dev; 1805 struct nvme_completion *cqe = qe->data; 1806 const size_t len = sizeof(struct nvme_completion); 1807 1808 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1809 nvme_rdma_wr_error(cq, wc, "RECV"); 1810 return; 1811 } 1812 1813 /* sanity checking for received data length */ 1814 if (unlikely(wc->byte_len < len)) { 1815 dev_err(queue->ctrl->ctrl.device, 1816 "Unexpected nvme completion length(%d)\n", wc->byte_len); 1817 nvme_rdma_error_recovery(queue->ctrl); 1818 return; 1819 } 1820 1821 ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE); 1822 /* 1823 * AEN requests are special as they don't time out and can 1824 * survive any kind of queue freeze and often don't respond to 1825 * aborts. We don't even bother to allocate a struct request 1826 * for them but rather special case them here. 
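	 * They are recognized purely by command_id:
	 * nvme_rdma_submit_async_event() posts them with
	 * command_id == NVME_AQ_BLK_MQ_DEPTH, which is what
	 * nvme_is_aen_req() below keys off for admin queue completions.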
1827 */ 1828 if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue), 1829 cqe->command_id))) 1830 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, 1831 &cqe->result); 1832 else 1833 nvme_rdma_process_nvme_rsp(queue, cqe, wc); 1834 ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE); 1835 1836 nvme_rdma_post_recv(queue, qe); 1837 } 1838 1839 static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue) 1840 { 1841 int ret, i; 1842 1843 for (i = 0; i < queue->queue_size; i++) { 1844 ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); 1845 if (ret) 1846 return ret; 1847 } 1848 1849 return 0; 1850 } 1851 1852 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, 1853 struct rdma_cm_event *ev) 1854 { 1855 struct rdma_cm_id *cm_id = queue->cm_id; 1856 int status = ev->status; 1857 const char *rej_msg; 1858 const struct nvme_rdma_cm_rej *rej_data; 1859 u8 rej_data_len; 1860 1861 rej_msg = rdma_reject_msg(cm_id, status); 1862 rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len); 1863 1864 if (rej_data && rej_data_len >= sizeof(u16)) { 1865 u16 sts = le16_to_cpu(rej_data->sts); 1866 1867 dev_err(queue->ctrl->ctrl.device, 1868 "Connect rejected: status %d (%s) nvme status %d (%s).\n", 1869 status, rej_msg, sts, nvme_rdma_cm_msg(sts)); 1870 } else { 1871 dev_err(queue->ctrl->ctrl.device, 1872 "Connect rejected: status %d (%s).\n", status, rej_msg); 1873 } 1874 1875 return -ECONNRESET; 1876 } 1877 1878 static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue) 1879 { 1880 struct nvme_ctrl *ctrl = &queue->ctrl->ctrl; 1881 int ret; 1882 1883 ret = nvme_rdma_create_queue_ib(queue); 1884 if (ret) 1885 return ret; 1886 1887 if (ctrl->opts->tos >= 0) 1888 rdma_set_service_type(queue->cm_id, ctrl->opts->tos); 1889 ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS); 1890 if (ret) { 1891 dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n", 1892 queue->cm_error); 1893 goto out_destroy_queue; 1894 } 1895 1896 return 0; 1897 1898 out_destroy_queue: 1899 nvme_rdma_destroy_queue_ib(queue); 1900 return ret; 1901 } 1902 1903 static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) 1904 { 1905 struct nvme_rdma_ctrl *ctrl = queue->ctrl; 1906 struct rdma_conn_param param = { }; 1907 struct nvme_rdma_cm_req priv = { }; 1908 int ret; 1909 1910 param.qp_num = queue->qp->qp_num; 1911 param.flow_control = 1; 1912 1913 param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom; 1914 /* maximum retry count */ 1915 param.retry_count = 7; 1916 param.rnr_retry_count = 7; 1917 param.private_data = &priv; 1918 param.private_data_len = sizeof(priv); 1919 1920 priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); 1921 priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue)); 1922 /* 1923 * set the admin queue depth to the minimum size 1924 * specified by the Fabrics standard. 1925 */ 1926 if (priv.qid == 0) { 1927 priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH); 1928 priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1); 1929 } else { 1930 /* 1931 * current interpretation of the fabrics spec 1932 * is at minimum you make hrqsize sqsize+1, or a 1933 * 1's based representation of sqsize. 
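		 * E.g. with a 0's based sqsize of 127, the I/O queue was
		 * allocated with queue_size = sqsize + 1 = 128 (see
		 * nvme_rdma_alloc_io_queues()), so we advertise hrqsize = 128
		 * and hsqsize = 127 below.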
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *ev)
{
	struct nvme_rdma_queue *queue = cm_id->context;
	int cm_error = 0;

	dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
		rdma_event_msg(ev->event), ev->event,
		ev->status, cm_id);

	switch (ev->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		cm_error = nvme_rdma_addr_resolved(queue);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		cm_error = nvme_rdma_route_resolved(queue);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		queue->cm_error = nvme_rdma_conn_established(queue);
		/* complete cm_done regardless of success/failure */
		complete(&queue->cm_done);
		return 0;
	case RDMA_CM_EVENT_REJECTED:
		cm_error = nvme_rdma_conn_rejected(queue, ev);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_ADDR_ERROR:
		dev_dbg(queue->ctrl->ctrl.device,
			"CM error event %d\n", ev->event);
		cm_error = -ECONNRESET;
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		dev_dbg(queue->ctrl->ctrl.device,
			"disconnect received - connection closed\n");
		nvme_rdma_error_recovery(queue->ctrl);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/* device removal is handled via the ib_client API */
		break;
	default:
		dev_err(queue->ctrl->ctrl.device,
			"Unexpected RDMA CM event (%d)\n", ev->event);
		nvme_rdma_error_recovery(queue->ctrl);
		break;
	}

	if (cm_error) {
		queue->cm_error = cm_error;
		complete(&queue->cm_done);
	}

	return 0;
}

static void nvme_rdma_complete_timed_out(struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_queue *queue = req->queue;

	nvme_rdma_stop_queue(queue);
	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
		blk_mq_complete_request(rq);
	}
}

static enum blk_eh_timer_return
nvme_rdma_timeout(struct request *rq, bool reserved)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_queue *queue = req->queue;
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;

	dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
		 rq->tag, nvme_rdma_queue_idx(queue));

	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
		/*
		 * If we are resetting, connecting or deleting we should
		 * complete immediately because we may block controller
		 * teardown or setup sequence
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail it here.
		 */
		nvme_rdma_complete_timed_out(rq);
		return BLK_EH_DONE;
	}

	/*
	 * LIVE state should trigger the normal error recovery which will
	 * handle completing this request.
	 */
	nvme_rdma_error_recovery(ctrl);
	return BLK_EH_RESET_TIMER;
}

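/*
 * Queue entry point for the block layer: map the command capsule for DMA,
 * build the data (and, when applicable, protection information) mapping, and
 * post the send work request.  Transient mapping failures (-ENOMEM, -EAGAIN)
 * are reported as BLK_STS_RESOURCE so the request is retried later.
 */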
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_rdma_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_qe *sqe = &req->sqe;
	struct nvme_command *c = nvme_req(rq)->cmd;
	struct ib_device *dev;
	bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
	blk_status_t ret;
	int err;

	WARN_ON_ONCE(rq->tag < 0);

	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

	dev = queue->device->dev;

	req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
					 sizeof(struct nvme_command),
					 DMA_TO_DEVICE);
	err = ib_dma_mapping_error(dev, req->sqe.dma);
	if (unlikely(err))
		return BLK_STS_RESOURCE;

	ib_dma_sync_single_for_cpu(dev, sqe->dma,
			sizeof(struct nvme_command), DMA_TO_DEVICE);

	ret = nvme_setup_cmd(ns, rq);
	if (ret)
		goto unmap_qe;

	blk_mq_start_request(rq);

	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
	    queue->pi_support &&
	    (c->common.opcode == nvme_cmd_write ||
	     c->common.opcode == nvme_cmd_read) &&
	    nvme_ns_has_pi(ns))
		req->use_sig_mr = true;
	else
		req->use_sig_mr = false;

	err = nvme_rdma_map_data(queue, rq, c);
	if (unlikely(err < 0)) {
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", err);
		goto err;
	}

	sqe->cqe.done = nvme_rdma_send_done;

	ib_dma_sync_single_for_device(dev, sqe->dma,
			sizeof(struct nvme_command), DMA_TO_DEVICE);

	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
			req->mr ? &req->reg_wr.wr : NULL);
	if (unlikely(err))
		goto err_unmap;

	return BLK_STS_OK;

err_unmap:
	nvme_rdma_unmap_data(queue, rq);
err:
	if (err == -EIO)
		ret = nvme_host_path_error(rq);
	else if (err == -ENOMEM || err == -EAGAIN)
		ret = BLK_STS_RESOURCE;
	else
		ret = BLK_STS_IOERR;
	nvme_cleanup_cmd(rq);
unmap_qe:
	ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
			    DMA_TO_DEVICE);
	return ret;
}

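/*
 * Called for HCTX_TYPE_POLL hardware contexts: drain the queue's completion
 * queue directly instead of waiting for an interrupt (a budget of -1 asks
 * the IB CQ core to process all pending completions).
 */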
static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct nvme_rdma_queue *queue = hctx->driver_data;

	return ib_process_cq_direct(queue->ib_cq, -1);
}

static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		pr_err("ib_check_mr_status failed, ret %d\n", ret);
		nvme_req(rq)->status = NVME_SC_INVALID_PI;
		return;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
			break;
		case IB_SIG_BAD_REFTAG:
			nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
			break;
		case IB_SIG_BAD_APPTAG:
			nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
			break;
		}
		pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
		       mr_status.sig_err.err_type, mr_status.sig_err.expected,
		       mr_status.sig_err.actual);
	}
}

static void nvme_rdma_complete_rq(struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_queue *queue = req->queue;
	struct ib_device *ibdev = queue->device->dev;

	if (req->use_sig_mr)
		nvme_rdma_check_pi_status(req);

	nvme_rdma_unmap_data(queue, rq);
	ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
			    DMA_TO_DEVICE);
	nvme_complete_rq(rq);
}

static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_rdma_ctrl *ctrl = set->driver_data;
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
			       ctrl->device->dev, 0);
	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
			       ctrl->device->dev, 0);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
			ctrl->io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			ctrl->io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ],
		ctrl->io_queues[HCTX_TYPE_POLL]);

	return 0;
}

static const struct blk_mq_ops nvme_rdma_mq_ops = {
	.queue_rq	= nvme_rdma_queue_rq,
	.complete	= nvme_rdma_complete_rq,
	.init_request	= nvme_rdma_init_request,
	.exit_request	= nvme_rdma_exit_request,
	.init_hctx	= nvme_rdma_init_hctx,
	.timeout	= nvme_rdma_timeout,
	.map_queues	= nvme_rdma_map_queues,
	.poll		= nvme_rdma_poll,
};

static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
	.queue_rq	= nvme_rdma_queue_rq,
	.complete	= nvme_rdma_complete_rq,
	.init_request	= nvme_rdma_init_request,
	.exit_request	= nvme_rdma_exit_request,
	.init_hctx	= nvme_rdma_init_admin_hctx,
	.timeout	= nvme_rdma_timeout,
};

static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
	cancel_work_sync(&ctrl->err_work);
	cancel_delayed_work_sync(&ctrl->reconnect_work);

	nvme_rdma_teardown_io_queues(ctrl, shutdown);
	nvme_stop_admin_queue(&ctrl->ctrl);
	if (shutdown)
		nvme_shutdown_ctrl(&ctrl->ctrl);
	else
		nvme_disable_ctrl(&ctrl->ctrl);
	nvme_rdma_teardown_admin_queue(ctrl, shutdown);
}

static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true);
}

static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl =
		container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_rdma_shutdown_ctrl(ctrl, false);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure should never happen */
		WARN_ON_ONCE(1);
		return;
	}

	if (nvme_rdma_setup_ctrl(ctrl, false))
		goto out_fail;

	return;

out_fail:
	++ctrl->ctrl.nr_reconnects;
	nvme_rdma_reconnect_or_remove(ctrl);
}

static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
	.name			= "rdma",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS | NVME_F_METADATA_SUPPORTED,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_rdma_free_ctrl,
	.submit_async_event	= nvme_rdma_submit_async_event,
	.delete_ctrl		= nvme_rdma_delete_ctrl,
	.get_address		= nvmf_get_address,
};

/*
 * Fails a connection request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local address, remote address, remote port, SUBSYS NQN>
 *
 * If a local address is not specified in the request, it will match an
 * existing controller with all the other parameters the same and no
 * local port address specified as well.
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
static bool
nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_rdma_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	return found;
}

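/*
 * Allocate and bring up a new controller for a fabrics "connect" request:
 * parse the target (and optional host) address, reject duplicate
 * associations unless duplicate_connect was requested, register the core
 * controller, move it to CONNECTING and run the full queue setup.
 */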
2323 */ 2324 static bool 2325 nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts) 2326 { 2327 struct nvme_rdma_ctrl *ctrl; 2328 bool found = false; 2329 2330 mutex_lock(&nvme_rdma_ctrl_mutex); 2331 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) { 2332 found = nvmf_ip_options_match(&ctrl->ctrl, opts); 2333 if (found) 2334 break; 2335 } 2336 mutex_unlock(&nvme_rdma_ctrl_mutex); 2337 2338 return found; 2339 } 2340 2341 static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, 2342 struct nvmf_ctrl_options *opts) 2343 { 2344 struct nvme_rdma_ctrl *ctrl; 2345 int ret; 2346 bool changed; 2347 2348 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 2349 if (!ctrl) 2350 return ERR_PTR(-ENOMEM); 2351 ctrl->ctrl.opts = opts; 2352 INIT_LIST_HEAD(&ctrl->list); 2353 2354 if (!(opts->mask & NVMF_OPT_TRSVCID)) { 2355 opts->trsvcid = 2356 kstrdup(__stringify(NVME_RDMA_IP_PORT), GFP_KERNEL); 2357 if (!opts->trsvcid) { 2358 ret = -ENOMEM; 2359 goto out_free_ctrl; 2360 } 2361 opts->mask |= NVMF_OPT_TRSVCID; 2362 } 2363 2364 ret = inet_pton_with_scope(&init_net, AF_UNSPEC, 2365 opts->traddr, opts->trsvcid, &ctrl->addr); 2366 if (ret) { 2367 pr_err("malformed address passed: %s:%s\n", 2368 opts->traddr, opts->trsvcid); 2369 goto out_free_ctrl; 2370 } 2371 2372 if (opts->mask & NVMF_OPT_HOST_TRADDR) { 2373 ret = inet_pton_with_scope(&init_net, AF_UNSPEC, 2374 opts->host_traddr, NULL, &ctrl->src_addr); 2375 if (ret) { 2376 pr_err("malformed src address passed: %s\n", 2377 opts->host_traddr); 2378 goto out_free_ctrl; 2379 } 2380 } 2381 2382 if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) { 2383 ret = -EALREADY; 2384 goto out_free_ctrl; 2385 } 2386 2387 INIT_DELAYED_WORK(&ctrl->reconnect_work, 2388 nvme_rdma_reconnect_ctrl_work); 2389 INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work); 2390 INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work); 2391 2392 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + 2393 opts->nr_poll_queues + 1; 2394 ctrl->ctrl.sqsize = opts->queue_size - 1; 2395 ctrl->ctrl.kato = opts->kato; 2396 2397 ret = -ENOMEM; 2398 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), 2399 GFP_KERNEL); 2400 if (!ctrl->queues) 2401 goto out_free_ctrl; 2402 2403 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops, 2404 0 /* no quirks, we're perfect! 
static struct nvmf_transport_ops nvme_rdma_transport = {
	.name		= "rdma",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			  NVMF_OPT_TOS,
	.create_ctrl	= nvme_rdma_create_ctrl,
};

static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct nvme_rdma_ctrl *ctrl;
	struct nvme_rdma_device *ndev;
	bool found = false;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->dev == ib_device) {
			found = true;
			break;
		}
	}
	mutex_unlock(&device_list_mutex);

	if (!found)
		return;

	/* Delete all controllers using this device */
	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
		if (ctrl->device->dev != ib_device)
			continue;
		nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

static struct ib_client nvme_rdma_ib_client = {
	.name	= "nvme_rdma",
	.remove	= nvme_rdma_remove_one
};

static int __init nvme_rdma_init_module(void)
{
	int ret;

	ret = ib_register_client(&nvme_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_rdma_transport);
	if (ret)
		goto err_unreg_client;

	return 0;

err_unreg_client:
	ib_unregister_client(&nvme_rdma_ib_client);
	return ret;
}

static void __exit nvme_rdma_cleanup_module(void)
{
	struct nvme_rdma_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_rdma_transport);
	ib_unregister_client(&nvme_rdma_ib_client);

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_rdma_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_rdma_init_module);
module_exit(nvme_rdma_cleanup_module);

MODULE_LICENSE("GPL v2");