// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics RDMA host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <rdma/mr_pool.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/nvme.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/nvme-rdma.h>

#include "nvme.h"
#include "fabrics.h"


#define NVME_RDMA_CONNECT_TIMEOUT_MS	3000		/* 3 seconds */

#define NVME_RDMA_MAX_SEGMENTS		256

#define NVME_RDMA_MAX_INLINE_SEGMENTS	4

#define NVME_RDMA_DATA_SGL_SIZE \
	(sizeof(struct scatterlist) * NVME_INLINE_SG_CNT)
#define NVME_RDMA_METADATA_SGL_SIZE \
	(sizeof(struct scatterlist) * NVME_INLINE_METADATA_SG_CNT)

struct nvme_rdma_device {
	struct ib_device	*dev;
	struct ib_pd		*pd;
	struct kref		ref;
	struct list_head	entry;
	unsigned int		num_inline_segments;
};

struct nvme_rdma_qe {
	struct ib_cqe		cqe;
	void			*data;
	u64			dma;
};

struct nvme_rdma_sgl {
	int			nents;
	struct sg_table		sg_table;
};

struct nvme_rdma_queue;
struct nvme_rdma_request {
	struct nvme_request	req;
	struct ib_mr		*mr;
	struct nvme_rdma_qe	sqe;
	union nvme_result	result;
	__le16			status;
	refcount_t		ref;
	struct ib_sge		sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
	u32			num_sge;
	struct ib_reg_wr	reg_wr;
	struct ib_cqe		reg_cqe;
	struct nvme_rdma_queue  *queue;
	struct nvme_rdma_sgl	data_sgl;
	struct nvme_rdma_sgl	*metadata_sgl;
	bool			use_sig_mr;
};

enum nvme_rdma_queue_flags {
	NVME_RDMA_Q_ALLOCATED		= 0,
	NVME_RDMA_Q_LIVE		= 1,
	NVME_RDMA_Q_TR_READY		= 2,
};

struct nvme_rdma_queue {
	struct nvme_rdma_qe	*rsp_ring;
	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_rdma_ctrl	*ctrl;
	struct nvme_rdma_device	*device;
	struct ib_cq		*ib_cq;
	struct ib_qp		*qp;

	unsigned long		flags;
	struct rdma_cm_id	*cm_id;
	int			cm_error;
	struct completion	cm_done;
	bool			pi_support;
	int			cq_size;
	struct mutex		queue_lock;
};

struct nvme_rdma_ctrl {
	/* read only in the hot path */
	struct nvme_rdma_queue	*queues;

	/* other member variables */
	struct blk_mq_tag_set	tag_set;
	struct work_struct	err_work;

	struct nvme_rdma_qe	async_event_sqe;

	struct delayed_work	reconnect_work;

	struct list_head	list;

	struct blk_mq_tag_set	admin_tag_set;
	struct nvme_rdma_device	*device;

	u32			max_fr_pages;

	struct sockaddr_storage addr;
	struct sockaddr_storage src_addr;

	struct nvme_ctrl	ctrl;
	bool			use_inline_data;
	u32			io_queues[HCTX_MAX_TYPES];
};

static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
}

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static LIST_HEAD(nvme_rdma_ctrl_list);
static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);

/*
 * Disabling this option makes small I/O go faster, but is fundamentally
 * unsafe.  With it turned off we will have to register a global rkey that
 * allows read and write access to all physical memory.
 */
static bool register_always = true;
module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
	 "Use memory registration even for contiguous memory regions");

static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event);
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvme_rdma_complete_rq(struct request *rq);

static const struct blk_mq_ops nvme_rdma_mq_ops;
static const struct blk_mq_ops nvme_rdma_admin_mq_ops;

static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
{
	return nvme_rdma_queue_idx(queue) >
		queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
		queue->ctrl->io_queues[HCTX_TYPE_READ];
}

static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}

static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
		size_t capsule_size, enum dma_data_direction dir)
{
	ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
	kfree(qe->data);
}

static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
		size_t capsule_size, enum dma_data_direction dir)
{
	qe->data = kzalloc(capsule_size, GFP_KERNEL);
	if (!qe->data)
		return -ENOMEM;

	qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
	if (ib_dma_mapping_error(ibdev, qe->dma)) {
		kfree(qe->data);
		qe->data = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_rdma_free_ring(struct ib_device *ibdev,
		struct nvme_rdma_qe *ring, size_t ib_queue_size,
		size_t capsule_size, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < ib_queue_size; i++)
		nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
	kfree(ring);
}

static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
		size_t ib_queue_size, size_t capsule_size,
		enum dma_data_direction dir)
{
	struct nvme_rdma_qe *ring;
	int i;

	ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
	if (!ring)
		return NULL;

	/*
	 * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue
	 * lifetime. It's safe, since any change in the underlying RDMA device
	 * will issue error recovery and queue re-creation.
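	 * (Each ring entry pairs a DMA-mapped completion buffer with its ib_cqe,
	 * so the recv completion handler can locate the buffer via container_of().)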
226 */ 227 for (i = 0; i < ib_queue_size; i++) { 228 if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir)) 229 goto out_free_ring; 230 } 231 232 return ring; 233 234 out_free_ring: 235 nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir); 236 return NULL; 237 } 238 239 static void nvme_rdma_qp_event(struct ib_event *event, void *context) 240 { 241 pr_debug("QP event %s (%d)\n", 242 ib_event_msg(event->event), event->event); 243 244 } 245 246 static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue) 247 { 248 int ret; 249 250 ret = wait_for_completion_interruptible_timeout(&queue->cm_done, 251 msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1); 252 if (ret < 0) 253 return ret; 254 if (ret == 0) 255 return -ETIMEDOUT; 256 WARN_ON_ONCE(queue->cm_error > 0); 257 return queue->cm_error; 258 } 259 260 static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor) 261 { 262 struct nvme_rdma_device *dev = queue->device; 263 struct ib_qp_init_attr init_attr; 264 int ret; 265 266 memset(&init_attr, 0, sizeof(init_attr)); 267 init_attr.event_handler = nvme_rdma_qp_event; 268 /* +1 for drain */ 269 init_attr.cap.max_send_wr = factor * queue->queue_size + 1; 270 /* +1 for drain */ 271 init_attr.cap.max_recv_wr = queue->queue_size + 1; 272 init_attr.cap.max_recv_sge = 1; 273 init_attr.cap.max_send_sge = 1 + dev->num_inline_segments; 274 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 275 init_attr.qp_type = IB_QPT_RC; 276 init_attr.send_cq = queue->ib_cq; 277 init_attr.recv_cq = queue->ib_cq; 278 if (queue->pi_support) 279 init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN; 280 init_attr.qp_context = queue; 281 282 ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr); 283 284 queue->qp = queue->cm_id->qp; 285 return ret; 286 } 287 288 static void nvme_rdma_exit_request(struct blk_mq_tag_set *set, 289 struct request *rq, unsigned int hctx_idx) 290 { 291 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 292 293 kfree(req->sqe.data); 294 } 295 296 static int nvme_rdma_init_request(struct blk_mq_tag_set *set, 297 struct request *rq, unsigned int hctx_idx, 298 unsigned int numa_node) 299 { 300 struct nvme_rdma_ctrl *ctrl = set->driver_data; 301 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 302 int queue_idx = (set == &ctrl->tag_set) ? 
hctx_idx + 1 : 0; 303 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; 304 305 nvme_req(rq)->ctrl = &ctrl->ctrl; 306 req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL); 307 if (!req->sqe.data) 308 return -ENOMEM; 309 310 /* metadata nvme_rdma_sgl struct is located after command's data SGL */ 311 if (queue->pi_support) 312 req->metadata_sgl = (void *)nvme_req(rq) + 313 sizeof(struct nvme_rdma_request) + 314 NVME_RDMA_DATA_SGL_SIZE; 315 316 req->queue = queue; 317 nvme_req(rq)->cmd = req->sqe.data; 318 319 return 0; 320 } 321 322 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 323 unsigned int hctx_idx) 324 { 325 struct nvme_rdma_ctrl *ctrl = data; 326 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; 327 328 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count); 329 330 hctx->driver_data = queue; 331 return 0; 332 } 333 334 static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, 335 unsigned int hctx_idx) 336 { 337 struct nvme_rdma_ctrl *ctrl = data; 338 struct nvme_rdma_queue *queue = &ctrl->queues[0]; 339 340 BUG_ON(hctx_idx != 0); 341 342 hctx->driver_data = queue; 343 return 0; 344 } 345 346 static void nvme_rdma_free_dev(struct kref *ref) 347 { 348 struct nvme_rdma_device *ndev = 349 container_of(ref, struct nvme_rdma_device, ref); 350 351 mutex_lock(&device_list_mutex); 352 list_del(&ndev->entry); 353 mutex_unlock(&device_list_mutex); 354 355 ib_dealloc_pd(ndev->pd); 356 kfree(ndev); 357 } 358 359 static void nvme_rdma_dev_put(struct nvme_rdma_device *dev) 360 { 361 kref_put(&dev->ref, nvme_rdma_free_dev); 362 } 363 364 static int nvme_rdma_dev_get(struct nvme_rdma_device *dev) 365 { 366 return kref_get_unless_zero(&dev->ref); 367 } 368 369 static struct nvme_rdma_device * 370 nvme_rdma_find_get_device(struct rdma_cm_id *cm_id) 371 { 372 struct nvme_rdma_device *ndev; 373 374 mutex_lock(&device_list_mutex); 375 list_for_each_entry(ndev, &device_list, entry) { 376 if (ndev->dev->node_guid == cm_id->device->node_guid && 377 nvme_rdma_dev_get(ndev)) 378 goto out_unlock; 379 } 380 381 ndev = kzalloc(sizeof(*ndev), GFP_KERNEL); 382 if (!ndev) 383 goto out_err; 384 385 ndev->dev = cm_id->device; 386 kref_init(&ndev->ref); 387 388 ndev->pd = ib_alloc_pd(ndev->dev, 389 register_always ? 
0 : IB_PD_UNSAFE_GLOBAL_RKEY);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (!(ndev->dev->attrs.device_cap_flags &
	      IB_DEVICE_MEM_MGT_EXTENSIONS)) {
		dev_err(&ndev->dev->dev,
			"Memory registrations not supported.\n");
		goto out_free_pd;
	}

	ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS,
					ndev->dev->attrs.max_send_sge - 1);
	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue)
{
	if (nvme_rdma_poll_queue(queue))
		ib_free_cq(queue->ib_cq);
	else
		ib_cq_pool_put(queue->ib_cq, queue->cq_size);
}

static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
	struct nvme_rdma_device *dev;
	struct ib_device *ibdev;

	if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
		return;

	dev = queue->device;
	ibdev = dev->dev;

	if (queue->pi_support)
		ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs);
	ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);

	/*
	 * The cm_id object might have been destroyed during RDMA connection
	 * establishment error flow to avoid getting other cma events, thus
	 * the destruction of the QP shouldn't use rdma_cm API.
	 */
	ib_destroy_qp(queue->qp);
	nvme_rdma_free_cq(queue);

	nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
			    sizeof(struct nvme_completion), DMA_FROM_DEVICE);

	nvme_rdma_dev_put(dev);
}

static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
{
	u32 max_page_list_len;

	if (pi_support)
		max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
	else
		max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;

	return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
}

static int nvme_rdma_create_cq(struct ib_device *ibdev,
		struct nvme_rdma_queue *queue)
{
	int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
	enum ib_poll_context poll_ctx;

	/*
	 * Spread I/O queues completion vectors according to their queue index.
	 * Admin queues can always go on completion vector 0.
	 */
	comp_vector = (idx == 0 ?
idx : idx - 1) % ibdev->num_comp_vectors; 476 477 /* Polling queues need direct cq polling context */ 478 if (nvme_rdma_poll_queue(queue)) { 479 poll_ctx = IB_POLL_DIRECT; 480 queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size, 481 comp_vector, poll_ctx); 482 } else { 483 poll_ctx = IB_POLL_SOFTIRQ; 484 queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size, 485 comp_vector, poll_ctx); 486 } 487 488 if (IS_ERR(queue->ib_cq)) { 489 ret = PTR_ERR(queue->ib_cq); 490 return ret; 491 } 492 493 return 0; 494 } 495 496 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) 497 { 498 struct ib_device *ibdev; 499 const int send_wr_factor = 3; /* MR, SEND, INV */ 500 const int cq_factor = send_wr_factor + 1; /* + RECV */ 501 int ret, pages_per_mr; 502 503 queue->device = nvme_rdma_find_get_device(queue->cm_id); 504 if (!queue->device) { 505 dev_err(queue->cm_id->device->dev.parent, 506 "no client data found!\n"); 507 return -ECONNREFUSED; 508 } 509 ibdev = queue->device->dev; 510 511 /* +1 for ib_stop_cq */ 512 queue->cq_size = cq_factor * queue->queue_size + 1; 513 514 ret = nvme_rdma_create_cq(ibdev, queue); 515 if (ret) 516 goto out_put_dev; 517 518 ret = nvme_rdma_create_qp(queue, send_wr_factor); 519 if (ret) 520 goto out_destroy_ib_cq; 521 522 queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size, 523 sizeof(struct nvme_completion), DMA_FROM_DEVICE); 524 if (!queue->rsp_ring) { 525 ret = -ENOMEM; 526 goto out_destroy_qp; 527 } 528 529 /* 530 * Currently we don't use SG_GAPS MR's so if the first entry is 531 * misaligned we'll end up using two entries for a single data page, 532 * so one additional entry is required. 533 */ 534 pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1; 535 ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs, 536 queue->queue_size, 537 IB_MR_TYPE_MEM_REG, 538 pages_per_mr, 0); 539 if (ret) { 540 dev_err(queue->ctrl->ctrl.device, 541 "failed to initialize MR pool sized %d for QID %d\n", 542 queue->queue_size, nvme_rdma_queue_idx(queue)); 543 goto out_destroy_ring; 544 } 545 546 if (queue->pi_support) { 547 ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs, 548 queue->queue_size, IB_MR_TYPE_INTEGRITY, 549 pages_per_mr, pages_per_mr); 550 if (ret) { 551 dev_err(queue->ctrl->ctrl.device, 552 "failed to initialize PI MR pool sized %d for QID %d\n", 553 queue->queue_size, nvme_rdma_queue_idx(queue)); 554 goto out_destroy_mr_pool; 555 } 556 } 557 558 set_bit(NVME_RDMA_Q_TR_READY, &queue->flags); 559 560 return 0; 561 562 out_destroy_mr_pool: 563 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); 564 out_destroy_ring: 565 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, 566 sizeof(struct nvme_completion), DMA_FROM_DEVICE); 567 out_destroy_qp: 568 rdma_destroy_qp(queue->cm_id); 569 out_destroy_ib_cq: 570 nvme_rdma_free_cq(queue); 571 out_put_dev: 572 nvme_rdma_dev_put(queue->device); 573 return ret; 574 } 575 576 static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, 577 int idx, size_t queue_size) 578 { 579 struct nvme_rdma_queue *queue; 580 struct sockaddr *src_addr = NULL; 581 int ret; 582 583 queue = &ctrl->queues[idx]; 584 mutex_init(&queue->queue_lock); 585 queue->ctrl = ctrl; 586 if (idx && ctrl->ctrl.max_integrity_segments) 587 queue->pi_support = true; 588 else 589 queue->pi_support = false; 590 init_completion(&queue->cm_done); 591 592 if (idx > 0) 593 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; 594 else 595 queue->cmnd_capsule_len = sizeof(struct nvme_command); 596 597 queue->queue_size = 
queue_size; 598 599 queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue, 600 RDMA_PS_TCP, IB_QPT_RC); 601 if (IS_ERR(queue->cm_id)) { 602 dev_info(ctrl->ctrl.device, 603 "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id)); 604 ret = PTR_ERR(queue->cm_id); 605 goto out_destroy_mutex; 606 } 607 608 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) 609 src_addr = (struct sockaddr *)&ctrl->src_addr; 610 611 queue->cm_error = -ETIMEDOUT; 612 ret = rdma_resolve_addr(queue->cm_id, src_addr, 613 (struct sockaddr *)&ctrl->addr, 614 NVME_RDMA_CONNECT_TIMEOUT_MS); 615 if (ret) { 616 dev_info(ctrl->ctrl.device, 617 "rdma_resolve_addr failed (%d).\n", ret); 618 goto out_destroy_cm_id; 619 } 620 621 ret = nvme_rdma_wait_for_cm(queue); 622 if (ret) { 623 dev_info(ctrl->ctrl.device, 624 "rdma connection establishment failed (%d)\n", ret); 625 goto out_destroy_cm_id; 626 } 627 628 set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags); 629 630 return 0; 631 632 out_destroy_cm_id: 633 rdma_destroy_id(queue->cm_id); 634 nvme_rdma_destroy_queue_ib(queue); 635 out_destroy_mutex: 636 mutex_destroy(&queue->queue_lock); 637 return ret; 638 } 639 640 static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) 641 { 642 rdma_disconnect(queue->cm_id); 643 ib_drain_qp(queue->qp); 644 } 645 646 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) 647 { 648 mutex_lock(&queue->queue_lock); 649 if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) 650 __nvme_rdma_stop_queue(queue); 651 mutex_unlock(&queue->queue_lock); 652 } 653 654 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) 655 { 656 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) 657 return; 658 659 nvme_rdma_destroy_queue_ib(queue); 660 rdma_destroy_id(queue->cm_id); 661 mutex_destroy(&queue->queue_lock); 662 } 663 664 static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl) 665 { 666 int i; 667 668 for (i = 1; i < ctrl->ctrl.queue_count; i++) 669 nvme_rdma_free_queue(&ctrl->queues[i]); 670 } 671 672 static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl) 673 { 674 int i; 675 676 for (i = 1; i < ctrl->ctrl.queue_count; i++) 677 nvme_rdma_stop_queue(&ctrl->queues[i]); 678 } 679 680 static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx) 681 { 682 struct nvme_rdma_queue *queue = &ctrl->queues[idx]; 683 bool poll = nvme_rdma_poll_queue(queue); 684 int ret; 685 686 if (idx) 687 ret = nvmf_connect_io_queue(&ctrl->ctrl, idx, poll); 688 else 689 ret = nvmf_connect_admin_queue(&ctrl->ctrl); 690 691 if (!ret) { 692 set_bit(NVME_RDMA_Q_LIVE, &queue->flags); 693 } else { 694 if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) 695 __nvme_rdma_stop_queue(queue); 696 dev_info(ctrl->ctrl.device, 697 "failed to connect queue: %d ret=%d\n", idx, ret); 698 } 699 return ret; 700 } 701 702 static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl) 703 { 704 int i, ret = 0; 705 706 for (i = 1; i < ctrl->ctrl.queue_count; i++) { 707 ret = nvme_rdma_start_queue(ctrl, i); 708 if (ret) 709 goto out_stop_queues; 710 } 711 712 return 0; 713 714 out_stop_queues: 715 for (i--; i >= 1; i--) 716 nvme_rdma_stop_queue(&ctrl->queues[i]); 717 return ret; 718 } 719 720 static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) 721 { 722 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 723 struct ib_device *ibdev = ctrl->device->dev; 724 unsigned int nr_io_queues, nr_default_queues; 725 unsigned int nr_read_queues, nr_poll_queues; 726 int i, ret; 727 728 nr_read_queues = min_t(unsigned 
int, ibdev->num_comp_vectors, 729 min(opts->nr_io_queues, num_online_cpus())); 730 nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors, 731 min(opts->nr_write_queues, num_online_cpus())); 732 nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus()); 733 nr_io_queues = nr_read_queues + nr_default_queues + nr_poll_queues; 734 735 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 736 if (ret) 737 return ret; 738 739 ctrl->ctrl.queue_count = nr_io_queues + 1; 740 if (ctrl->ctrl.queue_count < 2) { 741 dev_err(ctrl->ctrl.device, 742 "unable to set any I/O queues\n"); 743 return -ENOMEM; 744 } 745 746 dev_info(ctrl->ctrl.device, 747 "creating %d I/O queues.\n", nr_io_queues); 748 749 if (opts->nr_write_queues && nr_read_queues < nr_io_queues) { 750 /* 751 * separate read/write queues 752 * hand out dedicated default queues only after we have 753 * sufficient read queues. 754 */ 755 ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues; 756 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; 757 ctrl->io_queues[HCTX_TYPE_DEFAULT] = 758 min(nr_default_queues, nr_io_queues); 759 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; 760 } else { 761 /* 762 * shared read/write queues 763 * either no write queues were requested, or we don't have 764 * sufficient queue count to have dedicated default queues. 765 */ 766 ctrl->io_queues[HCTX_TYPE_DEFAULT] = 767 min(nr_read_queues, nr_io_queues); 768 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; 769 } 770 771 if (opts->nr_poll_queues && nr_io_queues) { 772 /* map dedicated poll queues only if we have queues left */ 773 ctrl->io_queues[HCTX_TYPE_POLL] = 774 min(nr_poll_queues, nr_io_queues); 775 } 776 777 for (i = 1; i < ctrl->ctrl.queue_count; i++) { 778 ret = nvme_rdma_alloc_queue(ctrl, i, 779 ctrl->ctrl.sqsize + 1); 780 if (ret) 781 goto out_free_queues; 782 } 783 784 return 0; 785 786 out_free_queues: 787 for (i--; i >= 1; i--) 788 nvme_rdma_free_queue(&ctrl->queues[i]); 789 790 return ret; 791 } 792 793 static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, 794 bool admin) 795 { 796 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); 797 struct blk_mq_tag_set *set; 798 int ret; 799 800 if (admin) { 801 set = &ctrl->admin_tag_set; 802 memset(set, 0, sizeof(*set)); 803 set->ops = &nvme_rdma_admin_mq_ops; 804 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; 805 set->reserved_tags = NVMF_RESERVED_TAGS; 806 set->numa_node = nctrl->numa_node; 807 set->cmd_size = sizeof(struct nvme_rdma_request) + 808 NVME_RDMA_DATA_SGL_SIZE; 809 set->driver_data = ctrl; 810 set->nr_hw_queues = 1; 811 set->timeout = NVME_ADMIN_TIMEOUT; 812 set->flags = BLK_MQ_F_NO_SCHED; 813 } else { 814 set = &ctrl->tag_set; 815 memset(set, 0, sizeof(*set)); 816 set->ops = &nvme_rdma_mq_ops; 817 set->queue_depth = nctrl->sqsize + 1; 818 set->reserved_tags = NVMF_RESERVED_TAGS; 819 set->numa_node = nctrl->numa_node; 820 set->flags = BLK_MQ_F_SHOULD_MERGE; 821 set->cmd_size = sizeof(struct nvme_rdma_request) + 822 NVME_RDMA_DATA_SGL_SIZE; 823 if (nctrl->max_integrity_segments) 824 set->cmd_size += sizeof(struct nvme_rdma_sgl) + 825 NVME_RDMA_METADATA_SGL_SIZE; 826 set->driver_data = ctrl; 827 set->nr_hw_queues = nctrl->queue_count - 1; 828 set->timeout = NVME_IO_TIMEOUT; 829 set->nr_maps = nctrl->opts->nr_poll_queues ? 
HCTX_MAX_TYPES : 2;
	}

	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ERR_PTR(ret);

	return set;
}

static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
		bool remove)
{
	if (remove) {
		blk_cleanup_queue(ctrl->ctrl.admin_q);
		blk_cleanup_queue(ctrl->ctrl.fabrics_q);
		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
	}
	if (ctrl->async_event_sqe.data) {
		cancel_work_sync(&ctrl->ctrl.async_event_work);
		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
				sizeof(struct nvme_command), DMA_TO_DEVICE);
		ctrl->async_event_sqe.data = NULL;
	}
	nvme_rdma_free_queue(&ctrl->queues[0]);
}

static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
		bool new)
{
	bool pi_capable = false;
	int error;

	error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
	if (error)
		return error;

	ctrl->device = ctrl->queues[0].device;
	ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev);

	/* T10-PI support */
	if (ctrl->device->dev->attrs.device_cap_flags &
	    IB_DEVICE_INTEGRITY_HANDOVER)
		pi_capable = true;

	ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
							pi_capable);

	/*
	 * Bind the async event SQE DMA mapping to the admin queue lifetime.
	 * It's safe, since any change in the underlying RDMA device will issue
	 * error recovery and queue re-creation.
	 */
	error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
			sizeof(struct nvme_command), DMA_TO_DEVICE);
	if (error)
		goto out_free_queue;

	if (new) {
		ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
		if (IS_ERR(ctrl->ctrl.admin_tagset)) {
			error = PTR_ERR(ctrl->ctrl.admin_tagset);
			goto out_free_async_qe;
		}

		ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
		if (IS_ERR(ctrl->ctrl.fabrics_q)) {
			error = PTR_ERR(ctrl->ctrl.fabrics_q);
			goto out_free_tagset;
		}

		ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
		if (IS_ERR(ctrl->ctrl.admin_q)) {
			error = PTR_ERR(ctrl->ctrl.admin_q);
			goto out_cleanup_fabrics_q;
		}
	}

	error = nvme_rdma_start_queue(ctrl, 0);
	if (error)
		goto out_cleanup_queue;

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
		goto out_stop_queue;

	ctrl->ctrl.max_segments = ctrl->max_fr_pages;
	ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
	if (pi_capable)
		ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
	else
		ctrl->ctrl.max_integrity_segments = 0;

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	error = nvme_init_ctrl_finish(&ctrl->ctrl);
	if (error)
		goto out_quiesce_queue;

	return 0;

out_quiesce_queue:
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_sync_queue(ctrl->ctrl.admin_q);
out_stop_queue:
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	nvme_cancel_admin_tagset(&ctrl->ctrl);
out_cleanup_queue:
	if (new)
		blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
	if (new)
		blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
	if (new)
		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
out_free_async_qe:
	if (ctrl->async_event_sqe.data) {
		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
				sizeof(struct nvme_command), DMA_TO_DEVICE);
		ctrl->async_event_sqe.data = NULL;
	}
out_free_queue:
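	/* error unwind: release the admin queue allocated at the top of this function */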
nvme_rdma_free_queue(&ctrl->queues[0]); 953 return error; 954 } 955 956 static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl, 957 bool remove) 958 { 959 if (remove) { 960 blk_cleanup_queue(ctrl->ctrl.connect_q); 961 blk_mq_free_tag_set(ctrl->ctrl.tagset); 962 } 963 nvme_rdma_free_io_queues(ctrl); 964 } 965 966 static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) 967 { 968 int ret; 969 970 ret = nvme_rdma_alloc_io_queues(ctrl); 971 if (ret) 972 return ret; 973 974 if (new) { 975 ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false); 976 if (IS_ERR(ctrl->ctrl.tagset)) { 977 ret = PTR_ERR(ctrl->ctrl.tagset); 978 goto out_free_io_queues; 979 } 980 981 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); 982 if (IS_ERR(ctrl->ctrl.connect_q)) { 983 ret = PTR_ERR(ctrl->ctrl.connect_q); 984 goto out_free_tag_set; 985 } 986 } 987 988 ret = nvme_rdma_start_io_queues(ctrl); 989 if (ret) 990 goto out_cleanup_connect_q; 991 992 if (!new) { 993 nvme_start_queues(&ctrl->ctrl); 994 if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { 995 /* 996 * If we timed out waiting for freeze we are likely to 997 * be stuck. Fail the controller initialization just 998 * to be safe. 999 */ 1000 ret = -ENODEV; 1001 goto out_wait_freeze_timed_out; 1002 } 1003 blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, 1004 ctrl->ctrl.queue_count - 1); 1005 nvme_unfreeze(&ctrl->ctrl); 1006 } 1007 1008 return 0; 1009 1010 out_wait_freeze_timed_out: 1011 nvme_stop_queues(&ctrl->ctrl); 1012 nvme_sync_io_queues(&ctrl->ctrl); 1013 nvme_rdma_stop_io_queues(ctrl); 1014 out_cleanup_connect_q: 1015 nvme_cancel_tagset(&ctrl->ctrl); 1016 if (new) 1017 blk_cleanup_queue(ctrl->ctrl.connect_q); 1018 out_free_tag_set: 1019 if (new) 1020 blk_mq_free_tag_set(ctrl->ctrl.tagset); 1021 out_free_io_queues: 1022 nvme_rdma_free_io_queues(ctrl); 1023 return ret; 1024 } 1025 1026 static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, 1027 bool remove) 1028 { 1029 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); 1030 blk_sync_queue(ctrl->ctrl.admin_q); 1031 nvme_rdma_stop_queue(&ctrl->queues[0]); 1032 nvme_cancel_admin_tagset(&ctrl->ctrl); 1033 if (remove) 1034 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 1035 nvme_rdma_destroy_admin_queue(ctrl, remove); 1036 } 1037 1038 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, 1039 bool remove) 1040 { 1041 if (ctrl->ctrl.queue_count > 1) { 1042 nvme_start_freeze(&ctrl->ctrl); 1043 nvme_stop_queues(&ctrl->ctrl); 1044 nvme_sync_io_queues(&ctrl->ctrl); 1045 nvme_rdma_stop_io_queues(ctrl); 1046 nvme_cancel_tagset(&ctrl->ctrl); 1047 if (remove) 1048 nvme_start_queues(&ctrl->ctrl); 1049 nvme_rdma_destroy_io_queues(ctrl, remove); 1050 } 1051 } 1052 1053 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) 1054 { 1055 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); 1056 1057 if (list_empty(&ctrl->list)) 1058 goto free_ctrl; 1059 1060 mutex_lock(&nvme_rdma_ctrl_mutex); 1061 list_del(&ctrl->list); 1062 mutex_unlock(&nvme_rdma_ctrl_mutex); 1063 1064 nvmf_free_options(nctrl->opts); 1065 free_ctrl: 1066 kfree(ctrl->queues); 1067 kfree(ctrl); 1068 } 1069 1070 static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl) 1071 { 1072 /* If we are resetting/deleting then do nothing */ 1073 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) { 1074 WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW || 1075 ctrl->ctrl.state == NVME_CTRL_LIVE); 1076 return; 1077 } 1078 1079 if (nvmf_should_reconnect(&ctrl->ctrl)) { 1080 
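		/* reconnect_delay is in seconds; the delayed work below converts it to jiffies via HZ */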
dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n", 1081 ctrl->ctrl.opts->reconnect_delay); 1082 queue_delayed_work(nvme_wq, &ctrl->reconnect_work, 1083 ctrl->ctrl.opts->reconnect_delay * HZ); 1084 } else { 1085 nvme_delete_ctrl(&ctrl->ctrl); 1086 } 1087 } 1088 1089 static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) 1090 { 1091 int ret = -EINVAL; 1092 bool changed; 1093 1094 ret = nvme_rdma_configure_admin_queue(ctrl, new); 1095 if (ret) 1096 return ret; 1097 1098 if (ctrl->ctrl.icdoff) { 1099 dev_err(ctrl->ctrl.device, "icdoff is not supported!\n"); 1100 goto destroy_admin; 1101 } 1102 1103 if (!(ctrl->ctrl.sgls & (1 << 2))) { 1104 dev_err(ctrl->ctrl.device, 1105 "Mandatory keyed sgls are not supported!\n"); 1106 goto destroy_admin; 1107 } 1108 1109 if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) { 1110 dev_warn(ctrl->ctrl.device, 1111 "queue_size %zu > ctrl sqsize %u, clamping down\n", 1112 ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1); 1113 } 1114 1115 if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) { 1116 dev_warn(ctrl->ctrl.device, 1117 "sqsize %u > ctrl maxcmd %u, clamping down\n", 1118 ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd); 1119 ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1; 1120 } 1121 1122 if (ctrl->ctrl.sgls & (1 << 20)) 1123 ctrl->use_inline_data = true; 1124 1125 if (ctrl->ctrl.queue_count > 1) { 1126 ret = nvme_rdma_configure_io_queues(ctrl, new); 1127 if (ret) 1128 goto destroy_admin; 1129 } 1130 1131 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 1132 if (!changed) { 1133 /* 1134 * state change failure is ok if we started ctrl delete, 1135 * unless we're during creation of a new controller to 1136 * avoid races with teardown flow. 1137 */ 1138 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING && 1139 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO); 1140 WARN_ON_ONCE(new); 1141 ret = -EINVAL; 1142 goto destroy_io; 1143 } 1144 1145 nvme_start_ctrl(&ctrl->ctrl); 1146 return 0; 1147 1148 destroy_io: 1149 if (ctrl->ctrl.queue_count > 1) { 1150 nvme_stop_queues(&ctrl->ctrl); 1151 nvme_sync_io_queues(&ctrl->ctrl); 1152 nvme_rdma_stop_io_queues(ctrl); 1153 nvme_cancel_tagset(&ctrl->ctrl); 1154 nvme_rdma_destroy_io_queues(ctrl, new); 1155 } 1156 destroy_admin: 1157 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); 1158 blk_sync_queue(ctrl->ctrl.admin_q); 1159 nvme_rdma_stop_queue(&ctrl->queues[0]); 1160 nvme_cancel_admin_tagset(&ctrl->ctrl); 1161 nvme_rdma_destroy_admin_queue(ctrl, new); 1162 return ret; 1163 } 1164 1165 static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) 1166 { 1167 struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work), 1168 struct nvme_rdma_ctrl, reconnect_work); 1169 1170 ++ctrl->ctrl.nr_reconnects; 1171 1172 if (nvme_rdma_setup_ctrl(ctrl, false)) 1173 goto requeue; 1174 1175 dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n", 1176 ctrl->ctrl.nr_reconnects); 1177 1178 ctrl->ctrl.nr_reconnects = 0; 1179 1180 return; 1181 1182 requeue: 1183 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", 1184 ctrl->ctrl.nr_reconnects); 1185 nvme_rdma_reconnect_or_remove(ctrl); 1186 } 1187 1188 static void nvme_rdma_error_recovery_work(struct work_struct *work) 1189 { 1190 struct nvme_rdma_ctrl *ctrl = container_of(work, 1191 struct nvme_rdma_ctrl, err_work); 1192 1193 nvme_stop_keep_alive(&ctrl->ctrl); 1194 nvme_rdma_teardown_io_queues(ctrl, false); 1195 nvme_start_queues(&ctrl->ctrl); 1196 nvme_rdma_teardown_admin_queue(ctrl, false); 1197 
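	/* let pending admin requests make forward progress (fail over or requeue) rather than sit on a quiesced queue while we reconnect */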
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 1198 1199 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { 1200 /* state change failure is ok if we started ctrl delete */ 1201 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING && 1202 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO); 1203 return; 1204 } 1205 1206 nvme_rdma_reconnect_or_remove(ctrl); 1207 } 1208 1209 static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) 1210 { 1211 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) 1212 return; 1213 1214 dev_warn(ctrl->ctrl.device, "starting error recovery\n"); 1215 queue_work(nvme_reset_wq, &ctrl->err_work); 1216 } 1217 1218 static void nvme_rdma_end_request(struct nvme_rdma_request *req) 1219 { 1220 struct request *rq = blk_mq_rq_from_pdu(req); 1221 1222 if (!refcount_dec_and_test(&req->ref)) 1223 return; 1224 if (!nvme_try_complete_req(rq, req->status, req->result)) 1225 nvme_rdma_complete_rq(rq); 1226 } 1227 1228 static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc, 1229 const char *op) 1230 { 1231 struct nvme_rdma_queue *queue = wc->qp->qp_context; 1232 struct nvme_rdma_ctrl *ctrl = queue->ctrl; 1233 1234 if (ctrl->ctrl.state == NVME_CTRL_LIVE) 1235 dev_info(ctrl->ctrl.device, 1236 "%s for CQE 0x%p failed with status %s (%d)\n", 1237 op, wc->wr_cqe, 1238 ib_wc_status_msg(wc->status), wc->status); 1239 nvme_rdma_error_recovery(ctrl); 1240 } 1241 1242 static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc) 1243 { 1244 if (unlikely(wc->status != IB_WC_SUCCESS)) 1245 nvme_rdma_wr_error(cq, wc, "MEMREG"); 1246 } 1247 1248 static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) 1249 { 1250 struct nvme_rdma_request *req = 1251 container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe); 1252 1253 if (unlikely(wc->status != IB_WC_SUCCESS)) 1254 nvme_rdma_wr_error(cq, wc, "LOCAL_INV"); 1255 else 1256 nvme_rdma_end_request(req); 1257 } 1258 1259 static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue, 1260 struct nvme_rdma_request *req) 1261 { 1262 struct ib_send_wr wr = { 1263 .opcode = IB_WR_LOCAL_INV, 1264 .next = NULL, 1265 .num_sge = 0, 1266 .send_flags = IB_SEND_SIGNALED, 1267 .ex.invalidate_rkey = req->mr->rkey, 1268 }; 1269 1270 req->reg_cqe.done = nvme_rdma_inv_rkey_done; 1271 wr.wr_cqe = &req->reg_cqe; 1272 1273 return ib_post_send(queue->qp, &wr, NULL); 1274 } 1275 1276 static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, 1277 struct request *rq) 1278 { 1279 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 1280 struct nvme_rdma_device *dev = queue->device; 1281 struct ib_device *ibdev = dev->dev; 1282 struct list_head *pool = &queue->qp->rdma_mrs; 1283 1284 if (!blk_rq_nr_phys_segments(rq)) 1285 return; 1286 1287 if (blk_integrity_rq(rq)) { 1288 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl, 1289 req->metadata_sgl->nents, rq_dma_dir(rq)); 1290 sg_free_table_chained(&req->metadata_sgl->sg_table, 1291 NVME_INLINE_METADATA_SG_CNT); 1292 } 1293 1294 if (req->use_sig_mr) 1295 pool = &queue->qp->sig_mrs; 1296 1297 if (req->mr) { 1298 ib_mr_pool_put(queue->qp, pool, req->mr); 1299 req->mr = NULL; 1300 } 1301 1302 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, 1303 rq_dma_dir(rq)); 1304 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT); 1305 } 1306 1307 static int nvme_rdma_set_sg_null(struct nvme_command *c) 1308 { 1309 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; 1310 1311 sg->addr = 0; 1312 put_unaligned_le24(0, sg->length); 1313 
put_unaligned_le32(0, sg->key); 1314 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; 1315 return 0; 1316 } 1317 1318 static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue, 1319 struct nvme_rdma_request *req, struct nvme_command *c, 1320 int count) 1321 { 1322 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; 1323 struct ib_sge *sge = &req->sge[1]; 1324 struct scatterlist *sgl; 1325 u32 len = 0; 1326 int i; 1327 1328 for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) { 1329 sge->addr = sg_dma_address(sgl); 1330 sge->length = sg_dma_len(sgl); 1331 sge->lkey = queue->device->pd->local_dma_lkey; 1332 len += sge->length; 1333 sge++; 1334 } 1335 1336 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); 1337 sg->length = cpu_to_le32(len); 1338 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; 1339 1340 req->num_sge += count; 1341 return 0; 1342 } 1343 1344 static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue, 1345 struct nvme_rdma_request *req, struct nvme_command *c) 1346 { 1347 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; 1348 1349 sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl)); 1350 put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length); 1351 put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key); 1352 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; 1353 return 0; 1354 } 1355 1356 static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue, 1357 struct nvme_rdma_request *req, struct nvme_command *c, 1358 int count) 1359 { 1360 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; 1361 int nr; 1362 1363 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs); 1364 if (WARN_ON_ONCE(!req->mr)) 1365 return -EAGAIN; 1366 1367 /* 1368 * Align the MR to a 4K page size to match the ctrl page size and 1369 * the block virtual boundary. 
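	 * (ib_map_mr_sg() below is passed SZ_4K as the HW page size; the extra
	 * page reserved per MR in the pool covers a misaligned first entry.)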
1370 */ 1371 nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL, 1372 SZ_4K); 1373 if (unlikely(nr < count)) { 1374 ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr); 1375 req->mr = NULL; 1376 if (nr < 0) 1377 return nr; 1378 return -EINVAL; 1379 } 1380 1381 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); 1382 1383 req->reg_cqe.done = nvme_rdma_memreg_done; 1384 memset(&req->reg_wr, 0, sizeof(req->reg_wr)); 1385 req->reg_wr.wr.opcode = IB_WR_REG_MR; 1386 req->reg_wr.wr.wr_cqe = &req->reg_cqe; 1387 req->reg_wr.wr.num_sge = 0; 1388 req->reg_wr.mr = req->mr; 1389 req->reg_wr.key = req->mr->rkey; 1390 req->reg_wr.access = IB_ACCESS_LOCAL_WRITE | 1391 IB_ACCESS_REMOTE_READ | 1392 IB_ACCESS_REMOTE_WRITE; 1393 1394 sg->addr = cpu_to_le64(req->mr->iova); 1395 put_unaligned_le24(req->mr->length, sg->length); 1396 put_unaligned_le32(req->mr->rkey, sg->key); 1397 sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) | 1398 NVME_SGL_FMT_INVALIDATE; 1399 1400 return 0; 1401 } 1402 1403 static void nvme_rdma_set_sig_domain(struct blk_integrity *bi, 1404 struct nvme_command *cmd, struct ib_sig_domain *domain, 1405 u16 control, u8 pi_type) 1406 { 1407 domain->sig_type = IB_SIG_TYPE_T10_DIF; 1408 domain->sig.dif.bg_type = IB_T10DIF_CRC; 1409 domain->sig.dif.pi_interval = 1 << bi->interval_exp; 1410 domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag); 1411 if (control & NVME_RW_PRINFO_PRCHK_REF) 1412 domain->sig.dif.ref_remap = true; 1413 1414 domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag); 1415 domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask); 1416 domain->sig.dif.app_escape = true; 1417 if (pi_type == NVME_NS_DPS_PI_TYPE3) 1418 domain->sig.dif.ref_escape = true; 1419 } 1420 1421 static void nvme_rdma_set_sig_attrs(struct blk_integrity *bi, 1422 struct nvme_command *cmd, struct ib_sig_attrs *sig_attrs, 1423 u8 pi_type) 1424 { 1425 u16 control = le16_to_cpu(cmd->rw.control); 1426 1427 memset(sig_attrs, 0, sizeof(*sig_attrs)); 1428 if (control & NVME_RW_PRINFO_PRACT) { 1429 /* for WRITE_INSERT/READ_STRIP no memory domain */ 1430 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; 1431 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, 1432 pi_type); 1433 /* Clear the PRACT bit since HCA will generate/verify the PI */ 1434 control &= ~NVME_RW_PRINFO_PRACT; 1435 cmd->rw.control = cpu_to_le16(control); 1436 } else { 1437 /* for WRITE_PASS/READ_PASS both wire/memory domains exist */ 1438 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, 1439 pi_type); 1440 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control, 1441 pi_type); 1442 } 1443 } 1444 1445 static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask) 1446 { 1447 *mask = 0; 1448 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF) 1449 *mask |= IB_SIG_CHECK_REFTAG; 1450 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD) 1451 *mask |= IB_SIG_CHECK_GUARD; 1452 } 1453 1454 static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc) 1455 { 1456 if (unlikely(wc->status != IB_WC_SUCCESS)) 1457 nvme_rdma_wr_error(cq, wc, "SIG"); 1458 } 1459 1460 static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue, 1461 struct nvme_rdma_request *req, struct nvme_command *c, 1462 int count, int pi_count) 1463 { 1464 struct nvme_rdma_sgl *sgl = &req->data_sgl; 1465 struct ib_reg_wr *wr = &req->reg_wr; 1466 struct request *rq = blk_mq_rq_from_pdu(req); 1467 struct nvme_ns *ns = rq->q->queuedata; 1468 struct bio *bio = rq->bio; 1469 struct nvme_keyed_sgl_desc *sg = 
&c->common.dptr.ksgl; 1470 int nr; 1471 1472 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs); 1473 if (WARN_ON_ONCE(!req->mr)) 1474 return -EAGAIN; 1475 1476 nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL, 1477 req->metadata_sgl->sg_table.sgl, pi_count, NULL, 1478 SZ_4K); 1479 if (unlikely(nr)) 1480 goto mr_put; 1481 1482 nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c, 1483 req->mr->sig_attrs, ns->pi_type); 1484 nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask); 1485 1486 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); 1487 1488 req->reg_cqe.done = nvme_rdma_sig_done; 1489 memset(wr, 0, sizeof(*wr)); 1490 wr->wr.opcode = IB_WR_REG_MR_INTEGRITY; 1491 wr->wr.wr_cqe = &req->reg_cqe; 1492 wr->wr.num_sge = 0; 1493 wr->wr.send_flags = 0; 1494 wr->mr = req->mr; 1495 wr->key = req->mr->rkey; 1496 wr->access = IB_ACCESS_LOCAL_WRITE | 1497 IB_ACCESS_REMOTE_READ | 1498 IB_ACCESS_REMOTE_WRITE; 1499 1500 sg->addr = cpu_to_le64(req->mr->iova); 1501 put_unaligned_le24(req->mr->length, sg->length); 1502 put_unaligned_le32(req->mr->rkey, sg->key); 1503 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; 1504 1505 return 0; 1506 1507 mr_put: 1508 ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr); 1509 req->mr = NULL; 1510 if (nr < 0) 1511 return nr; 1512 return -EINVAL; 1513 } 1514 1515 static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, 1516 struct request *rq, struct nvme_command *c) 1517 { 1518 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 1519 struct nvme_rdma_device *dev = queue->device; 1520 struct ib_device *ibdev = dev->dev; 1521 int pi_count = 0; 1522 int count, ret; 1523 1524 req->num_sge = 1; 1525 refcount_set(&req->ref, 2); /* send and recv completions */ 1526 1527 c->common.flags |= NVME_CMD_SGL_METABUF; 1528 1529 if (!blk_rq_nr_phys_segments(rq)) 1530 return nvme_rdma_set_sg_null(c); 1531 1532 req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1); 1533 ret = sg_alloc_table_chained(&req->data_sgl.sg_table, 1534 blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl, 1535 NVME_INLINE_SG_CNT); 1536 if (ret) 1537 return -ENOMEM; 1538 1539 req->data_sgl.nents = blk_rq_map_sg(rq->q, rq, 1540 req->data_sgl.sg_table.sgl); 1541 1542 count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl, 1543 req->data_sgl.nents, rq_dma_dir(rq)); 1544 if (unlikely(count <= 0)) { 1545 ret = -EIO; 1546 goto out_free_table; 1547 } 1548 1549 if (blk_integrity_rq(rq)) { 1550 req->metadata_sgl->sg_table.sgl = 1551 (struct scatterlist *)(req->metadata_sgl + 1); 1552 ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table, 1553 blk_rq_count_integrity_sg(rq->q, rq->bio), 1554 req->metadata_sgl->sg_table.sgl, 1555 NVME_INLINE_METADATA_SG_CNT); 1556 if (unlikely(ret)) { 1557 ret = -ENOMEM; 1558 goto out_unmap_sg; 1559 } 1560 1561 req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q, 1562 rq->bio, req->metadata_sgl->sg_table.sgl); 1563 pi_count = ib_dma_map_sg(ibdev, 1564 req->metadata_sgl->sg_table.sgl, 1565 req->metadata_sgl->nents, 1566 rq_dma_dir(rq)); 1567 if (unlikely(pi_count <= 0)) { 1568 ret = -EIO; 1569 goto out_free_pi_table; 1570 } 1571 } 1572 1573 if (req->use_sig_mr) { 1574 ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count); 1575 goto out; 1576 } 1577 1578 if (count <= dev->num_inline_segments) { 1579 if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) && 1580 queue->ctrl->use_inline_data && 1581 blk_rq_payload_bytes(rq) <= 1582 nvme_rdma_inline_data_size(queue)) { 1583 ret = nvme_rdma_map_sg_inline(queue, 
req, c, count); 1584 goto out; 1585 } 1586 1587 if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) { 1588 ret = nvme_rdma_map_sg_single(queue, req, c); 1589 goto out; 1590 } 1591 } 1592 1593 ret = nvme_rdma_map_sg_fr(queue, req, c, count); 1594 out: 1595 if (unlikely(ret)) 1596 goto out_unmap_pi_sg; 1597 1598 return 0; 1599 1600 out_unmap_pi_sg: 1601 if (blk_integrity_rq(rq)) 1602 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl, 1603 req->metadata_sgl->nents, rq_dma_dir(rq)); 1604 out_free_pi_table: 1605 if (blk_integrity_rq(rq)) 1606 sg_free_table_chained(&req->metadata_sgl->sg_table, 1607 NVME_INLINE_METADATA_SG_CNT); 1608 out_unmap_sg: 1609 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, 1610 rq_dma_dir(rq)); 1611 out_free_table: 1612 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT); 1613 return ret; 1614 } 1615 1616 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) 1617 { 1618 struct nvme_rdma_qe *qe = 1619 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); 1620 struct nvme_rdma_request *req = 1621 container_of(qe, struct nvme_rdma_request, sqe); 1622 1623 if (unlikely(wc->status != IB_WC_SUCCESS)) 1624 nvme_rdma_wr_error(cq, wc, "SEND"); 1625 else 1626 nvme_rdma_end_request(req); 1627 } 1628 1629 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, 1630 struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge, 1631 struct ib_send_wr *first) 1632 { 1633 struct ib_send_wr wr; 1634 int ret; 1635 1636 sge->addr = qe->dma; 1637 sge->length = sizeof(struct nvme_command); 1638 sge->lkey = queue->device->pd->local_dma_lkey; 1639 1640 wr.next = NULL; 1641 wr.wr_cqe = &qe->cqe; 1642 wr.sg_list = sge; 1643 wr.num_sge = num_sge; 1644 wr.opcode = IB_WR_SEND; 1645 wr.send_flags = IB_SEND_SIGNALED; 1646 1647 if (first) 1648 first->next = ≀ 1649 else 1650 first = ≀ 1651 1652 ret = ib_post_send(queue->qp, first, NULL); 1653 if (unlikely(ret)) { 1654 dev_err(queue->ctrl->ctrl.device, 1655 "%s failed with error code %d\n", __func__, ret); 1656 } 1657 return ret; 1658 } 1659 1660 static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue, 1661 struct nvme_rdma_qe *qe) 1662 { 1663 struct ib_recv_wr wr; 1664 struct ib_sge list; 1665 int ret; 1666 1667 list.addr = qe->dma; 1668 list.length = sizeof(struct nvme_completion); 1669 list.lkey = queue->device->pd->local_dma_lkey; 1670 1671 qe->cqe.done = nvme_rdma_recv_done; 1672 1673 wr.next = NULL; 1674 wr.wr_cqe = &qe->cqe; 1675 wr.sg_list = &list; 1676 wr.num_sge = 1; 1677 1678 ret = ib_post_recv(queue->qp, &wr, NULL); 1679 if (unlikely(ret)) { 1680 dev_err(queue->ctrl->ctrl.device, 1681 "%s failed with error code %d\n", __func__, ret); 1682 } 1683 return ret; 1684 } 1685 1686 static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue) 1687 { 1688 u32 queue_idx = nvme_rdma_queue_idx(queue); 1689 1690 if (queue_idx == 0) 1691 return queue->ctrl->admin_tag_set.tags[queue_idx]; 1692 return queue->ctrl->tag_set.tags[queue_idx - 1]; 1693 } 1694 1695 static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc) 1696 { 1697 if (unlikely(wc->status != IB_WC_SUCCESS)) 1698 nvme_rdma_wr_error(cq, wc, "ASYNC"); 1699 } 1700 1701 static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg) 1702 { 1703 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg); 1704 struct nvme_rdma_queue *queue = &ctrl->queues[0]; 1705 struct ib_device *dev = queue->device->dev; 1706 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe; 1707 struct nvme_command *cmd = sqe->data; 1708 struct 
ib_sge sge; 1709 int ret; 1710 1711 ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE); 1712 1713 memset(cmd, 0, sizeof(*cmd)); 1714 cmd->common.opcode = nvme_admin_async_event; 1715 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; 1716 cmd->common.flags |= NVME_CMD_SGL_METABUF; 1717 nvme_rdma_set_sg_null(cmd); 1718 1719 sqe->cqe.done = nvme_rdma_async_done; 1720 1721 ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd), 1722 DMA_TO_DEVICE); 1723 1724 ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL); 1725 WARN_ON_ONCE(ret); 1726 } 1727 1728 static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue, 1729 struct nvme_completion *cqe, struct ib_wc *wc) 1730 { 1731 struct request *rq; 1732 struct nvme_rdma_request *req; 1733 1734 rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id); 1735 if (!rq) { 1736 dev_err(queue->ctrl->ctrl.device, 1737 "tag 0x%x on QP %#x not found\n", 1738 cqe->command_id, queue->qp->qp_num); 1739 nvme_rdma_error_recovery(queue->ctrl); 1740 return; 1741 } 1742 req = blk_mq_rq_to_pdu(rq); 1743 1744 req->status = cqe->status; 1745 req->result = cqe->result; 1746 1747 if (wc->wc_flags & IB_WC_WITH_INVALIDATE) { 1748 if (unlikely(!req->mr || 1749 wc->ex.invalidate_rkey != req->mr->rkey)) { 1750 dev_err(queue->ctrl->ctrl.device, 1751 "Bogus remote invalidation for rkey %#x\n", 1752 req->mr ? req->mr->rkey : 0); 1753 nvme_rdma_error_recovery(queue->ctrl); 1754 } 1755 } else if (req->mr) { 1756 int ret; 1757 1758 ret = nvme_rdma_inv_rkey(queue, req); 1759 if (unlikely(ret < 0)) { 1760 dev_err(queue->ctrl->ctrl.device, 1761 "Queueing INV WR for rkey %#x failed (%d)\n", 1762 req->mr->rkey, ret); 1763 nvme_rdma_error_recovery(queue->ctrl); 1764 } 1765 /* the local invalidation completion will end the request */ 1766 return; 1767 } 1768 1769 nvme_rdma_end_request(req); 1770 } 1771 1772 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) 1773 { 1774 struct nvme_rdma_qe *qe = 1775 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); 1776 struct nvme_rdma_queue *queue = wc->qp->qp_context; 1777 struct ib_device *ibdev = queue->device->dev; 1778 struct nvme_completion *cqe = qe->data; 1779 const size_t len = sizeof(struct nvme_completion); 1780 1781 if (unlikely(wc->status != IB_WC_SUCCESS)) { 1782 nvme_rdma_wr_error(cq, wc, "RECV"); 1783 return; 1784 } 1785 1786 /* sanity checking for received data length */ 1787 if (unlikely(wc->byte_len < len)) { 1788 dev_err(queue->ctrl->ctrl.device, 1789 "Unexpected nvme completion length(%d)\n", wc->byte_len); 1790 nvme_rdma_error_recovery(queue->ctrl); 1791 return; 1792 } 1793 1794 ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE); 1795 /* 1796 * AEN requests are special as they don't time out and can 1797 * survive any kind of queue freeze and often don't respond to 1798 * aborts. We don't even bother to allocate a struct request 1799 * for them but rather special case them here. 
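	 * (nvme_is_aen_req() picks them out by the reserved command id on the
	 * admin queue.)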
1800 */ 1801 if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue), 1802 cqe->command_id))) 1803 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, 1804 &cqe->result); 1805 else 1806 nvme_rdma_process_nvme_rsp(queue, cqe, wc); 1807 ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE); 1808 1809 nvme_rdma_post_recv(queue, qe); 1810 } 1811 1812 static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue) 1813 { 1814 int ret, i; 1815 1816 for (i = 0; i < queue->queue_size; i++) { 1817 ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); 1818 if (ret) 1819 goto out_destroy_queue_ib; 1820 } 1821 1822 return 0; 1823 1824 out_destroy_queue_ib: 1825 nvme_rdma_destroy_queue_ib(queue); 1826 return ret; 1827 } 1828 1829 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, 1830 struct rdma_cm_event *ev) 1831 { 1832 struct rdma_cm_id *cm_id = queue->cm_id; 1833 int status = ev->status; 1834 const char *rej_msg; 1835 const struct nvme_rdma_cm_rej *rej_data; 1836 u8 rej_data_len; 1837 1838 rej_msg = rdma_reject_msg(cm_id, status); 1839 rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len); 1840 1841 if (rej_data && rej_data_len >= sizeof(u16)) { 1842 u16 sts = le16_to_cpu(rej_data->sts); 1843 1844 dev_err(queue->ctrl->ctrl.device, 1845 "Connect rejected: status %d (%s) nvme status %d (%s).\n", 1846 status, rej_msg, sts, nvme_rdma_cm_msg(sts)); 1847 } else { 1848 dev_err(queue->ctrl->ctrl.device, 1849 "Connect rejected: status %d (%s).\n", status, rej_msg); 1850 } 1851 1852 return -ECONNRESET; 1853 } 1854 1855 static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue) 1856 { 1857 struct nvme_ctrl *ctrl = &queue->ctrl->ctrl; 1858 int ret; 1859 1860 ret = nvme_rdma_create_queue_ib(queue); 1861 if (ret) 1862 return ret; 1863 1864 if (ctrl->opts->tos >= 0) 1865 rdma_set_service_type(queue->cm_id, ctrl->opts->tos); 1866 ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS); 1867 if (ret) { 1868 dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n", 1869 queue->cm_error); 1870 goto out_destroy_queue; 1871 } 1872 1873 return 0; 1874 1875 out_destroy_queue: 1876 nvme_rdma_destroy_queue_ib(queue); 1877 return ret; 1878 } 1879 1880 static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) 1881 { 1882 struct nvme_rdma_ctrl *ctrl = queue->ctrl; 1883 struct rdma_conn_param param = { }; 1884 struct nvme_rdma_cm_req priv = { }; 1885 int ret; 1886 1887 param.qp_num = queue->qp->qp_num; 1888 param.flow_control = 1; 1889 1890 param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom; 1891 /* maximum retry count */ 1892 param.retry_count = 7; 1893 param.rnr_retry_count = 7; 1894 param.private_data = &priv; 1895 param.private_data_len = sizeof(priv); 1896 1897 priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); 1898 priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue)); 1899 /* 1900 * set the admin queue depth to the minimum size 1901 * specified by the Fabrics standard. 1902 */ 1903 if (priv.qid == 0) { 1904 priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH); 1905 priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1); 1906 } else { 1907 /* 1908 * current interpretation of the fabrics spec 1909 * is at minimum you make hrqsize sqsize+1, or a 1910 * 1's based representation of sqsize. 
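	 * (So hrqsize below carries the full 1's based queue size, while
	 * hsqsize carries the 0's based sqsize.)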
1911 */ 1912 priv.hrqsize = cpu_to_le16(queue->queue_size); 1913 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); 1914 } 1915 1916 ret = rdma_connect_locked(queue->cm_id, ¶m); 1917 if (ret) { 1918 dev_err(ctrl->ctrl.device, 1919 "rdma_connect_locked failed (%d).\n", ret); 1920 goto out_destroy_queue_ib; 1921 } 1922 1923 return 0; 1924 1925 out_destroy_queue_ib: 1926 nvme_rdma_destroy_queue_ib(queue); 1927 return ret; 1928 } 1929 1930 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, 1931 struct rdma_cm_event *ev) 1932 { 1933 struct nvme_rdma_queue *queue = cm_id->context; 1934 int cm_error = 0; 1935 1936 dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n", 1937 rdma_event_msg(ev->event), ev->event, 1938 ev->status, cm_id); 1939 1940 switch (ev->event) { 1941 case RDMA_CM_EVENT_ADDR_RESOLVED: 1942 cm_error = nvme_rdma_addr_resolved(queue); 1943 break; 1944 case RDMA_CM_EVENT_ROUTE_RESOLVED: 1945 cm_error = nvme_rdma_route_resolved(queue); 1946 break; 1947 case RDMA_CM_EVENT_ESTABLISHED: 1948 queue->cm_error = nvme_rdma_conn_established(queue); 1949 /* complete cm_done regardless of success/failure */ 1950 complete(&queue->cm_done); 1951 return 0; 1952 case RDMA_CM_EVENT_REJECTED: 1953 cm_error = nvme_rdma_conn_rejected(queue, ev); 1954 break; 1955 case RDMA_CM_EVENT_ROUTE_ERROR: 1956 case RDMA_CM_EVENT_CONNECT_ERROR: 1957 case RDMA_CM_EVENT_UNREACHABLE: 1958 nvme_rdma_destroy_queue_ib(queue); 1959 fallthrough; 1960 case RDMA_CM_EVENT_ADDR_ERROR: 1961 dev_dbg(queue->ctrl->ctrl.device, 1962 "CM error event %d\n", ev->event); 1963 cm_error = -ECONNRESET; 1964 break; 1965 case RDMA_CM_EVENT_DISCONNECTED: 1966 case RDMA_CM_EVENT_ADDR_CHANGE: 1967 case RDMA_CM_EVENT_TIMEWAIT_EXIT: 1968 dev_dbg(queue->ctrl->ctrl.device, 1969 "disconnect received - connection closed\n"); 1970 nvme_rdma_error_recovery(queue->ctrl); 1971 break; 1972 case RDMA_CM_EVENT_DEVICE_REMOVAL: 1973 /* device removal is handled via the ib_client API */ 1974 break; 1975 default: 1976 dev_err(queue->ctrl->ctrl.device, 1977 "Unexpected RDMA CM event (%d)\n", ev->event); 1978 nvme_rdma_error_recovery(queue->ctrl); 1979 break; 1980 } 1981 1982 if (cm_error) { 1983 queue->cm_error = cm_error; 1984 complete(&queue->cm_done); 1985 } 1986 1987 return 0; 1988 } 1989 1990 static void nvme_rdma_complete_timed_out(struct request *rq) 1991 { 1992 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 1993 struct nvme_rdma_queue *queue = req->queue; 1994 1995 nvme_rdma_stop_queue(queue); 1996 if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) { 1997 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; 1998 blk_mq_complete_request(rq); 1999 } 2000 } 2001 2002 static enum blk_eh_timer_return 2003 nvme_rdma_timeout(struct request *rq, bool reserved) 2004 { 2005 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 2006 struct nvme_rdma_queue *queue = req->queue; 2007 struct nvme_rdma_ctrl *ctrl = queue->ctrl; 2008 2009 dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", 2010 rq->tag, nvme_rdma_queue_idx(queue)); 2011 2012 if (ctrl->ctrl.state != NVME_CTRL_LIVE) { 2013 /* 2014 * If we are resetting, connecting or deleting we should 2015 * complete immediately because we may block controller 2016 * teardown or setup sequence 2017 * - ctrl disable/shutdown fabrics requests 2018 * - connect requests 2019 * - initialization admin requests 2020 * - I/O requests that entered after unquiescing and 2021 * the controller stopped responding 2022 * 2023 * All other requests should be cancelled by the error 2024 * recovery 
static void nvme_rdma_complete_timed_out(struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_queue *queue = req->queue;

	nvme_rdma_stop_queue(queue);
	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
		blk_mq_complete_request(rq);
	}
}

static enum blk_eh_timer_return
nvme_rdma_timeout(struct request *rq, bool reserved)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_queue *queue = req->queue;
	struct nvme_rdma_ctrl *ctrl = queue->ctrl;

	dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
		 rq->tag, nvme_rdma_queue_idx(queue));

	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
		/*
		 * If we are resetting, connecting or deleting we should
		 * complete immediately because we may block the controller
		 * teardown or setup sequence:
		 *  - ctrl disable/shutdown fabrics requests
		 *  - connect requests
		 *  - initialization admin requests
		 *  - I/O requests that entered after unquiescing and
		 *    the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail it here.
		 */
		nvme_rdma_complete_timed_out(rq);
		return BLK_EH_DONE;
	}

	/*
	 * LIVE state should trigger the normal error recovery which will
	 * handle completing this request.
	 */
	nvme_rdma_error_recovery(ctrl);
	return BLK_EH_RESET_TIMER;
}

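/*
 * Submission path: DMA-map and fill the command capsule, map the
 * request data via nvme_rdma_map_data(), and post the send work
 * request on the queue pair.  Failures are translated to block layer
 * status codes in the error unwinding at the bottom of the function.
 */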
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_rdma_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_qe *sqe = &req->sqe;
	struct nvme_command *c = nvme_req(rq)->cmd;
	struct ib_device *dev;
	bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
	blk_status_t ret;
	int err;

	WARN_ON_ONCE(rq->tag < 0);

	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

	dev = queue->device->dev;

	req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
					 sizeof(struct nvme_command),
					 DMA_TO_DEVICE);
	err = ib_dma_mapping_error(dev, req->sqe.dma);
	if (unlikely(err))
		return BLK_STS_RESOURCE;

	ib_dma_sync_single_for_cpu(dev, sqe->dma,
			sizeof(struct nvme_command), DMA_TO_DEVICE);

	ret = nvme_setup_cmd(ns, rq);
	if (ret)
		goto unmap_qe;

	blk_mq_start_request(rq);

	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
	    queue->pi_support &&
	    (c->common.opcode == nvme_cmd_write ||
	     c->common.opcode == nvme_cmd_read) &&
	    nvme_ns_has_pi(ns))
		req->use_sig_mr = true;
	else
		req->use_sig_mr = false;

	err = nvme_rdma_map_data(queue, rq, c);
	if (unlikely(err < 0)) {
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", err);
		goto err;
	}

	sqe->cqe.done = nvme_rdma_send_done;

	ib_dma_sync_single_for_device(dev, sqe->dma,
			sizeof(struct nvme_command), DMA_TO_DEVICE);

	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
			req->mr ? &req->reg_wr.wr : NULL);
	if (unlikely(err))
		goto err_unmap;

	return BLK_STS_OK;

err_unmap:
	nvme_rdma_unmap_data(queue, rq);
err:
	if (err == -EIO)
		ret = nvme_host_path_error(rq);
	else if (err == -ENOMEM || err == -EAGAIN)
		ret = BLK_STS_RESOURCE;
	else
		ret = BLK_STS_IOERR;
	nvme_cleanup_cmd(rq);
unmap_qe:
	ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
			    DMA_TO_DEVICE);
	return ret;
}

static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_rdma_queue *queue = hctx->driver_data;

	return ib_process_cq_direct(queue->ib_cq, -1);
}

static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		pr_err("ib_check_mr_status failed, ret %d\n", ret);
		nvme_req(rq)->status = NVME_SC_INVALID_PI;
		return;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
			break;
		case IB_SIG_BAD_REFTAG:
			nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
			break;
		case IB_SIG_BAD_APPTAG:
			nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
			break;
		}
		pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
		       mr_status.sig_err.err_type, mr_status.sig_err.expected,
		       mr_status.sig_err.actual);
	}
}

static void nvme_rdma_complete_rq(struct request *rq)
{
	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_rdma_queue *queue = req->queue;
	struct ib_device *ibdev = queue->device->dev;

	if (req->use_sig_mr)
		nvme_rdma_check_pi_status(req);

	nvme_rdma_unmap_data(queue, rq);
	ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
			    DMA_TO_DEVICE);
	nvme_complete_rq(rq);
}

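/*
 * Distribute blk-mq hardware contexts over the controller's I/O
 * queues.  Default and read queues either get disjoint ranges (when
 * dedicated write queues were requested) or share the same range, and
 * any poll queues are mapped after both.  As a hypothetical example,
 * with 4 default, 2 read and 2 poll queues the queue offsets would be
 * 0, 4 and 6 respectively.
 */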
static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_rdma_ctrl *ctrl = set->driver_data;
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
			ctrl->device->dev, 0);
	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
			ctrl->device->dev, 0);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
			ctrl->io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			ctrl->io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ],
		ctrl->io_queues[HCTX_TYPE_POLL]);

	return 0;
}

static const struct blk_mq_ops nvme_rdma_mq_ops = {
	.queue_rq	= nvme_rdma_queue_rq,
	.complete	= nvme_rdma_complete_rq,
	.init_request	= nvme_rdma_init_request,
	.exit_request	= nvme_rdma_exit_request,
	.init_hctx	= nvme_rdma_init_hctx,
	.timeout	= nvme_rdma_timeout,
	.map_queues	= nvme_rdma_map_queues,
	.poll		= nvme_rdma_poll,
};

static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
	.queue_rq	= nvme_rdma_queue_rq,
	.complete	= nvme_rdma_complete_rq,
	.init_request	= nvme_rdma_init_request,
	.exit_request	= nvme_rdma_exit_request,
	.init_hctx	= nvme_rdma_init_admin_hctx,
	.timeout	= nvme_rdma_timeout,
};

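/*
 * Controller teardown: cancel the error recovery and reconnect work
 * first so they cannot run concurrently, tear down the I/O queues,
 * then quiesce the admin queue and shut down (or merely disable) the
 * controller before the admin queue itself is torn down.
 */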
static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
	cancel_work_sync(&ctrl->err_work);
	cancel_delayed_work_sync(&ctrl->reconnect_work);

	nvme_rdma_teardown_io_queues(ctrl, shutdown);
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	if (shutdown)
		nvme_shutdown_ctrl(&ctrl->ctrl);
	else
		nvme_disable_ctrl(&ctrl->ctrl);
	nvme_rdma_teardown_admin_queue(ctrl, shutdown);
}

static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true);
}

static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_rdma_ctrl *ctrl =
		container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_rdma_shutdown_ctrl(ctrl, false);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure should never happen */
		WARN_ON_ONCE(1);
		return;
	}

	if (nvme_rdma_setup_ctrl(ctrl, false))
		goto out_fail;

	return;

out_fail:
	++ctrl->ctrl.nr_reconnects;
	nvme_rdma_reconnect_or_remove(ctrl);
}

static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
	.name			= "rdma",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS | NVME_F_METADATA_SUPPORTED,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_rdma_free_ctrl,
	.submit_async_event	= nvme_rdma_submit_async_event,
	.delete_ctrl		= nvme_rdma_delete_ctrl,
	.get_address		= nvmf_get_address,
};

/*
 * Fails a connection request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local address, remote address, remote port, SUBSYS NQN>
 *
 * If the local address is not specified in the request, it will match an
 * existing controller with all the other parameters the same and no
 * local port address specified as well.
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
static bool
nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_rdma_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	return found;
}

static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_rdma_ctrl *ctrl;
	int ret;
	bool changed;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_RDMA_IP_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
				opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	INIT_DELAYED_WORK(&ctrl->reconnect_work,
			nvme_rdma_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);

	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues)
		goto out_free_ctrl;

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_kfree_queues;

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
	WARN_ON_ONCE(!changed);

	ret = nvme_rdma_setup_ctrl(ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

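/*
 * Fabrics transport definition: "rdma" requires only a transport
 * address; the transport service ID, host address, reconnect/loss
 * timeouts, extra write/poll queue counts and type of service are
 * optional.
 */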
static struct nvmf_transport_ops nvme_rdma_transport = {
	.name		= "rdma",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			  NVMF_OPT_TOS,
	.create_ctrl	= nvme_rdma_create_ctrl,
};

static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct nvme_rdma_ctrl *ctrl;
	struct nvme_rdma_device *ndev;
	bool found = false;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->dev == ib_device) {
			found = true;
			break;
		}
	}
	mutex_unlock(&device_list_mutex);

	if (!found)
		return;

	/* Delete all controllers using this device */
	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
		if (ctrl->device->dev != ib_device)
			continue;
		nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_rdma_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

static struct ib_client nvme_rdma_ib_client = {
	.name	= "nvme_rdma",
	.remove	= nvme_rdma_remove_one
};

static int __init nvme_rdma_init_module(void)
{
	int ret;

	ret = ib_register_client(&nvme_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_rdma_transport);
	if (ret)
		goto err_unreg_client;

	return 0;

err_unreg_client:
	ib_unregister_client(&nvme_rdma_ib_client);
	return ret;
}

static void __exit nvme_rdma_cleanup_module(void)
{
	struct nvme_rdma_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_rdma_transport);
	ib_unregister_client(&nvme_rdma_ib_client);

	mutex_lock(&nvme_rdma_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_rdma_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_rdma_init_module);
module_exit(nvme_rdma_cleanup_module);

MODULE_LICENSE("GPL v2");