/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow up to a page of inline data to go with the SQE
 */
#define NVMET_RDMA_INLINE_DATA_SIZE	PAGE_SIZE

struct nvmet_rdma_cmd {
	struct ib_sge sge[2];
	struct ib_cqe cqe;
	struct ib_recv_wr wr;
	struct scatterlist inline_sg;
	struct page *inline_page;
	struct nvme_command *nvme_cmd;
	struct nvmet_rdma_queue *queue;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

struct nvmet_rdma_rsp {
	struct ib_sge send_sge;
	struct ib_cqe send_cqe;
	struct ib_send_wr send_wr;

	struct nvmet_rdma_cmd *cmd;
	struct nvmet_rdma_queue *queue;

	struct ib_cqe read_cqe;
	struct rdma_rw_ctx rw;

	struct nvmet_req req;

	u8 n_rdma;
	u32 flags;
	u32 invalidate_rkey;

	struct list_head wait_list;
	struct list_head free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
	NVMET_RDMA_IN_DEVICE_REMOVAL,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id *cm_id;
	struct nvmet_port *port;
	struct ib_cq *cq;
	atomic_t sq_wr_avail;
	struct nvmet_rdma_device *dev;
	spinlock_t state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq nvme_cq;
	struct nvmet_sq nvme_sq;

	struct nvmet_rdma_rsp *rsps;
	struct list_head free_rsps;
	spinlock_t rsps_lock;
	struct nvmet_rdma_cmd *cmds;

	struct work_struct release_work;
	struct list_head rsp_wait_list;
	struct list_head rsp_wr_wait_list;
	spinlock_t rsp_wr_wait_lock;

	int idx;
	int host_qid;
	int recv_queue_size;
	int send_queue_size;

	struct list_head queue_list;
};

struct nvmet_rdma_device {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct nvmet_rdma_cmd *srq_cmds;
	size_t srq_size;
	struct kref ref;
	struct list_head entry;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);

static struct nvmet_fabrics_ops nvmet_rdma_ops;

/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.rsp->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry(&queue->free_rsps,
			struct nvmet_rdma_rsp, free_list);
	list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

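/*
 * Each RDMA RECV is posted with up to two SGEs: sge[0] always points at a
 * DMA-mapped nvme_command capsule, and for I/O queues sge[1] points at a
 * page used for in-capsule (inline) write data.  Admin queues only post
 * the command capsule.
 */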
static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin) {
		c->inline_page = alloc_pages(GFP_KERNEL,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
		if (!c->inline_page)
			goto out_unmap_cmd;
		c->sge[1].addr = ib_dma_map_page(ndev->device,
				c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
				DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
			goto out_free_inline_page;
		c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
		c->sge[1].lkey = ndev->pd->local_dma_lkey;
	}

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : 2;

	return 0;

out_free_inline_page:
	if (!admin) {
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin) {
		ib_dma_unmap_page(ndev->device, c->sge[1].addr,
				NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
		__free_pages(c->inline_page,
				get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	}
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
	if (!r->req.rsp)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->send_sge.length = sizeof(*r->req.rsp);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	return 0;

out_free_rsp:
	kfree(r->req.rsp);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	kfree(r->req.rsp);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	struct ib_recv_wr *bad_wr;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (ndev->srq)
		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
}

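/*
 * Commands that could not be started because the send queue ran out of
 * work requests sit on queue->rsp_wr_wait_list.  Whenever a response
 * completes and returns its WR credits (see nvmet_rdma_release_rsp),
 * retry the waiters in order until one of them fails to get credits
 * again.
 */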
static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}

static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma) {
		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, rsp->req.sg,
				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	}

	if (rsp->req.sg != &rsp->cmd->inline_sg)
		sgl_free(rsp->req.sg);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * we didn't set up the controller yet in case
		 * of admin connect error, just disconnect and
		 * clean up the queue
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(rsp->queue);
	}
}

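/*
 * Queue an NVMe completion back to the host.  If the command produced
 * Data-Out, the RDMA WRITE work requests are chained in front of the SEND
 * so a single ib_post_send() transfers the data followed by the
 * completion.  The command's RECV buffer is re-posted before the response
 * goes out.
 */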
static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr, *bad_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	nvmet_req_execute(&rsp->req);
}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	sg_init_table(&rsp->cmd->inline_sg, 1);
	sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
	rsp->req.sg = &rsp->cmd->inline_sg;
	rsp->req.sg_cnt = 1;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd))
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

	if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}

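/*
 * Map a keyed SGL (host memory address + rkey + length) for RDMA.  The
 * data is staged in a locally allocated scatterlist and an rdma_rw context
 * is set up for the READ/WRITE transfer; the number of work requests it
 * needs is accumulated in rsp->n_rdma for send queue accounting.  If the
 * host asked for remote invalidation, remember the rkey so the response
 * SEND can use SEND_WITH_INV.
 */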
static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 len = get_unaligned_le24(sgl->length);
	u32 key = get_unaligned_le32(sgl->key);
	int ret;

	/* no data command? */
	if (!len)
		return 0;

	rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
	if (!rsp->req.sg)
		return NVME_SC_INTERNAL;

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
	if (ret < 0)
		return NVME_SC_INTERNAL;
	rsp->req.transfer_len += len;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		nvmet_req_execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

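/*
 * RECV completion: a new command capsule arrived from the host.  Pair it
 * with a free response context and hand it to the core.  Commands that
 * arrive while the queue is still CONNECTING (i.e. before the CM
 * ESTABLISHED event) are parked on rsp_wait_list and replayed by
 * nvmet_rdma_queue_established().
 */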
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
{
	if (!ndev->srq)
		return;

	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
	ib_destroy_srq(ndev->srq);
}

static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	struct ib_srq *srq;
	size_t srq_size;
	int ret, i;

	srq_size = 4095;	/* XXX: tune */

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 2;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(ndev->srq_cmds)) {
		ret = PTR_ERR(ndev->srq_cmds);
		goto out_destroy_srq;
	}

	ndev->srq = srq;
	ndev->srq_size = srq_size;

	for (i = 0; i < srq_size; i++)
		nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);

	return 0;

out_destroy_srq:
	ib_destroy_srq(srq);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srq(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

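/*
 * One nvmet_rdma_device is kept per underlying ib_device (matched by node
 * GUID) and shared by all queues and ports on that device.  The entry is
 * refcounted; the last kref put (nvmet_rdma_free_dev above) unlinks it
 * from device_list and releases the PD and optional SRQ.
 */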
static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_rdma_device *ndev;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srq(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int comp_vector, nr_cqe, ret, i;

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_sge);

	if (ndev->srq) {
		qp_attr.srq = ndev->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 2;
	}

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!ndev->srq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
		}
	}

out:
	return ret;

err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;
}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	ib_drain_qp(queue->cm_id->qp);
	rdma_destroy_qp(queue->cm_id);
	ib_free_cq(queue->cq);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_debug("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->dev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct rdma_cm_id *cm_id = queue->cm_id;
	struct nvmet_rdma_device *dev = queue->dev;
	enum nvmet_rdma_queue_state state = queue->state;

	nvmet_rdma_free_queue(queue);

	if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
		rdma_destroy_id(cm_id);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}

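/*
 * The host sends an nvme_rdma_cm_req blob as RDMA CM private data with
 * its connect request.  Validate the record format and derive our queue
 * sizes from it: hsqsize (host send queue) sizes our receive side,
 * hrqsize (host receive queue) sizes our send side.
 */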
static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
		struct nvmet_rdma_queue *queue)
{
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);

	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
		return NVME_RDMA_CM_INVALID_HSQSIZE;

	/* XXX: Should we enforce some kind of max for IO queues? */

	return 0;
}

static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
		enum nvme_rdma_cm_status status)
{
	struct nvme_rdma_cm_rej rej;

	pr_debug("rejecting connect request: status %d (%s)\n",
		 status, nvme_rdma_cm_msg(status));

	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	rej.sts = cpu_to_le16(status);

	return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
}

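/*
 * Build the target-side queue for an incoming connect request.  On
 * failure the connection is rejected with an NVMe/RDMA CM status code and
 * NULL is returned; the caller only has to drop its device reference.
 */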
static struct nvmet_rdma_queue *
nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
		struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_reject;
	}

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_queue;
	}

	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
	if (ret)
		goto out_destroy_sq;

	/*
	 * Schedules the actual release because calling rdma_destroy_id from
	 * inside a CM callback would trigger a deadlock. (great API design..)
	 */
	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
	queue->dev = ndev;
	queue->cm_id = cm_id;

	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_RDMA_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->rsp_wait_list);
	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
	spin_lock_init(&queue->rsp_wr_wait_lock);
	INIT_LIST_HEAD(&queue->free_rsps);
	spin_lock_init(&queue->rsps_lock);
	INIT_LIST_HEAD(&queue->queue_list);

	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_destroy_sq;
	}

	ret = nvmet_rdma_alloc_rsps(queue);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_ida_remove;
	}

	if (!ndev->srq) {
		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
				queue->recv_queue_size,
				!queue->host_qid);
		if (IS_ERR(queue->cmds)) {
			ret = NVME_RDMA_CM_NO_RSC;
			goto out_free_responses;
		}
	}

	ret = nvmet_rdma_create_queue_ib(queue);
	if (ret) {
		pr_err("%s: creating RDMA queue failed (%d).\n",
			__func__, ret);
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_cmds;
	}

	return queue;

out_free_cmds:
	if (!ndev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
out_free_responses:
	nvmet_rdma_free_rsps(queue);
out_ida_remove:
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
	kfree(queue);
out_reject:
	nvmet_rdma_cm_reject(cm_id, ret);
	return NULL;
}

static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
{
	struct nvmet_rdma_queue *queue = priv;

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(queue->cm_id, event->event);
		break;
	default:
		pr_err("received IB QP event: %s (%d)\n",
		       ib_event_msg(event->event), event->event);
		break;
	}
}

static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue,
		struct rdma_conn_param *p)
{
	struct rdma_conn_param param = { };
	struct nvme_rdma_cm_rep priv = { };
	int ret = -ENOMEM;

	param.rnr_retry_count = 7;
	param.flow_control = 1;
	param.initiator_depth = min_t(u8, p->initiator_depth,
		queue->dev->device->attrs.max_qp_init_rd_atom);
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);
	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.crqsize = cpu_to_le16(queue->recv_queue_size);

	ret = rdma_accept(cm_id, &param);
	if (ret)
		pr_err("rdma_accept failed (error code = %d)\n", ret);

	return ret;
}

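/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: look up (or create) the device
 * entry, allocate the queue, and accept the CM connection.  The queue is
 * only added to nvmet_rdma_queue_list once the accept has been issued;
 * cm_id->context carries the nvmet_port set up by the listener.
 */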
static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_device *ndev;
	struct nvmet_rdma_queue *queue;
	int ret = -EINVAL;

	ndev = nvmet_rdma_find_get_device(cm_id);
	if (!ndev) {
		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
		return -ECONNREFUSED;
	}

	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
	if (!queue) {
		ret = -ENOMEM;
		goto put_device;
	}
	queue->port = cm_id->context;

	if (queue->host_qid == 0) {
		/* Let inflight controller teardown complete */
		flush_scheduled_work();
	}

	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
	if (ret)
		goto release_queue;

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	return 0;

release_queue:
	nvmet_rdma_free_queue(queue);
put_device:
	kref_put(&ndev->ref, nvmet_rdma_free_dev);

	return ret;
}

static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->state_lock, flags);
	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
		pr_warn("trying to establish a connected queue\n");
		goto out_unlock;
	}
	queue->state = NVMET_RDMA_Q_LIVE;

	while (!list_empty(&queue->rsp_wait_list)) {
		struct nvmet_rdma_rsp *cmd;

		cmd = list_first_entry(&queue->rsp_wait_list,
					struct nvmet_rdma_rsp, wait_list);
		list_del(&cmd->wait_list);

		spin_unlock_irqrestore(&queue->state_lock, flags);
		nvmet_rdma_handle_command(queue, cmd);
		spin_lock_irqsave(&queue->state_lock, flags);
	}

out_unlock:
	spin_unlock_irqrestore(&queue->state_lock, flags);
}

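/*
 * Tear down one queue.  Callers are expected to have already unlinked the
 * queue from nvmet_rdma_queue_list; the state machine below makes sure the
 * CM disconnect and the release work are only issued once even if teardown
 * is triggered from several paths.
 */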
static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;
	unsigned long flags;

	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);

	spin_lock_irqsave(&queue->state_lock, flags);
	switch (queue->state) {
	case NVMET_RDMA_Q_CONNECTING:
	case NVMET_RDMA_Q_LIVE:
		queue->state = NVMET_RDMA_Q_DISCONNECTING;
	case NVMET_RDMA_IN_DEVICE_REMOVAL:
		disconnect = true;
		break;
	case NVMET_RDMA_Q_DISCONNECTING:
		break;
	}
	spin_unlock_irqrestore(&queue->state_lock, flags);

	if (disconnect) {
		rdma_disconnect(queue->cm_id);
		schedule_work(&queue->release_work);
	}
}

static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list)) {
		list_del_init(&queue->queue_list);
		disconnect = true;
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	if (disconnect)
		__nvmet_rdma_queue_disconnect(queue);
}

static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list))
		list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	pr_err("failed to connect queue %d\n", queue->idx);
	schedule_work(&queue->release_work);
}

/**
 * nvme_rdma_device_removal() - Handle RDMA device removal
 * @cm_id:	rdma_cm id, used for nvmet port
 * @queue:	nvmet rdma queue (cm id qp_context)
 *
 * DEVICE_REMOVAL event notifies us that the RDMA device is about
 * to unplug. Note that this event can be generated on a normal
 * queue cm_id and/or a device bound listener cm_id (where in this
 * case queue will be null).
 *
 * We registered an ib_client to handle device removal for queues,
 * so we only need to handle the listening port cm_ids. In this case
 * we nullify the priv to prevent double cm_id destruction and destroying
 * the cm_id implicitly by returning a non-zero rc to the callout.
 */
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	struct nvmet_port *port;

	if (queue) {
		/*
		 * This is a queue cm_id. We have registered
		 * an ib_client to handle queue removal,
		 * so don't interfere and just return.
		 */
		return 0;
	}

	port = cm_id->context;

	/*
	 * This is a listener cm_id. Make sure that
	 * future remove_port won't invoke a double
	 * cm_id destroy. Use atomic xchg to make sure
	 * we don't compete with remove_port.
	 */
	if (xchg(&port->priv, NULL) != cm_id)
		return 0;

	/*
	 * We need to return 1 so that the core will destroy
	 * its own ID. What a great API design..
	 */
	return 1;
}

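/*
 * Central RDMA CM event dispatcher for both listener and per-queue cm_ids.
 * Note that returning non-zero from this handler makes the RDMA CM core
 * destroy the cm_id itself, which nvmet_rdma_device_removal() above relies
 * on.
 */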
static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue = NULL;
	int ret = 0;

	if (cm_id->qp)
		queue = cm_id->qp->qp_context;

	pr_debug("%s (%d): status %d id %p\n",
		rdma_event_msg(event->event), event->event,
		event->status, cm_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = nvmet_rdma_queue_connect(cm_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		nvmet_rdma_queue_established(queue);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		/*
		 * We might end up here when we already freed the qp
		 * which means queue release sequence is in progress,
		 * so don't get in the way...
		 */
		if (queue)
			nvmet_rdma_queue_disconnect(queue);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		ret = nvmet_rdma_device_removal(cm_id, queue);
		break;
	case RDMA_CM_EVENT_REJECTED:
		pr_debug("Connection rejected: %s\n",
			 rdma_reject_msg(cm_id, event->status));
		/* FALLTHROUGH */
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		nvmet_rdma_queue_connect_fail(cm_id, queue);
		break;
	default:
		pr_err("received unrecognized RDMA CM event %d\n",
			event->event);
		break;
	}

	return ret;
}

static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_rdma_queue *queue;

restart:
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
		if (queue->nvme_sq.ctrl == ctrl) {
			list_del_init(&queue->queue_list);
			mutex_unlock(&nvmet_rdma_queue_mutex);

			__nvmet_rdma_queue_disconnect(queue);
			goto restart;
		}
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);
}

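/*
 * Enable an nvmet port: resolve the configured traddr/trsvcid into a
 * sockaddr and start an RDMA CM listener on it.  The listening cm_id is
 * stashed in port->priv so remove_port (and device removal) can find and
 * destroy it later.
 */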
static int nvmet_rdma_add_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id;
	struct sockaddr_storage addr = { };
	__kernel_sa_family_t af;
	int ret;

	switch (port->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
				port->disc_addr.adrfam);
		return -EINVAL;
	}

	ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
			port->disc_addr.trsvcid, &addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			port->disc_addr.traddr, port->disc_addr.trsvcid);
		return ret;
	}

	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
			RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("CM ID creation failed\n");
		return PTR_ERR(cm_id);
	}

	/*
	 * Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
	ret = rdma_set_afonly(cm_id, 1);
	if (ret) {
		pr_err("rdma_set_afonly failed (%d)\n", ret);
		goto out_destroy_id;
	}

	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
	if (ret) {
		pr_err("binding CM ID to %pISpcs failed (%d)\n",
			(struct sockaddr *)&addr, ret);
		goto out_destroy_id;
	}

	ret = rdma_listen(cm_id, 128);
	if (ret) {
		pr_err("listening to %pISpcs failed (%d)\n",
			(struct sockaddr *)&addr, ret);
		goto out_destroy_id;
	}

	pr_info("enabling port %d (%pISpcs)\n",
		le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
	port->priv = cm_id;
	return 0;

out_destroy_id:
	rdma_destroy_id(cm_id);
	return ret;
}

static void nvmet_rdma_remove_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);

	if (cm_id)
		rdma_destroy_id(cm_id);
}

static struct nvmet_fabrics_ops nvmet_rdma_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_RDMA,
	.sqe_inline_size	= NVMET_RDMA_INLINE_DATA_SIZE,
	.msdbd			= 1,
	.has_keyed_sgls		= 1,
	.add_port		= nvmet_rdma_add_port,
	.remove_port		= nvmet_rdma_remove_port,
	.queue_response		= nvmet_rdma_queue_response,
	.delete_ctrl		= nvmet_rdma_delete_ctrl,
};

static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct nvmet_rdma_queue *queue, *tmp;

	/* Device is being removed, delete all queues using this device */
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
				 queue_list) {
		if (queue->dev->device != ib_device)
			continue;

		pr_info("Removing queue %d\n", queue->idx);
		list_del_init(&queue->queue_list);
		__nvmet_rdma_queue_disconnect(queue);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	flush_scheduled_work();
}

static struct ib_client nvmet_rdma_ib_client = {
	.name	= "nvmet_rdma",
	.remove	= nvmet_rdma_remove_one
};

static int __init nvmet_rdma_init(void)
{
	int ret;

	ret = ib_register_client(&nvmet_rdma_ib_client);
	if (ret)
		return ret;

	ret = nvmet_register_transport(&nvmet_rdma_ops);
	if (ret)
		goto err_ib_client;

	return 0;

err_ib_client:
	ib_unregister_client(&nvmet_rdma_ib_client);
	return ret;
}

static void __exit nvmet_rdma_exit(void)
{
	nvmet_unregister_transport(&nvmet_rdma_ops);
	ib_unregister_client(&nvmet_rdma_ib_client);
	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
	ida_destroy(&nvmet_rdma_queue_ida);
}

module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */