/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct net *net,
                                        struct sockaddr *sa, int salen,
                                        int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static int svc_rdma_secure_port(struct svc_rqst *);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
        .xpo_create = svc_rdma_create,
        .xpo_recvfrom = svc_rdma_recvfrom,
        .xpo_sendto = svc_rdma_sendto,
        .xpo_release_rqst = svc_rdma_release_rqst,
        .xpo_detach = svc_rdma_detach,
        .xpo_free = svc_rdma_free,
        .xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
        .xpo_has_wspace = svc_rdma_has_wspace,
        .xpo_accept = svc_rdma_accept,
        .xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_class = {
        .xcl_name = "rdma",
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_rdma_ops,
        .xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
        .xcl_ident = XPRT_TRANSPORT_RDMA,
};

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *, struct net *,
                                           struct sockaddr *, int, int);
static void svc_rdma_bc_detach(struct svc_xprt *);
static void svc_rdma_bc_free(struct svc_xprt *);

static struct svc_xprt_ops svc_rdma_bc_ops = {
        .xpo_create = svc_rdma_bc_create,
        .xpo_detach = svc_rdma_bc_detach,
        .xpo_free = svc_rdma_bc_free,
        .xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
        .xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_bc_class = {
        .xcl_name = "rdma-bc",
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_rdma_bc_ops,
        .xcl_max_payload = (1024 - RPCRDMA_HDRLEN_MIN)
};

static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
                                           struct net *net,
                                           struct sockaddr *sa, int salen,
                                           int flags)
{
        struct svcxprt_rdma *cma_xprt;
        struct svc_xprt *xprt;

        cma_xprt = rdma_create_xprt(serv, 0);
        if (!cma_xprt)
                return ERR_PTR(-ENOMEM);
        xprt = &cma_xprt->sc_xprt;

        svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
        serv->sv_bc_xprt = xprt;

        dprintk("svcrdma: %s(%p)\n", __func__, xprt);
        return xprt;
}

static void svc_rdma_bc_detach(struct svc_xprt *xprt)
{
        dprintk("svcrdma: %s(%p)\n", __func__, xprt);
}

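/*
 * Free a backchannel transport set up by svc_rdma_bc_create(). The
 * svc_xprt passed in is embedded in the svcxprt_rdma, so freeing the
 * containing structure releases both.
 */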
static void svc_rdma_bc_free(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        dprintk("svcrdma: %s(%p)\n", __func__, xprt);
        if (xprt)
                kfree(rdma);
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_op_ctxt *ctxt;

        ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep,
                                GFP_KERNEL | __GFP_NOFAIL);
        ctxt->xprt = xprt;
        INIT_LIST_HEAD(&ctxt->dto_q);
        ctxt->count = 0;
        ctxt->frmr = NULL;
        atomic_inc(&xprt->sc_ctxt_used);
        return ctxt;
}

void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
        struct svcxprt_rdma *xprt = ctxt->xprt;
        int i;
        for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
                /*
                 * Unmap the DMA addr in the SGE if the lkey matches
                 * the sc_dma_lkey, otherwise, ignore it since it is
                 * an FRMR lkey and will be unmapped later when the
                 * last WR that uses it completes.
                 */
                if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
                        atomic_dec(&xprt->sc_dma_used);
                        ib_dma_unmap_page(xprt->sc_cm_id->device,
                                          ctxt->sge[i].addr,
                                          ctxt->sge[i].length,
                                          ctxt->direction);
                }
        }
}

void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
        struct svcxprt_rdma *xprt;
        int i;

        xprt = ctxt->xprt;
        if (free_pages)
                for (i = 0; i < ctxt->count; i++)
                        put_page(ctxt->pages[i]);

        kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
        atomic_dec(&xprt->sc_ctxt_used);
}

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
        struct svc_rdma_req_map *map;
        map = kmem_cache_alloc(svc_rdma_map_cachep,
                               GFP_KERNEL | __GFP_NOFAIL);
        map->count = 0;
        return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
        kmem_cache_free(svc_rdma_map_cachep, map);
}

/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
        struct svc_xprt *xprt = context;
        dprintk("svcrdma: received CQ event %s (%d), context=%p\n",
                ib_event_msg(event->event), event->event, context);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
        struct svc_xprt *xprt = context;

        switch (event->event) {
        /* These are considered benign events */
        case IB_EVENT_PATH_MIG:
        case IB_EVENT_COMM_EST:
        case IB_EVENT_SQ_DRAINED:
        case IB_EVENT_QP_LAST_WQE_REACHED:
                dprintk("svcrdma: QP event %s (%d) received for QP=%p\n",
                        ib_event_msg(event->event), event->event,
                        event->element.qp);
                break;
        /* These are considered fatal events */
        case IB_EVENT_PATH_MIG_ERR:
        case IB_EVENT_QP_FATAL:
        case IB_EVENT_QP_REQ_ERR:
        case IB_EVENT_QP_ACCESS_ERR:
        case IB_EVENT_DEVICE_FATAL:
        default:
                dprintk("svcrdma: QP ERROR event %s (%d) received for QP=%p, "
                        "closing transport\n",
                        ib_event_msg(event->event), event->event,
                        event->element.qp);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                break;
        }
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
        struct svcxprt_rdma *xprt;
        unsigned long flags;

        spin_lock_irqsave(&dto_lock, flags);
        while (!list_empty(&dto_xprt_q)) {
                xprt = list_entry(dto_xprt_q.next,
                                  struct svcxprt_rdma, sc_dto_q);
                list_del_init(&xprt->sc_dto_q);
                spin_unlock_irqrestore(&dto_lock, flags);

                rq_cq_reap(xprt);
                sq_cq_reap(xprt);

                svc_xprt_put(&xprt->sc_xprt);
                spin_lock_irqsave(&dto_lock, flags);
        }
        spin_unlock_irqrestore(&dto_lock, flags);
}

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called on interrupt context, we
 * need to defer the handling of the I/O to a tasklet
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct svcxprt_rdma *xprt = cq_context;
        unsigned long flags;

        /* Guard against unconditional flush call for destroyed QP */
        if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
                return;

        /*
         * Set the bit regardless of whether or not it's on the list
         * because it may be on the list already due to an SQ
         * completion.
         */
        set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

        /*
         * If this transport is not already on the DTO transport queue,
         * add it
         */
        spin_lock_irqsave(&dto_lock, flags);
        if (list_empty(&xprt->sc_dto_q)) {
                svc_xprt_get(&xprt->sc_xprt);
                list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
        }
        spin_unlock_irqrestore(&dto_lock, flags);

        /* Tasklet does all the work to avoid irqsave locks. */
        tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
        int ret;
        struct ib_wc wc;
        struct svc_rdma_op_ctxt *ctxt = NULL;

        if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
                return;

        ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_rq_poll);

        while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
                ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
                ctxt->wc_status = wc.status;
                ctxt->byte_len = wc.byte_len;
                svc_rdma_unmap_dma(ctxt);
                if (wc.status != IB_WC_SUCCESS) {
                        /* Close the transport */
                        dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                        svc_rdma_put_context(ctxt, 1);
                        svc_xprt_put(&xprt->sc_xprt);
                        continue;
                }
                spin_lock_bh(&xprt->sc_rq_dto_lock);
                list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
                spin_unlock_bh(&xprt->sc_rq_dto_lock);
                svc_xprt_put(&xprt->sc_xprt);
        }

        if (ctxt)
                atomic_inc(&rdma_stat_rq_prod);

        set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
        /*
         * If data arrived before established event,
         * don't enqueue. This defers RPC I/O until the
         * RDMA connection is complete.
         */
        if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
                svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * Process a completion context
 */
static void process_context(struct svcxprt_rdma *xprt,
                            struct svc_rdma_op_ctxt *ctxt)
{
        svc_rdma_unmap_dma(ctxt);

        switch (ctxt->wr_op) {
        case IB_WR_SEND:
                if (ctxt->frmr)
                        pr_err("svcrdma: SEND: ctxt->frmr != NULL\n");
                svc_rdma_put_context(ctxt, 1);
                break;

        case IB_WR_RDMA_WRITE:
                if (ctxt->frmr)
                        pr_err("svcrdma: WRITE: ctxt->frmr != NULL\n");
                svc_rdma_put_context(ctxt, 0);
                break;

        case IB_WR_RDMA_READ:
        case IB_WR_RDMA_READ_WITH_INV:
                svc_rdma_put_frmr(xprt, ctxt->frmr);
                if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
                        struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
                        if (read_hdr) {
                                spin_lock_bh(&xprt->sc_rq_dto_lock);
                                set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
                                list_add_tail(&read_hdr->dto_q,
                                              &xprt->sc_read_complete_q);
                                spin_unlock_bh(&xprt->sc_rq_dto_lock);
                        } else {
                                pr_err("svcrdma: ctxt->read_hdr == NULL\n");
                        }
                        svc_xprt_enqueue(&xprt->sc_xprt);
                }
                svc_rdma_put_context(ctxt, 0);
                break;

        default:
                printk(KERN_ERR "svcrdma: unexpected completion type, "
                       "opcode=%d\n",
                       ctxt->wr_op);
                break;
        }
}

/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_op_ctxt *ctxt = NULL;
        struct ib_wc wc_a[6];
        struct ib_wc *wc;
        struct ib_cq *cq = xprt->sc_sq_cq;
        int ret;

        memset(wc_a, 0, sizeof(wc_a));

        if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
                return;

        ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_sq_poll);
        while ((ret = ib_poll_cq(cq, ARRAY_SIZE(wc_a), wc_a)) > 0) {
                int i;

                for (i = 0; i < ret; i++) {
                        wc = &wc_a[i];
                        if (wc->status != IB_WC_SUCCESS) {
                                dprintk("svcrdma: sq wc err status %s (%d)\n",
                                        ib_wc_status_msg(wc->status),
                                        wc->status);

                                /* Close the transport */
                                set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                        }

                        /* Decrement used SQ WR count */
                        atomic_dec(&xprt->sc_sq_count);
                        wake_up(&xprt->sc_send_wait);

                        ctxt = (struct svc_rdma_op_ctxt *)
                                (unsigned long)wc->wr_id;
                        if (ctxt)
                                process_context(xprt, ctxt);

                        svc_xprt_put(&xprt->sc_xprt);
                }
        }

        if (ctxt)
                atomic_inc(&rdma_stat_sq_prod);
}

static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct svcxprt_rdma *xprt = cq_context;
        unsigned long flags;

        /* Guard against unconditional flush call for destroyed QP */
        if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
                return;

        /*
         * Set the bit regardless of whether or not it's on the list
         * because it may be on the list already due to an RQ
         * completion.
         */
        set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

        /*
         * If this transport is not already on the DTO transport queue,
         * add it
         */
        spin_lock_irqsave(&dto_lock, flags);
        if (list_empty(&xprt->sc_dto_q)) {
                svc_xprt_get(&xprt->sc_xprt);
                list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
        }
        spin_unlock_irqrestore(&dto_lock, flags);

        /* Tasklet does all the work to avoid irqsave locks. */
        tasklet_schedule(&dto_tasklet);
}

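/*
 * Allocate and initialize an svcxprt_rdma: list heads, locks, the send
 * wait queue, and the per-transport limits taken from the module-wide
 * tunables (ORD, maximum request size and count, SQ depth). When
 * @listener is set, the transport is marked XPT_LISTENER.
 */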
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
                                             int listener)
{
        struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

        if (!cma_xprt)
                return NULL;
        svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
        INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
        INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
        INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
        INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
        INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
        init_waitqueue_head(&cma_xprt->sc_send_wait);

        spin_lock_init(&cma_xprt->sc_lock);
        spin_lock_init(&cma_xprt->sc_rq_dto_lock);
        spin_lock_init(&cma_xprt->sc_frmr_q_lock);

        cma_xprt->sc_ord = svcrdma_ord;

        cma_xprt->sc_max_req_size = svcrdma_max_req_size;
        cma_xprt->sc_max_requests = svcrdma_max_requests;
        cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
        atomic_set(&cma_xprt->sc_sq_count, 0);
        atomic_set(&cma_xprt->sc_ctxt_used, 0);

        if (listener)
                set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

        return cma_xprt;
}

int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
        struct ib_recv_wr recv_wr, *bad_recv_wr;
        struct svc_rdma_op_ctxt *ctxt;
        struct page *page;
        dma_addr_t pa;
        int sge_no;
        int buflen;
        int ret;

        ctxt = svc_rdma_get_context(xprt);
        buflen = 0;
        ctxt->direction = DMA_FROM_DEVICE;
        for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
                if (sge_no >= xprt->sc_max_sge) {
                        pr_err("svcrdma: Too many sges (%d)\n", sge_no);
                        goto err_put_ctxt;
                }
                page = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
                ctxt->pages[sge_no] = page;
                pa = ib_dma_map_page(xprt->sc_cm_id->device,
                                     page, 0, PAGE_SIZE,
                                     DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
                        goto err_put_ctxt;
                atomic_inc(&xprt->sc_dma_used);
                ctxt->sge[sge_no].addr = pa;
                ctxt->sge[sge_no].length = PAGE_SIZE;
                ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
                ctxt->count = sge_no + 1;
                buflen += PAGE_SIZE;
        }
        recv_wr.next = NULL;
        recv_wr.sg_list = &ctxt->sge[0];
        recv_wr.num_sge = ctxt->count;
        recv_wr.wr_id = (u64)(unsigned long)ctxt;

        svc_xprt_get(&xprt->sc_xprt);
        ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
        if (ret) {
                svc_rdma_unmap_dma(ctxt);
                svc_rdma_put_context(ctxt, 1);
                svc_xprt_put(&xprt->sc_xprt);
        }
        return ret;

 err_put_ctxt:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 1);
        return -ENOMEM;
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
        struct svcxprt_rdma *listen_xprt = new_cma_id->context;
        struct svcxprt_rdma *newxprt;
        struct sockaddr *sa;

        /* Create a new transport */
        newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
        if (!newxprt) {
                dprintk("svcrdma: failed to create new transport\n");
                return;
        }
        newxprt->sc_cm_id = new_cma_id;
        new_cma_id->context = newxprt;
        dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
                newxprt, newxprt->sc_cm_id, listen_xprt);

        /* Save client advertised inbound read limit for use later in accept. */
        newxprt->sc_ord = client_ird;

        /* Set the local and remote addresses in the transport */
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
        svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

        /*
         * Enqueue the new transport on the accept queue of the listening
         * transport
         */
        spin_lock_bh(&listen_xprt->sc_lock);
        list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
        spin_unlock_bh(&listen_xprt->sc_lock);

        set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
        svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events will be
 * either incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
                               struct rdma_cm_event *event)
{
        struct svcxprt_rdma *xprt = cma_id->context;
        int ret = 0;

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
                        "event = %s (%d)\n", cma_id, cma_id->context,
                        rdma_event_msg(event->event), event->event);
                handle_connect_req(cma_id,
                                   event->param.conn.initiator_depth);
                break;

        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                break;

        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                if (xprt)
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                break;

        default:
                dprintk("svcrdma: Unexpected event on listening endpoint %p, "
                        "event = %s (%d)\n", cma_id,
                        rdma_event_msg(event->event), event->event);
                break;
        }

        return ret;
}

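/*
 * CM event handler for connected endpoints. svc_rdma_accept() installs
 * this handler in place of rdma_listen_handler() once the new
 * connection has been set up. ESTABLISHED clears the connection-pending
 * flag; DISCONNECTED and DEVICE_REMOVAL close the transport and drop
 * the reference taken at ESTABLISHED.
 */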
static int rdma_cma_handler(struct rdma_cm_id *cma_id,
                            struct rdma_cm_event *event)
{
        struct svc_xprt *xprt = cma_id->context;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        switch (event->event) {
        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                svc_xprt_get(xprt);
                dprintk("svcrdma: Connection completed on DTO xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
                svc_xprt_enqueue(xprt);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
                dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
                        svc_xprt_put(xprt);
                }
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
                        "event = %s (%d)\n", cma_id, xprt,
                        rdma_event_msg(event->event), event->event);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
                        svc_xprt_put(xprt);
                }
                break;
        default:
                dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
                        "event = %s (%d)\n", cma_id,
                        rdma_event_msg(event->event), event->event);
                break;
        }
        return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct net *net,
                                        struct sockaddr *sa, int salen,
                                        int flags)
{
        struct rdma_cm_id *listen_id;
        struct svcxprt_rdma *cma_xprt;
        int ret;

        dprintk("svcrdma: Creating RDMA socket\n");
        if (sa->sa_family != AF_INET) {
                dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
                return ERR_PTR(-EAFNOSUPPORT);
        }
        cma_xprt = rdma_create_xprt(serv, 1);
        if (!cma_xprt)
                return ERR_PTR(-ENOMEM);

        listen_id = rdma_create_id(&init_net, rdma_listen_handler, cma_xprt,
                                   RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(listen_id)) {
                ret = PTR_ERR(listen_id);
                dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
                goto err0;
        }

        ret = rdma_bind_addr(listen_id, sa);
        if (ret) {
                dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
                goto err1;
        }
        cma_xprt->sc_cm_id = listen_id;

        ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
        if (ret) {
                dprintk("svcrdma: rdma_listen failed = %d\n", ret);
                goto err1;
        }

        /*
         * We need to use the address from the cm_id in case the
         * caller specified 0 for the port number.
         */
        sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

        return &cma_xprt->sc_xprt;

 err1:
        rdma_destroy_id(listen_id);
 err0:
        kfree(cma_xprt);
        return ERR_PTR(ret);
}

static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
        struct ib_mr *mr;
        struct scatterlist *sg;
        struct svc_rdma_fastreg_mr *frmr;
        u32 num_sg;

        frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
        if (!frmr)
                goto err;

        num_sg = min_t(u32, RPCSVC_MAXPAGES, xprt->sc_frmr_pg_list_len);
        mr = ib_alloc_mr(xprt->sc_pd, IB_MR_TYPE_MEM_REG, num_sg);
        if (IS_ERR(mr))
                goto err_free_frmr;

        sg = kcalloc(RPCSVC_MAXPAGES, sizeof(*sg), GFP_KERNEL);
        if (!sg)
                goto err_free_mr;

        sg_init_table(sg, RPCSVC_MAXPAGES);

        frmr->mr = mr;
        frmr->sg = sg;
        INIT_LIST_HEAD(&frmr->frmr_list);
        return frmr;

 err_free_mr:
        ib_dereg_mr(mr);
 err_free_frmr:
        kfree(frmr);
 err:
        return ERR_PTR(-ENOMEM);
}

static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_fastreg_mr *frmr;

        while (!list_empty(&xprt->sc_frmr_q)) {
                frmr = list_entry(xprt->sc_frmr_q.next,
                                  struct svc_rdma_fastreg_mr, frmr_list);
                list_del_init(&frmr->frmr_list);
                kfree(frmr->sg);
                ib_dereg_mr(frmr->mr);
                kfree(frmr);
        }
}

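/*
 * Take a fast-register MR off the transport's free list, or allocate a
 * fresh one with rdma_alloc_frmr() if the list is empty. sg_nents is
 * reset so the caller starts with an empty page list.
 */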
struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_fastreg_mr *frmr = NULL;

        spin_lock_bh(&rdma->sc_frmr_q_lock);
        if (!list_empty(&rdma->sc_frmr_q)) {
                frmr = list_entry(rdma->sc_frmr_q.next,
                                  struct svc_rdma_fastreg_mr, frmr_list);
                list_del_init(&frmr->frmr_list);
                frmr->sg_nents = 0;
        }
        spin_unlock_bh(&rdma->sc_frmr_q_lock);
        if (frmr)
                return frmr;

        return rdma_alloc_frmr(rdma);
}

void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
                       struct svc_rdma_fastreg_mr *frmr)
{
        if (frmr) {
                ib_dma_unmap_sg(rdma->sc_cm_id->device,
                                frmr->sg, frmr->sg_nents, frmr->direction);
                atomic_dec(&rdma->sc_dma_used);
                spin_lock_bh(&rdma->sc_frmr_q_lock);
                WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
                list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
                spin_unlock_bh(&rdma->sc_frmr_q_lock);
        }
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *listen_rdma;
        struct svcxprt_rdma *newxprt = NULL;
        struct rdma_conn_param conn_param;
        struct ib_cq_init_attr cq_attr = {};
        struct ib_qp_init_attr qp_attr;
        struct ib_device_attr devattr;
        int uninitialized_var(dma_mr_acc);
        int need_dma_mr = 0;
        int ret;
        int i;

        listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
        clear_bit(XPT_CONN, &xprt->xpt_flags);
        /* Get the next entry off the accept list */
        spin_lock_bh(&listen_rdma->sc_lock);
        if (!list_empty(&listen_rdma->sc_accept_q)) {
                newxprt = list_entry(listen_rdma->sc_accept_q.next,
                                     struct svcxprt_rdma, sc_accept_q);
                list_del_init(&newxprt->sc_accept_q);
        }
        if (!list_empty(&listen_rdma->sc_accept_q))
                set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
        spin_unlock_bh(&listen_rdma->sc_lock);
        if (!newxprt)
                return NULL;

        dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
                newxprt, newxprt->sc_cm_id);

        ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
        if (ret) {
                dprintk("svcrdma: could not query device attributes on "
                        "device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
                goto errout;
        }

        /* Qualify the transport resource defaults with the
         * capabilities of this particular device */
        newxprt->sc_max_sge = min((size_t)devattr.max_sge,
                                  (size_t)RPCSVC_MAXPAGES);
        newxprt->sc_max_sge_rd = min_t(size_t, devattr.max_sge_rd,
                                       RPCSVC_MAXPAGES);
        newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
                                       (size_t)svcrdma_max_requests);
        newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

        /*
         * Limit ORD based on client limit, local device limit, and
         * configured svcrdma limit.
         */
        newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
        newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

        newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
        if (IS_ERR(newxprt->sc_pd)) {
                dprintk("svcrdma: error creating PD for connect request\n");
                goto errout;
        }
        cq_attr.cqe = newxprt->sc_sq_depth;
        newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                         sq_comp_handler,
                                         cq_event_handler,
                                         newxprt,
                                         &cq_attr);
        if (IS_ERR(newxprt->sc_sq_cq)) {
                dprintk("svcrdma: error creating SQ CQ for connect request\n");
                goto errout;
        }
        cq_attr.cqe = newxprt->sc_max_requests;
        newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                         rq_comp_handler,
                                         cq_event_handler,
                                         newxprt,
                                         &cq_attr);
        if (IS_ERR(newxprt->sc_rq_cq)) {
                dprintk("svcrdma: error creating RQ CQ for connect request\n");
                goto errout;
        }

        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.event_handler = qp_event_handler;
        qp_attr.qp_context = &newxprt->sc_xprt;
        qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
        qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
        qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
        qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
        qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        qp_attr.qp_type = IB_QPT_RC;
        qp_attr.send_cq = newxprt->sc_sq_cq;
        qp_attr.recv_cq = newxprt->sc_rq_cq;
        dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
                "    cm_id->device=%p, sc_pd->device=%p\n"
                "    cap.max_send_wr = %d\n"
                "    cap.max_recv_wr = %d\n"
                "    cap.max_send_sge = %d\n"
                "    cap.max_recv_sge = %d\n",
                newxprt->sc_cm_id, newxprt->sc_pd,
                newxprt->sc_cm_id->device, newxprt->sc_pd->device,
                qp_attr.cap.max_send_wr,
                qp_attr.cap.max_recv_wr,
                qp_attr.cap.max_send_sge,
                qp_attr.cap.max_recv_sge);

        ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
        if (ret) {
                dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
                goto errout;
        }
        newxprt->sc_qp = newxprt->sc_cm_id->qp;

        /*
         * Use the most secure set of MR resources based on the
         * transport type and available memory management features in
         * the device. Here's the table implemented below:
         *
         *              Fast    Global  DMA     Remote WR
         *              Reg     LKEY    MR      Access
         *              Sup'd   Sup'd   Needed  Needed
         *
         * IWARP        N       N       Y       Y
         *              N       Y       Y       Y
         *              Y       N       Y       N
         *              Y       Y       N       -
         *
         * IB           N       N       Y       N
         *              N       Y       N       -
         *              Y       N       Y       N
         *              Y       Y       N       -
         *
         * NB:  iWARP requires remote write access for the data sink
         *      of an RDMA_READ. IB does not.
         */
        newxprt->sc_reader = rdma_read_chunk_lcl;
        if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
                newxprt->sc_frmr_pg_list_len =
                        devattr.max_fast_reg_page_list_len;
                newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
                newxprt->sc_reader = rdma_read_chunk_frmr;
        }

        /*
         * Determine if a DMA MR is required and if so, what privs are required
         */
        if (!rdma_protocol_iwarp(newxprt->sc_cm_id->device,
                                 newxprt->sc_cm_id->port_num) &&
            !rdma_ib_or_roce(newxprt->sc_cm_id->device,
                             newxprt->sc_cm_id->port_num))
                goto errout;

        if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG) ||
            !(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
                need_dma_mr = 1;
                dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
                if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
                                        newxprt->sc_cm_id->port_num) &&
                    !(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG))
                        dma_mr_acc |= IB_ACCESS_REMOTE_WRITE;
        }

        if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
                                newxprt->sc_cm_id->port_num))
                newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;

        /* Create the DMA MR if needed, otherwise, use the DMA LKEY */
        if (need_dma_mr) {
                /* Register all of physical memory */
                newxprt->sc_phys_mr =
                        ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
                if (IS_ERR(newxprt->sc_phys_mr)) {
                        dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
                                ret);
                        goto errout;
                }
                newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
        } else
                newxprt->sc_dma_lkey =
                        newxprt->sc_cm_id->device->local_dma_lkey;

        /* Post receive buffers */
        for (i = 0; i < newxprt->sc_max_requests; i++) {
                ret = svc_rdma_post_recv(newxprt);
                if (ret) {
                        dprintk("svcrdma: failure posting receive buffers\n");
                        goto errout;
                }
        }

        /* Swap out the handler */
        newxprt->sc_cm_id->event_handler = rdma_cma_handler;

        /*
         * Arm the CQs for the SQ and RQ before accepting so we can't
         * miss the first message
         */
        ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
        ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

        /* Accept Connection */
        set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
        memset(&conn_param, 0, sizeof conn_param);
        conn_param.responder_resources = 0;
        conn_param.initiator_depth = newxprt->sc_ord;
        ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
        if (ret) {
                dprintk("svcrdma: failed to accept new connection, ret=%d\n",
                        ret);
                goto errout;
        }

        dprintk("svcrdma: new connection %p accepted with the following "
                "attributes:\n"
                "    local_ip        : %pI4\n"
                "    local_port      : %d\n"
                "    remote_ip       : %pI4\n"
                "    remote_port     : %d\n"
                "    max_sge         : %d\n"
                "    max_sge_rd      : %d\n"
                "    sq_depth        : %d\n"
                "    max_requests    : %d\n"
                "    ord             : %d\n",
                newxprt,
                &((struct sockaddr_in *)&newxprt->sc_cm_id->
                        route.addr.src_addr)->sin_addr.s_addr,
                ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
                       route.addr.src_addr)->sin_port),
                &((struct sockaddr_in *)&newxprt->sc_cm_id->
                        route.addr.dst_addr)->sin_addr.s_addr,
                ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
                       route.addr.dst_addr)->sin_port),
                newxprt->sc_max_sge,
                newxprt->sc_max_sge_rd,
                newxprt->sc_sq_depth,
                newxprt->sc_max_requests,
                newxprt->sc_ord);

        return &newxprt->sc_xprt;

 errout:
        dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
        /* Take a reference in case the DTO handler runs */
        svc_xprt_get(&newxprt->sc_xprt);
        if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
                ib_destroy_qp(newxprt->sc_qp);
        rdma_destroy_id(newxprt->sc_cm_id);
        /* This call to put will destroy the transport */
        svc_xprt_put(&newxprt->sc_xprt);
        return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum, one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        dprintk("svc: svc_rdma_detach(%p)\n", xprt);

        /* Disconnect and flush posted WQE */
        rdma_disconnect(rdma->sc_cm_id);
}

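/*
 * Deferred transport destruction, run from the svc_rdma_wq workqueue
 * (see svc_rdma_free() below) rather than directly from the final
 * kref_put, presumably because tearing down the QP, CQs, PD and cm_id
 * may sleep. Unprocessed read and recv contexts are released first,
 * while the cm_id's device pointer is still available for DMA unmap.
 */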
static void __svc_rdma_free(struct work_struct *work)
{
        struct svcxprt_rdma *rdma =
                container_of(work, struct svcxprt_rdma, sc_work);
        dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

        /* We should only be called from kref_put */
        if (atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0)
                pr_err("svcrdma: sc_xprt still in use? (%d)\n",
                       atomic_read(&rdma->sc_xprt.xpt_ref.refcount));

        /*
         * Destroy queued, but not processed read completions. Note
         * that this cleanup has to be done before destroying the
         * cm_id because the device ptr is needed to unmap the dma in
         * svc_rdma_put_context.
         */
        while (!list_empty(&rdma->sc_read_complete_q)) {
                struct svc_rdma_op_ctxt *ctxt;
                ctxt = list_entry(rdma->sc_read_complete_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
                svc_rdma_put_context(ctxt, 1);
        }

        /* Destroy queued, but not processed recv completions */
        while (!list_empty(&rdma->sc_rq_dto_q)) {
                struct svc_rdma_op_ctxt *ctxt;
                ctxt = list_entry(rdma->sc_rq_dto_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
                svc_rdma_put_context(ctxt, 1);
        }

        /* Warn if we leaked a resource or under-referenced */
        if (atomic_read(&rdma->sc_ctxt_used) != 0)
                pr_err("svcrdma: ctxt still in use? (%d)\n",
                       atomic_read(&rdma->sc_ctxt_used));
        if (atomic_read(&rdma->sc_dma_used) != 0)
                pr_err("svcrdma: dma still in use? (%d)\n",
                       atomic_read(&rdma->sc_dma_used));

        /* De-allocate fastreg mr */
        rdma_dealloc_frmr_q(rdma);

        /* Destroy the QP if present (not a listener) */
        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
                ib_destroy_qp(rdma->sc_qp);

        if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
                ib_destroy_cq(rdma->sc_sq_cq);

        if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
                ib_destroy_cq(rdma->sc_rq_cq);

        if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
                ib_dereg_mr(rdma->sc_phys_mr);

        if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
                ib_dealloc_pd(rdma->sc_pd);

        /* Destroy the CM ID */
        rdma_destroy_id(rdma->sc_cm_id);

        kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        INIT_WORK(&rdma->sc_work, __svc_rdma_free);
        queue_work(svc_rdma_wq, &rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        /*
         * If there are already waiters on the SQ,
         * return false.
         */
        if (waitqueue_active(&rdma->sc_send_wait))
                return 0;

        /* Otherwise return true. */
        return 1;
}

static int svc_rdma_secure_port(struct svc_rqst *rqstp)
{
        return 1;
}

int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
        struct ib_send_wr *bad_wr, *n_wr;
        int wr_count;
        int i;
        int ret;

        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
                return -ENOTCONN;

        wr_count = 1;
        for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
                wr_count++;

        /* If the SQ is full, wait until an SQ entry is available */
        while (1) {
                spin_lock_bh(&xprt->sc_lock);
                if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
                        spin_unlock_bh(&xprt->sc_lock);
                        atomic_inc(&rdma_stat_sq_starve);

                        /* See if we can opportunistically reap SQ WR to make room */
                        sq_cq_reap(xprt);

                        /* Wait until SQ WR available if SQ still full */
                        wait_event(xprt->sc_send_wait,
                                   atomic_read(&xprt->sc_sq_count) <
                                   xprt->sc_sq_depth);
                        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
                                return -ENOTCONN;
                        continue;
                }
                /* Take a transport ref for each WR posted */
                for (i = 0; i < wr_count; i++)
                        svc_xprt_get(&xprt->sc_xprt);

                /* Bump used SQ WR count and post */
                atomic_add(wr_count, &xprt->sc_sq_count);
                ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
                if (ret) {
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                        atomic_sub(wr_count, &xprt->sc_sq_count);
                        for (i = 0; i < wr_count; i++)
                                svc_xprt_put(&xprt->sc_xprt);
                        dprintk("svcrdma: failed to post SQ WR rc=%d, "
                                "sc_sq_count=%d, sc_sq_depth=%d\n",
                                ret, atomic_read(&xprt->sc_sq_count),
                                xprt->sc_sq_depth);
                }
                spin_unlock_bh(&xprt->sc_lock);
                if (ret)
                        wake_up(&xprt->sc_send_wait);
                break;
        }
        return ret;
}

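/*
 * Build an RPC-over-RDMA error reply in a freshly allocated page, map
 * it for DMA, and post it as a single-SGE SEND through svc_rdma_send().
 * If the post fails, the context and its page are unmapped and released
 * here.
 */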
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
                         enum rpcrdma_errcode err)
{
        struct ib_send_wr err_wr;
        struct page *p;
        struct svc_rdma_op_ctxt *ctxt;
        __be32 *va;
        int length;
        int ret;

        p = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
        va = page_address(p);

        /* XDR encode error */
        length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

        ctxt = svc_rdma_get_context(xprt);
        ctxt->direction = DMA_FROM_DEVICE;
        ctxt->count = 1;
        ctxt->pages[0] = p;

        /* Prepare SGE for local address */
        ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
                                            p, 0, length, DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
                put_page(p);
                svc_rdma_put_context(ctxt, 1);
                return;
        }
        atomic_inc(&xprt->sc_dma_used);
        ctxt->sge[0].lkey = xprt->sc_dma_lkey;
        ctxt->sge[0].length = length;

        /* Prepare SEND WR */
        memset(&err_wr, 0, sizeof err_wr);
        ctxt->wr_op = IB_WR_SEND;
        err_wr.wr_id = (unsigned long)ctxt;
        err_wr.sg_list = ctxt->sge;
        err_wr.num_sge = 1;
        err_wr.opcode = IB_WR_SEND;
        err_wr.send_flags = IB_SEND_SIGNALED;

        /* Post It */
        ret = svc_rdma_send(xprt, &err_wr);
        if (ret) {
                dprintk("svcrdma: Error %d posting send for protocol error\n",
                        ret);
                svc_rdma_unmap_dma(ctxt);
                svc_rdma_put_context(ctxt, 1);
        }
}