/*
 * linux/fs/9p/trans_rdma.c
 *
 * RDMA transport layer based on the trans_fd.c implementation.
 *
 * Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com>
 * Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#define P9_PORT			5640
#define P9_RDMA_SQ_DEPTH	32
#define P9_RDMA_RQ_DEPTH	32
#define P9_RDMA_SEND_SGE	4
#define P9_RDMA_RECV_SGE	4
#define P9_RDMA_IRD		0
#define P9_RDMA_ORD		0
#define P9_RDMA_TIMEOUT		30000		/* 30 seconds */
#define P9_RDMA_MAXSIZE		(4*4096)	/* Min SGE is 4, so we can
						 * safely advertise a maxsize
						 * of 16k */

/**
 * struct p9_trans_rdma - RDMA transport instance
 *
 * @state: tracks the transport state machine for connection setup and tear down
 * @cm_id: The RDMA CM ID
 * @pd: Protection Domain pointer
 * @qp: Queue Pair pointer
 * @cq: Completion Queue pointer
 * @dma_mr: DMA Memory Region pointer
 * @lkey: The local access only memory region key
 * @timeout: Number of msecs to wait for connection management events
 * @sq_depth: The depth of the Send Queue
 * @sq_sem: Semaphore for the SQ
 * @rq_depth: The depth of the Receive Queue.
 * @rq_count: Count of requests in the Receive Queue.
 * @addr: The remote peer's address
 * @req_lock: Protects the active request list
 * @cm_done: Completion event for connection management tracking
 */
struct p9_trans_rdma {
	enum {
		P9_RDMA_INIT,
		P9_RDMA_ADDR_RESOLVED,
		P9_RDMA_ROUTE_RESOLVED,
		P9_RDMA_CONNECTED,
		P9_RDMA_FLUSHING,
		P9_RDMA_CLOSING,
		P9_RDMA_CLOSED,
	} state;
	struct rdma_cm_id *cm_id;
	struct ib_pd *pd;
	struct ib_qp *qp;
	struct ib_cq *cq;
	struct ib_mr *dma_mr;
	u32 lkey;
	long timeout;
	int sq_depth;
	struct semaphore sq_sem;
	int rq_depth;
	atomic_t rq_count;
	struct sockaddr_in addr;
	spinlock_t req_lock;

	struct completion cm_done;
};
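/*
 * Connection setup walks rdma->state forward (P9_RDMA_INIT ->
 * P9_RDMA_ADDR_RESOLVED -> P9_RDMA_ROUTE_RESOLVED -> P9_RDMA_CONNECTED)
 * as CM events arrive in p9_cm_event_handler(); errors and teardown move
 * it through P9_RDMA_FLUSHING and P9_RDMA_CLOSING to P9_RDMA_CLOSED.
 */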
/**
 * p9_rdma_context - Keeps track of in-process WR
 *
 * @wc_op: The original WR op for when the CQE completes in error.
 * @busa: Bus address to unmap when the WR completes
 * @req: Keeps track of requests (send)
 * @rc: Keeps track of replies (receive)
 */
struct p9_rdma_req;
struct p9_rdma_context {
	enum ib_wc_opcode wc_op;
	dma_addr_t busa;
	union {
		struct p9_req_t *req;
		struct p9_fcall *rc;
	};
};

/**
 * p9_rdma_opts - Collection of mount options
 * @port: port of connection
 * @sq_depth: The requested depth of the SQ. This really doesn't need
 * to be any deeper than the number of threads used in the client
 * @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
 * @timeout: Time to wait in msecs for CM events
 */
struct p9_rdma_opts {
	short port;
	int sq_depth;
	int rq_depth;
	long timeout;
};

/*
 * Option Parsing (code inspired by NFS code)
 */
enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout, Opt_err,
};

static match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_sq_depth, "sq=%u"},
	{Opt_rq_depth, "rq=%u"},
	{Opt_timeout, "timeout=%u"},
	{Opt_err, NULL},
};

/**
 * parse_opts - parse mount options into rdma options structure
 * @params: options string passed from mount
 * @opts: rdma transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */
static int parse_opts(char *params, struct p9_rdma_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->sq_depth = P9_RDMA_SQ_DEPTH;
	opts->rq_depth = P9_RDMA_RQ_DEPTH;
	opts->timeout = P9_RDMA_TIMEOUT;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		P9_DPRINTK(P9_DEBUG_ERROR,
			   "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;
		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		/* Don't call match_int() for Opt_err: args[] is not
		 * filled in for tokens that matched no pattern. */
		if (token == Opt_err)
			continue;
		r = match_int(&args[0], &option);
		if (r < 0) {
			P9_DPRINTK(P9_DEBUG_ERROR,
				   "integer field, but no integer?\n");
			continue;
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_sq_depth:
			opts->sq_depth = option;
			break;
		case Opt_rq_depth:
			opts->rq_depth = option;
			break;
		case Opt_timeout:
			opts->timeout = option;
			break;
		default:
			continue;
		}
	}
	/* RQ must be at least as large as the SQ */
	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
	kfree(tmp_options);
	return 0;
}
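/*
 * For illustration (values here are arbitrary): an option string such as
 * "trans=rdma,port=5640,sq=16,rq=32" is parsed one comma-separated token
 * at a time; tokens not in the table above (e.g. trans=rdma, which
 * selects this module at a higher layer) match Opt_err and are skipped.
 */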
static int
p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct p9_client *c = id->context;
	struct p9_trans_rdma *rdma = c->trans;
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_INIT);
		rdma->state = P9_RDMA_ADDR_RESOLVED;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
		rdma->state = P9_RDMA_ROUTE_RESOLVED;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
		rdma->state = P9_RDMA_CONNECTED;
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		if (rdma)
			rdma->state = P9_RDMA_CLOSED;
		if (c)
			c->status = Disconnected;
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		break;

	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
	case RDMA_CM_EVENT_REJECTED:
	case RDMA_CM_EVENT_CONNECT_REQUEST:
	case RDMA_CM_EVENT_CONNECT_RESPONSE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		c->status = Disconnected;
		rdma_disconnect(rdma->cm_id);
		break;
	default:
		BUG();
	}
	complete(&rdma->cm_done);
	return 0;
}

static void
handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
			    DMA_FROM_DEVICE);

	if (status != IB_WC_SUCCESS)
		goto err_out;

	err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	req->rc = c->rc;
	req->status = REQ_STATUS_RCVD;
	p9_client_cb(client, req);

	return;

 err_out:
	P9_DPRINTK(P9_DEBUG_ERROR, "req %p err %d status %d\n",
		   req, err, status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
}

static void
handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	ib_dma_unmap_single(rdma->cm_id->device,
			    c->busa, c->req->tc->size,
			    DMA_TO_DEVICE);
}

static void qp_event_handler(struct ib_event *event, void *context)
{
	P9_DPRINTK(P9_DEBUG_ERROR, "QP event %d context %p\n", event->event,
		   context);
}

static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct p9_client *client = cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	int ret;
	struct ib_wc wc;

	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id;

		switch (c->wc_op) {
		case IB_WC_RECV:
			atomic_dec(&rdma->rq_count);
			handle_recv(client, rdma, c, wc.status, wc.byte_len);
			break;

		case IB_WC_SEND:
			handle_send(client, rdma, c, wc.status, wc.byte_len);
			up(&rdma->sq_sem);
			break;

		default:
			printk(KERN_ERR "9prdma: unexpected completion type, "
			       "c->wc_op=%d, wc.opcode=%d, status=%d\n",
			       c->wc_op, wc.opcode, wc.status);
			break;
		}
		kfree(c);
	}
}

static void cq_event_handler(struct ib_event *e, void *v)
{
	P9_DPRINTK(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
}

static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
	if (!rdma)
		return;

	if (rdma->dma_mr && !IS_ERR(rdma->dma_mr))
		ib_dereg_mr(rdma->dma_mr);

	if (rdma->qp && !IS_ERR(rdma->qp))
		ib_destroy_qp(rdma->qp);

	if (rdma->pd && !IS_ERR(rdma->pd))
		ib_dealloc_pd(rdma->pd);

	if (rdma->cq && !IS_ERR(rdma->cq))
		ib_destroy_cq(rdma->cq);

	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
		rdma_destroy_id(rdma->cm_id);

	kfree(rdma);
}
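/**
 * post_recv - post one receive buffer to catch a single reply
 * @client: client instance
 * @c: completion context carrying the reply fcall to map for DMA
 *
 * Maps c->rc->sdata for device access and posts a one-SGE receive work
 * request; the context pointer rides along in wr_id so the completion
 * handler can find it.
 */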
static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge sge;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc->sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	sge.addr = c->busa;
	sge.length = client->msize;
	sge.lkey = rdma->lkey;

	wr.next = NULL;
	c->wc_op = IB_WC_RECV;
	wr.wr_id = (unsigned long) c;
	wr.sg_list = &sge;
	wr.num_sge = 1;
	return ib_post_recv(rdma->qp, &wr, &bad_wr);

 error:
	P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
}
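/**
 * rdma_request - post a 9P request on the send queue
 * @client: client instance
 * @req: request to transmit
 *
 * A reply buffer is posted to the receive queue before the request goes
 * out, so every outstanding request has a receive credit; sq_sem then
 * caps the number of in-flight sends at sq_depth.
 */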
static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_send_wr wr, *bad_wr;
	struct ib_sge sge;
	int err = 0;
	unsigned long flags;
	struct p9_rdma_context *c = NULL;
	struct p9_rdma_context *rpl_context = NULL;

	/* Allocate an fcall for the reply */
	rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
	if (!rpl_context) {
		err = -ENOMEM;
		goto err_close;
	}

	/*
	 * If the request has a buffer, steal it, otherwise
	 * allocate a new one.  Typically, requests should already
	 * have receive buffers allocated and just swap them around
	 */
	if (!req->rc) {
		req->rc = kmalloc(sizeof(struct p9_fcall)+client->msize,
				  GFP_NOFS);
		if (req->rc) {
			req->rc->sdata = (char *) req->rc +
					 sizeof(struct p9_fcall);
			req->rc->capacity = client->msize;
		}
	}
	rpl_context->rc = req->rc;
	if (!rpl_context->rc) {
		err = -ENOMEM;
		goto err_free2;
	}

	/*
	 * Post a receive buffer for this request. We need to ensure
	 * there is a reply buffer available for every outstanding
	 * request. A flushed request can result in no reply for an
	 * outstanding request, so we must keep a count to avoid
	 * overflowing the RQ.
	 */
	if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
		err = post_recv(client, rpl_context);
		if (err)
			goto err_free1;
	} else
		atomic_dec(&rdma->rq_count);

	/* remove posted receive buffer from request structure */
	req->rc = NULL;

	/* Post the request */
	c = kmalloc(sizeof *c, GFP_NOFS);
	if (!c) {
		err = -ENOMEM;
		goto err_free1;
	}
	c->req = req;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->req->tc->sdata, c->req->tc->size,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	sge.addr = c->busa;
	sge.length = c->req->tc->size;
	sge.lkey = rdma->lkey;

	wr.next = NULL;
	c->wc_op = IB_WC_SEND;
	wr.wr_id = (unsigned long) c;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	if (down_interruptible(&rdma->sq_sem))
		goto error;

	return ib_post_send(rdma->qp, &wr, &bad_wr);

 error:
	kfree(c);
	kfree(rpl_context->rc);
	kfree(rpl_context);
	P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
 err_free1:
	kfree(rpl_context->rc);
 err_free2:
	kfree(rpl_context);
 err_close:
	spin_lock_irqsave(&rdma->req_lock, flags);
	if (rdma->state < P9_RDMA_CLOSING) {
		rdma->state = P9_RDMA_CLOSING;
		spin_unlock_irqrestore(&rdma->req_lock, flags);
		rdma_disconnect(rdma->cm_id);
	} else
		spin_unlock_irqrestore(&rdma->req_lock, flags);
	return err;
}

static void rdma_close(struct p9_client *client)
{
	struct p9_trans_rdma *rdma;

	if (!client)
		return;

	rdma = client->trans;
	if (!rdma)
		return;

	client->status = Disconnected;
	rdma_disconnect(rdma->cm_id);
	rdma_destroy_trans(rdma);
}

/**
 * alloc_rdma - Allocate and initialize the rdma transport structure
 * @opts: Mount options structure
 */
static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
{
	struct p9_trans_rdma *rdma;

	rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
	if (!rdma)
		return NULL;

	rdma->sq_depth = opts->sq_depth;
	rdma->rq_depth = opts->rq_depth;
	rdma->timeout = opts->timeout;
	spin_lock_init(&rdma->req_lock);
	init_completion(&rdma->cm_done);
	sema_init(&rdma->sq_sem, rdma->sq_depth);
	atomic_set(&rdma->rq_count, 0);

	return rdma;
}

/* it's not clear to me we can do anything after send has been posted */
static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
{
	return 1;
}
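/*
 * Connection establishment below is synchronous: each rdma_* call posts
 * an asynchronous CM request, and wait_for_completion_interruptible()
 * blocks on cm_done until p9_cm_event_handler() has advanced the state
 * machine.
 */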
/**
 * rdma_create_trans - Transport method for creating a transport instance
 * @client: client instance
 * @addr: IP address string
 * @args: Mount options string
 */
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_rdma_opts opts;
	struct p9_trans_rdma *rdma;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;

	/* Parse the transport specific mount options */
	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	/* Create and initialize the RDMA transport structure */
	rdma = alloc_rdma(&opts);
	if (!rdma)
		return -ENOMEM;

	/* Create the RDMA CM ID */
	rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP,
				     IB_QPT_RC);
	if (IS_ERR(rdma->cm_id))
		goto error;

	/* Associate the client with the transport */
	client->trans = rdma;

	/* Resolve the server's address */
	rdma->addr.sin_family = AF_INET;
	rdma->addr.sin_addr.s_addr = in_aton(addr);
	rdma->addr.sin_port = htons(opts.port);
	err = rdma_resolve_addr(rdma->cm_id, NULL,
				(struct sockaddr *)&rdma->addr,
				rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
		goto error;

	/* Resolve the route to the server */
	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
		goto error;

	/* Query the device attributes */
	err = ib_query_device(rdma->cm_id->device, &devattr);
	if (err)
		goto error;

	/* Create the Completion Queue */
	rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
				cq_event_handler, client,
				opts.sq_depth + opts.rq_depth + 1, 0);
	if (IS_ERR(rdma->cq))
		goto error;
	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);

	/* Create the Protection Domain */
	rdma->pd = ib_alloc_pd(rdma->cm_id->device);
	if (IS_ERR(rdma->pd))
		goto error;

	/* Cache the DMA lkey in the transport */
	rdma->dma_mr = NULL;
	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		rdma->lkey = rdma->cm_id->device->local_dma_lkey;
	else {
		rdma->dma_mr = ib_get_dma_mr(rdma->pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(rdma->dma_mr))
			goto error;
		rdma->lkey = rdma->dma_mr->lkey;
	}

	/* Create the Queue Pair */
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = client;
	qp_attr.cap.max_send_wr = opts.sq_depth;
	qp_attr.cap.max_recv_wr = opts.rq_depth;
	qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
	qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = rdma->cq;
	qp_attr.recv_cq = rdma->cq;
	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
	if (err)
		goto error;
	rdma->qp = rdma->cm_id->qp;

	/* Request a connection */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.private_data = NULL;
	conn_param.private_data_len = 0;
	conn_param.responder_resources = P9_RDMA_IRD;
	conn_param.initiator_depth = P9_RDMA_ORD;
	err = rdma_connect(rdma->cm_id, &conn_param);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_CONNECTED))
		goto error;

	client->status = Connected;

	return 0;

 error:
	rdma_destroy_trans(rdma);
	return -ENOTCONN;
}
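/*
 * .def = 0 means this transport is not the default; mounts normally
 * select it explicitly with trans=rdma.
 */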
static struct p9_trans_module p9_rdma_trans = {
	.name = "rdma",
	.maxsize = P9_RDMA_MAXSIZE,
	.def = 0,
	.owner = THIS_MODULE,
	.create = rdma_create_trans,
	.close = rdma_close,
	.request = rdma_request,
	.cancel = rdma_cancel,
};

/**
 * p9_trans_rdma_init - Register the 9P RDMA transport driver
 */
static int __init p9_trans_rdma_init(void)
{
	v9fs_register_trans(&p9_rdma_trans);
	return 0;
}

static void __exit p9_trans_rdma_exit(void)
{
	v9fs_unregister_trans(&p9_rdma_trans);
}

module_init(p9_trans_rdma_init);
module_exit(p9_trans_rdma_exit);

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("RDMA Transport for 9P");
MODULE_LICENSE("Dual BSD/GPL");