1fc79d4b1STom Tucker /* 2fc79d4b1STom Tucker * linux/fs/9p/trans_rdma.c 3fc79d4b1STom Tucker * 4fc79d4b1STom Tucker * RDMA transport layer based on the trans_fd.c implementation. 5fc79d4b1STom Tucker * 6fc79d4b1STom Tucker * Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com> 7fc79d4b1STom Tucker * Copyright (C) 2006 by Russ Cox <rsc@swtch.com> 8fc79d4b1STom Tucker * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net> 9fc79d4b1STom Tucker * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com> 10fc79d4b1STom Tucker * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com> 11fc79d4b1STom Tucker * 12fc79d4b1STom Tucker * This program is free software; you can redistribute it and/or modify 13fc79d4b1STom Tucker * it under the terms of the GNU General Public License version 2 14fc79d4b1STom Tucker * as published by the Free Software Foundation. 15fc79d4b1STom Tucker * 16fc79d4b1STom Tucker * This program is distributed in the hope that it will be useful, 17fc79d4b1STom Tucker * but WITHOUT ANY WARRANTY; without even the implied warranty of 18fc79d4b1STom Tucker * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19fc79d4b1STom Tucker * GNU General Public License for more details. 
20fc79d4b1STom Tucker * 21fc79d4b1STom Tucker * You should have received a copy of the GNU General Public License 22fc79d4b1STom Tucker * along with this program; if not, write to: 23fc79d4b1STom Tucker * Free Software Foundation 24fc79d4b1STom Tucker * 51 Franklin Street, Fifth Floor 25fc79d4b1STom Tucker * Boston, MA 02111-1301 USA 26fc79d4b1STom Tucker * 27fc79d4b1STom Tucker */ 28fc79d4b1STom Tucker 295d385153SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 305d385153SJoe Perches 31fc79d4b1STom Tucker #include <linux/in.h> 32fc79d4b1STom Tucker #include <linux/module.h> 33fc79d4b1STom Tucker #include <linux/net.h> 34fc79d4b1STom Tucker #include <linux/ipv6.h> 35fc79d4b1STom Tucker #include <linux/kthread.h> 36fc79d4b1STom Tucker #include <linux/errno.h> 37fc79d4b1STom Tucker #include <linux/kernel.h> 38fc79d4b1STom Tucker #include <linux/un.h> 39fc79d4b1STom Tucker #include <linux/uaccess.h> 40fc79d4b1STom Tucker #include <linux/inet.h> 41fc79d4b1STom Tucker #include <linux/idr.h> 42fc79d4b1STom Tucker #include <linux/file.h> 43fc79d4b1STom Tucker #include <linux/parser.h> 44fc79d4b1STom Tucker #include <linux/semaphore.h> 455a0e3ad6STejun Heo #include <linux/slab.h> 46c4fac910SDavid Howells #include <linux/seq_file.h> 47fc79d4b1STom Tucker #include <net/9p/9p.h> 48fc79d4b1STom Tucker #include <net/9p/client.h> 49fc79d4b1STom Tucker #include <net/9p/transport.h> 50fc79d4b1STom Tucker #include <rdma/ib_verbs.h> 51fc79d4b1STom Tucker #include <rdma/rdma_cm.h> 52fc79d4b1STom Tucker 53fc79d4b1STom Tucker #define P9_PORT 5640 54fc79d4b1STom Tucker #define P9_RDMA_SQ_DEPTH 32 55fc79d4b1STom Tucker #define P9_RDMA_RQ_DEPTH 32 56fc79d4b1STom Tucker #define P9_RDMA_SEND_SGE 4 57fc79d4b1STom Tucker #define P9_RDMA_RECV_SGE 4 58fc79d4b1STom Tucker #define P9_RDMA_IRD 0 59fc79d4b1STom Tucker #define P9_RDMA_ORD 0 60fc79d4b1STom Tucker #define P9_RDMA_TIMEOUT 30000 /* 30 seconds */ 613fcc62f4SSimon Derr #define P9_RDMA_MAXSIZE (1024*1024) /* 1MB */ 62fc79d4b1STom 
Tucker 63fc79d4b1STom Tucker /** 64fc79d4b1STom Tucker * struct p9_trans_rdma - RDMA transport instance 65fc79d4b1STom Tucker * 66fc79d4b1STom Tucker * @state: tracks the transport state machine for connection setup and tear down 67fc79d4b1STom Tucker * @cm_id: The RDMA CM ID 68fc79d4b1STom Tucker * @pd: Protection Domain pointer 69fc79d4b1STom Tucker * @qp: Queue Pair pointer 70fc79d4b1STom Tucker * @cq: Completion Queue pointer 71fc79d4b1STom Tucker * @timeout: Number of uSecs to wait for connection management events 72c4fac910SDavid Howells * @privport: Whether a privileged port may be used 73c4fac910SDavid Howells * @port: The port to use 74fc79d4b1STom Tucker * @sq_depth: The depth of the Send Queue 75fc79d4b1STom Tucker * @sq_sem: Semaphore for the SQ 76fc79d4b1STom Tucker * @rq_depth: The depth of the Receive Queue. 77fd453d0eSSimon Derr * @rq_sem: Semaphore for the RQ 781cff3306SSimon Derr * @excess_rc : Amount of posted Receive Contexts without a pending request. 791cff3306SSimon Derr * See rdma_request() 80fc79d4b1STom Tucker * @addr: The remote peer's address 81fc79d4b1STom Tucker * @req_lock: Protects the active request list 82fc79d4b1STom Tucker * @cm_done: Completion event for connection management tracking 83fc79d4b1STom Tucker */ 84fc79d4b1STom Tucker struct p9_trans_rdma { 85fc79d4b1STom Tucker enum { 86fc79d4b1STom Tucker P9_RDMA_INIT, 87fc79d4b1STom Tucker P9_RDMA_ADDR_RESOLVED, 88fc79d4b1STom Tucker P9_RDMA_ROUTE_RESOLVED, 89fc79d4b1STom Tucker P9_RDMA_CONNECTED, 90fc79d4b1STom Tucker P9_RDMA_FLUSHING, 91fc79d4b1STom Tucker P9_RDMA_CLOSING, 92fc79d4b1STom Tucker P9_RDMA_CLOSED, 93fc79d4b1STom Tucker } state; 94fc79d4b1STom Tucker struct rdma_cm_id *cm_id; 95fc79d4b1STom Tucker struct ib_pd *pd; 96fc79d4b1STom Tucker struct ib_qp *qp; 97fc79d4b1STom Tucker struct ib_cq *cq; 98fc79d4b1STom Tucker long timeout; 99c4fac910SDavid Howells bool privport; 100c4fac910SDavid Howells u16 port; 101fc79d4b1STom Tucker int sq_depth; 102fc79d4b1STom Tucker 
struct semaphore sq_sem; 103fc79d4b1STom Tucker int rq_depth; 104fd453d0eSSimon Derr struct semaphore rq_sem; 1051cff3306SSimon Derr atomic_t excess_rc; 106fc79d4b1STom Tucker struct sockaddr_in addr; 107fc79d4b1STom Tucker spinlock_t req_lock; 108fc79d4b1STom Tucker 109fc79d4b1STom Tucker struct completion cm_done; 110fc79d4b1STom Tucker }; 111fc79d4b1STom Tucker 112fc79d4b1STom Tucker /** 113fc79d4b1STom Tucker * p9_rdma_context - Keeps track of in-process WR 114fc79d4b1STom Tucker * 115fc79d4b1STom Tucker * @busa: Bus address to unmap when the WR completes 116fc79d4b1STom Tucker * @req: Keeps track of requests (send) 117fc79d4b1STom Tucker * @rc: Keepts track of replies (receive) 118fc79d4b1STom Tucker */ 119fc79d4b1STom Tucker struct p9_rdma_req; 120fc79d4b1STom Tucker struct p9_rdma_context { 1217cf20fc6SChristoph Hellwig struct ib_cqe cqe; 122fc79d4b1STom Tucker dma_addr_t busa; 123fc79d4b1STom Tucker union { 124fc79d4b1STom Tucker struct p9_req_t *req; 125fc79d4b1STom Tucker struct p9_fcall *rc; 126fc79d4b1STom Tucker }; 127fc79d4b1STom Tucker }; 128fc79d4b1STom Tucker 129fc79d4b1STom Tucker /** 130fc79d4b1STom Tucker * p9_rdma_opts - Collection of mount options 131fc79d4b1STom Tucker * @port: port of connection 132fc79d4b1STom Tucker * @sq_depth: The requested depth of the SQ. This really doesn't need 133fc79d4b1STom Tucker * to be any deeper than the number of threads used in the client 134fc79d4b1STom Tucker * @rq_depth: The depth of the RQ. 
Should be greater than or equal to SQ depth 135fc79d4b1STom Tucker * @timeout: Time to wait in msecs for CM events 136fc79d4b1STom Tucker */ 137fc79d4b1STom Tucker struct p9_rdma_opts { 138fc79d4b1STom Tucker short port; 139c4fac910SDavid Howells bool privport; 140fc79d4b1STom Tucker int sq_depth; 141fc79d4b1STom Tucker int rq_depth; 142fc79d4b1STom Tucker long timeout; 143fc79d4b1STom Tucker }; 144fc79d4b1STom Tucker 145fc79d4b1STom Tucker /* 146fc79d4b1STom Tucker * Option Parsing (code inspired by NFS code) 147fc79d4b1STom Tucker */ 148fc79d4b1STom Tucker enum { 149fc79d4b1STom Tucker /* Options that take integer arguments */ 150f569d3efSDominique Martinet Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout, 151f569d3efSDominique Martinet /* Options that take no argument */ 152f569d3efSDominique Martinet Opt_privport, 153f569d3efSDominique Martinet Opt_err, 154fc79d4b1STom Tucker }; 155fc79d4b1STom Tucker 156fc79d4b1STom Tucker static match_table_t tokens = { 157fc79d4b1STom Tucker {Opt_port, "port=%u"}, 158fc79d4b1STom Tucker {Opt_sq_depth, "sq=%u"}, 159fc79d4b1STom Tucker {Opt_rq_depth, "rq=%u"}, 160fc79d4b1STom Tucker {Opt_timeout, "timeout=%u"}, 161f569d3efSDominique Martinet {Opt_privport, "privport"}, 162fc79d4b1STom Tucker {Opt_err, NULL}, 163fc79d4b1STom Tucker }; 164fc79d4b1STom Tucker 165c4fac910SDavid Howells static int p9_rdma_show_options(struct seq_file *m, struct p9_client *clnt) 166c4fac910SDavid Howells { 167c4fac910SDavid Howells struct p9_trans_rdma *rdma = clnt->trans; 168c4fac910SDavid Howells 169c4fac910SDavid Howells if (rdma->port != P9_PORT) 170c4fac910SDavid Howells seq_printf(m, ",port=%u", rdma->port); 171c4fac910SDavid Howells if (rdma->sq_depth != P9_RDMA_SQ_DEPTH) 172c4fac910SDavid Howells seq_printf(m, ",sq=%u", rdma->sq_depth); 173c4fac910SDavid Howells if (rdma->rq_depth != P9_RDMA_RQ_DEPTH) 174c4fac910SDavid Howells seq_printf(m, ",rq=%u", rdma->rq_depth); 175c4fac910SDavid Howells if (rdma->timeout != P9_RDMA_TIMEOUT) 
176c4fac910SDavid Howells seq_printf(m, ",timeout=%lu", rdma->timeout); 177c4fac910SDavid Howells if (rdma->privport) 178c4fac910SDavid Howells seq_puts(m, ",privport"); 179c4fac910SDavid Howells return 0; 180c4fac910SDavid Howells } 181c4fac910SDavid Howells 182fc79d4b1STom Tucker /** 1830e15597eSAbhishek Kulkarni * parse_opts - parse mount options into rdma options structure 1840e15597eSAbhishek Kulkarni * @params: options string passed from mount 1850e15597eSAbhishek Kulkarni * @opts: rdma transport-specific structure to parse options into 186fc79d4b1STom Tucker * 187fc79d4b1STom Tucker * Returns 0 upon success, -ERRNO upon failure 188fc79d4b1STom Tucker */ 189fc79d4b1STom Tucker static int parse_opts(char *params, struct p9_rdma_opts *opts) 190fc79d4b1STom Tucker { 191fc79d4b1STom Tucker char *p; 192fc79d4b1STom Tucker substring_t args[MAX_OPT_ARGS]; 193fc79d4b1STom Tucker int option; 194d8c8a9e3SEric Van Hensbergen char *options, *tmp_options; 195fc79d4b1STom Tucker 196fc79d4b1STom Tucker opts->port = P9_PORT; 197fc79d4b1STom Tucker opts->sq_depth = P9_RDMA_SQ_DEPTH; 198fc79d4b1STom Tucker opts->rq_depth = P9_RDMA_RQ_DEPTH; 199fc79d4b1STom Tucker opts->timeout = P9_RDMA_TIMEOUT; 200c4fac910SDavid Howells opts->privport = false; 201fc79d4b1STom Tucker 202fc79d4b1STom Tucker if (!params) 203fc79d4b1STom Tucker return 0; 204fc79d4b1STom Tucker 205d8c8a9e3SEric Van Hensbergen tmp_options = kstrdup(params, GFP_KERNEL); 206d8c8a9e3SEric Van Hensbergen if (!tmp_options) { 2075d385153SJoe Perches p9_debug(P9_DEBUG_ERROR, 208fc79d4b1STom Tucker "failed to allocate copy of option string\n"); 209fc79d4b1STom Tucker return -ENOMEM; 210fc79d4b1STom Tucker } 211d8c8a9e3SEric Van Hensbergen options = tmp_options; 212fc79d4b1STom Tucker 213fc79d4b1STom Tucker while ((p = strsep(&options, ",")) != NULL) { 214fc79d4b1STom Tucker int token; 215fc79d4b1STom Tucker int r; 216fc79d4b1STom Tucker if (!*p) 217fc79d4b1STom Tucker continue; 218fc79d4b1STom Tucker token = match_token(p, 
tokens, args); 219f569d3efSDominique Martinet if ((token != Opt_err) && (token != Opt_privport)) { 220fc79d4b1STom Tucker r = match_int(&args[0], &option); 221fc79d4b1STom Tucker if (r < 0) { 2225d385153SJoe Perches p9_debug(P9_DEBUG_ERROR, 223fc79d4b1STom Tucker "integer field, but no integer?\n"); 224fc79d4b1STom Tucker continue; 225fc79d4b1STom Tucker } 226f569d3efSDominique Martinet } 227fc79d4b1STom Tucker switch (token) { 228fc79d4b1STom Tucker case Opt_port: 229fc79d4b1STom Tucker opts->port = option; 230fc79d4b1STom Tucker break; 231fc79d4b1STom Tucker case Opt_sq_depth: 232fc79d4b1STom Tucker opts->sq_depth = option; 233fc79d4b1STom Tucker break; 234fc79d4b1STom Tucker case Opt_rq_depth: 235fc79d4b1STom Tucker opts->rq_depth = option; 236fc79d4b1STom Tucker break; 237fc79d4b1STom Tucker case Opt_timeout: 238fc79d4b1STom Tucker opts->timeout = option; 239fc79d4b1STom Tucker break; 240f569d3efSDominique Martinet case Opt_privport: 241c4fac910SDavid Howells opts->privport = true; 242f569d3efSDominique Martinet break; 243fc79d4b1STom Tucker default: 244fc79d4b1STom Tucker continue; 245fc79d4b1STom Tucker } 246fc79d4b1STom Tucker } 247fc79d4b1STom Tucker /* RQ must be at least as large as the SQ */ 248fc79d4b1STom Tucker opts->rq_depth = max(opts->rq_depth, opts->sq_depth); 249d8c8a9e3SEric Van Hensbergen kfree(tmp_options); 250fc79d4b1STom Tucker return 0; 251fc79d4b1STom Tucker } 252fc79d4b1STom Tucker 253fc79d4b1STom Tucker static int 254fc79d4b1STom Tucker p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) 255fc79d4b1STom Tucker { 256fc79d4b1STom Tucker struct p9_client *c = id->context; 257fc79d4b1STom Tucker struct p9_trans_rdma *rdma = c->trans; 258fc79d4b1STom Tucker switch (event->event) { 259fc79d4b1STom Tucker case RDMA_CM_EVENT_ADDR_RESOLVED: 260fc79d4b1STom Tucker BUG_ON(rdma->state != P9_RDMA_INIT); 261fc79d4b1STom Tucker rdma->state = P9_RDMA_ADDR_RESOLVED; 262fc79d4b1STom Tucker break; 263fc79d4b1STom Tucker 264fc79d4b1STom 
Tucker case RDMA_CM_EVENT_ROUTE_RESOLVED: 265fc79d4b1STom Tucker BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED); 266fc79d4b1STom Tucker rdma->state = P9_RDMA_ROUTE_RESOLVED; 267fc79d4b1STom Tucker break; 268fc79d4b1STom Tucker 269fc79d4b1STom Tucker case RDMA_CM_EVENT_ESTABLISHED: 270fc79d4b1STom Tucker BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED); 271fc79d4b1STom Tucker rdma->state = P9_RDMA_CONNECTED; 272fc79d4b1STom Tucker break; 273fc79d4b1STom Tucker 274fc79d4b1STom Tucker case RDMA_CM_EVENT_DISCONNECTED: 275fc79d4b1STom Tucker if (rdma) 276fc79d4b1STom Tucker rdma->state = P9_RDMA_CLOSED; 277fc79d4b1STom Tucker if (c) 278fc79d4b1STom Tucker c->status = Disconnected; 279fc79d4b1STom Tucker break; 280fc79d4b1STom Tucker 281fc79d4b1STom Tucker case RDMA_CM_EVENT_TIMEWAIT_EXIT: 282fc79d4b1STom Tucker break; 283fc79d4b1STom Tucker 284fc79d4b1STom Tucker case RDMA_CM_EVENT_ADDR_CHANGE: 285fc79d4b1STom Tucker case RDMA_CM_EVENT_ROUTE_ERROR: 286fc79d4b1STom Tucker case RDMA_CM_EVENT_DEVICE_REMOVAL: 287fc79d4b1STom Tucker case RDMA_CM_EVENT_MULTICAST_JOIN: 288fc79d4b1STom Tucker case RDMA_CM_EVENT_MULTICAST_ERROR: 289fc79d4b1STom Tucker case RDMA_CM_EVENT_REJECTED: 290fc79d4b1STom Tucker case RDMA_CM_EVENT_CONNECT_REQUEST: 291fc79d4b1STom Tucker case RDMA_CM_EVENT_CONNECT_RESPONSE: 292fc79d4b1STom Tucker case RDMA_CM_EVENT_CONNECT_ERROR: 293fc79d4b1STom Tucker case RDMA_CM_EVENT_ADDR_ERROR: 294fc79d4b1STom Tucker case RDMA_CM_EVENT_UNREACHABLE: 295fc79d4b1STom Tucker c->status = Disconnected; 296fc79d4b1STom Tucker rdma_disconnect(rdma->cm_id); 297fc79d4b1STom Tucker break; 298fc79d4b1STom Tucker default: 299fc79d4b1STom Tucker BUG(); 300fc79d4b1STom Tucker } 301fc79d4b1STom Tucker complete(&rdma->cm_done); 302fc79d4b1STom Tucker return 0; 303fc79d4b1STom Tucker } 304fc79d4b1STom Tucker 305fc79d4b1STom Tucker static void 3067cf20fc6SChristoph Hellwig recv_done(struct ib_cq *cq, struct ib_wc *wc) 307fc79d4b1STom Tucker { 3087cf20fc6SChristoph Hellwig struct p9_client 
*client = cq->cq_context; 3097cf20fc6SChristoph Hellwig struct p9_trans_rdma *rdma = client->trans; 3107cf20fc6SChristoph Hellwig struct p9_rdma_context *c = 3117cf20fc6SChristoph Hellwig container_of(wc->wr_cqe, struct p9_rdma_context, cqe); 312fc79d4b1STom Tucker struct p9_req_t *req; 313fc79d4b1STom Tucker int err = 0; 314fc79d4b1STom Tucker int16_t tag; 315fc79d4b1STom Tucker 316fc79d4b1STom Tucker req = NULL; 317fc79d4b1STom Tucker ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize, 318fc79d4b1STom Tucker DMA_FROM_DEVICE); 319fc79d4b1STom Tucker 3207cf20fc6SChristoph Hellwig if (wc->status != IB_WC_SUCCESS) 321fc79d4b1STom Tucker goto err_out; 322fc79d4b1STom Tucker 323fc79d4b1STom Tucker err = p9_parse_header(c->rc, NULL, NULL, &tag, 1); 324fc79d4b1STom Tucker if (err) 325fc79d4b1STom Tucker goto err_out; 326fc79d4b1STom Tucker 327fc79d4b1STom Tucker req = p9_tag_lookup(client, tag); 328fc79d4b1STom Tucker if (!req) 329fc79d4b1STom Tucker goto err_out; 330fc79d4b1STom Tucker 33147229ff8SSimon Derr /* Check that we have not yet received a reply for this request. 
33247229ff8SSimon Derr */ 33347229ff8SSimon Derr if (unlikely(req->rc)) { 33447229ff8SSimon Derr pr_err("Duplicate reply for request %d", tag); 33547229ff8SSimon Derr goto err_out; 33647229ff8SSimon Derr } 33747229ff8SSimon Derr 338fc79d4b1STom Tucker req->rc = c->rc; 3392b6e72edSDominique Martinet p9_client_cb(client, req, REQ_STATUS_RCVD); 340fc79d4b1STom Tucker 3417cf20fc6SChristoph Hellwig out: 3427cf20fc6SChristoph Hellwig up(&rdma->rq_sem); 3437cf20fc6SChristoph Hellwig kfree(c); 344fc79d4b1STom Tucker return; 345fc79d4b1STom Tucker 346fc79d4b1STom Tucker err_out: 3477cf20fc6SChristoph Hellwig p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", 3487cf20fc6SChristoph Hellwig req, err, wc->status); 349fc79d4b1STom Tucker rdma->state = P9_RDMA_FLUSHING; 350fc79d4b1STom Tucker client->status = Disconnected; 3517cf20fc6SChristoph Hellwig goto out; 352fc79d4b1STom Tucker } 353fc79d4b1STom Tucker 354fc79d4b1STom Tucker static void 3557cf20fc6SChristoph Hellwig send_done(struct ib_cq *cq, struct ib_wc *wc) 356fc79d4b1STom Tucker { 3577cf20fc6SChristoph Hellwig struct p9_client *client = cq->cq_context; 3587cf20fc6SChristoph Hellwig struct p9_trans_rdma *rdma = client->trans; 3597cf20fc6SChristoph Hellwig struct p9_rdma_context *c = 3607cf20fc6SChristoph Hellwig container_of(wc->wr_cqe, struct p9_rdma_context, cqe); 3617cf20fc6SChristoph Hellwig 362fc79d4b1STom Tucker ib_dma_unmap_single(rdma->cm_id->device, 363fc79d4b1STom Tucker c->busa, c->req->tc->size, 364fc79d4b1STom Tucker DMA_TO_DEVICE); 3657cf20fc6SChristoph Hellwig up(&rdma->sq_sem); 3667cf20fc6SChristoph Hellwig kfree(c); 367fc79d4b1STom Tucker } 368fc79d4b1STom Tucker 369fc79d4b1STom Tucker static void qp_event_handler(struct ib_event *event, void *context) 370fc79d4b1STom Tucker { 3715d385153SJoe Perches p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n", 3725d385153SJoe Perches event->event, context); 373fc79d4b1STom Tucker } 374fc79d4b1STom Tucker 375fc79d4b1STom Tucker static void 
rdma_destroy_trans(struct p9_trans_rdma *rdma) 376fc79d4b1STom Tucker { 377fc79d4b1STom Tucker if (!rdma) 378fc79d4b1STom Tucker return; 379fc79d4b1STom Tucker 380fc79d4b1STom Tucker if (rdma->qp && !IS_ERR(rdma->qp)) 381fc79d4b1STom Tucker ib_destroy_qp(rdma->qp); 382fc79d4b1STom Tucker 383fc79d4b1STom Tucker if (rdma->pd && !IS_ERR(rdma->pd)) 384fc79d4b1STom Tucker ib_dealloc_pd(rdma->pd); 385fc79d4b1STom Tucker 386fc79d4b1STom Tucker if (rdma->cq && !IS_ERR(rdma->cq)) 3877cf20fc6SChristoph Hellwig ib_free_cq(rdma->cq); 388fc79d4b1STom Tucker 389fc79d4b1STom Tucker if (rdma->cm_id && !IS_ERR(rdma->cm_id)) 390fc79d4b1STom Tucker rdma_destroy_id(rdma->cm_id); 391fc79d4b1STom Tucker 392fc79d4b1STom Tucker kfree(rdma); 393fc79d4b1STom Tucker } 394fc79d4b1STom Tucker 395fc79d4b1STom Tucker static int 396fc79d4b1STom Tucker post_recv(struct p9_client *client, struct p9_rdma_context *c) 397fc79d4b1STom Tucker { 398fc79d4b1STom Tucker struct p9_trans_rdma *rdma = client->trans; 399*72bc4d37SBart Van Assche struct ib_recv_wr wr; 400fc79d4b1STom Tucker struct ib_sge sge; 401fc79d4b1STom Tucker 402fc79d4b1STom Tucker c->busa = ib_dma_map_single(rdma->cm_id->device, 403fc79d4b1STom Tucker c->rc->sdata, client->msize, 404fc79d4b1STom Tucker DMA_FROM_DEVICE); 405fc79d4b1STom Tucker if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) 406fc79d4b1STom Tucker goto error; 407fc79d4b1STom Tucker 4087cf20fc6SChristoph Hellwig c->cqe.done = recv_done; 4097cf20fc6SChristoph Hellwig 410fc79d4b1STom Tucker sge.addr = c->busa; 411fc79d4b1STom Tucker sge.length = client->msize; 4122f31fa88SJason Gunthorpe sge.lkey = rdma->pd->local_dma_lkey; 413fc79d4b1STom Tucker 414fc79d4b1STom Tucker wr.next = NULL; 4157cf20fc6SChristoph Hellwig wr.wr_cqe = &c->cqe; 416fc79d4b1STom Tucker wr.sg_list = &sge; 417fc79d4b1STom Tucker wr.num_sge = 1; 418*72bc4d37SBart Van Assche return ib_post_recv(rdma->qp, &wr, NULL); 419fc79d4b1STom Tucker 420fc79d4b1STom Tucker error: 4215d385153SJoe Perches 
p9_debug(P9_DEBUG_ERROR, "EIO\n"); 422fc79d4b1STom Tucker return -EIO; 423fc79d4b1STom Tucker } 424fc79d4b1STom Tucker 425fc79d4b1STom Tucker static int rdma_request(struct p9_client *client, struct p9_req_t *req) 426fc79d4b1STom Tucker { 427fc79d4b1STom Tucker struct p9_trans_rdma *rdma = client->trans; 428*72bc4d37SBart Van Assche struct ib_send_wr wr; 429fc79d4b1STom Tucker struct ib_sge sge; 430fc79d4b1STom Tucker int err = 0; 431fc79d4b1STom Tucker unsigned long flags; 432fc79d4b1STom Tucker struct p9_rdma_context *c = NULL; 433fc79d4b1STom Tucker struct p9_rdma_context *rpl_context = NULL; 434fc79d4b1STom Tucker 4351cff3306SSimon Derr /* When an error occurs between posting the recv and the send, 4361cff3306SSimon Derr * there will be a receive context posted without a pending request. 4371cff3306SSimon Derr * Since there is no way to "un-post" it, we remember it and skip 4381cff3306SSimon Derr * post_recv() for the next request. 4391cff3306SSimon Derr * So here, 4401cff3306SSimon Derr * see if we are this `next request' and need to absorb an excess rc. 4411cff3306SSimon Derr * If yes, then drop and free our own, and do not recv_post(). 4421cff3306SSimon Derr **/ 4431cff3306SSimon Derr if (unlikely(atomic_read(&rdma->excess_rc) > 0)) { 4441cff3306SSimon Derr if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) { 4451cff3306SSimon Derr /* Got one ! */ 4461cff3306SSimon Derr kfree(req->rc); 4471cff3306SSimon Derr req->rc = NULL; 4481cff3306SSimon Derr goto dont_need_post_recv; 4491cff3306SSimon Derr } else { 4501cff3306SSimon Derr /* We raced and lost. 
*/ 4511cff3306SSimon Derr atomic_inc(&rdma->excess_rc); 4521cff3306SSimon Derr } 4531cff3306SSimon Derr } 4541cff3306SSimon Derr 455fc79d4b1STom Tucker /* Allocate an fcall for the reply */ 456eeff66efSAneesh Kumar K.V rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS); 4571d6400c7SDavidlohr Bueso if (!rpl_context) { 4581d6400c7SDavidlohr Bueso err = -ENOMEM; 4592f52d07cSSimon Derr goto recv_error; 4601d6400c7SDavidlohr Bueso } 461fc79d4b1STom Tucker rpl_context->rc = req->rc; 462fc79d4b1STom Tucker 463fc79d4b1STom Tucker /* 464fc79d4b1STom Tucker * Post a receive buffer for this request. We need to ensure 465fc79d4b1STom Tucker * there is a reply buffer available for every outstanding 466fc79d4b1STom Tucker * request. A flushed request can result in no reply for an 467fc79d4b1STom Tucker * outstanding request, so we must keep a count to avoid 468fc79d4b1STom Tucker * overflowing the RQ. 469fc79d4b1STom Tucker */ 4702f52d07cSSimon Derr if (down_interruptible(&rdma->rq_sem)) { 4712f52d07cSSimon Derr err = -EINTR; 4722f52d07cSSimon Derr goto recv_error; 4732f52d07cSSimon Derr } 474fd453d0eSSimon Derr 475fc79d4b1STom Tucker err = post_recv(client, rpl_context); 476fd453d0eSSimon Derr if (err) { 477fd453d0eSSimon Derr p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n"); 4782f52d07cSSimon Derr goto recv_error; 479fd453d0eSSimon Derr } 480fc79d4b1STom Tucker /* remove posted receive buffer from request structure */ 481fc79d4b1STom Tucker req->rc = NULL; 482fc79d4b1STom Tucker 4831cff3306SSimon Derr dont_need_post_recv: 484fc79d4b1STom Tucker /* Post the request */ 485eeff66efSAneesh Kumar K.V c = kmalloc(sizeof *c, GFP_NOFS); 4861d6400c7SDavidlohr Bueso if (!c) { 4871d6400c7SDavidlohr Bueso err = -ENOMEM; 4882f52d07cSSimon Derr goto send_error; 4891d6400c7SDavidlohr Bueso } 490fc79d4b1STom Tucker c->req = req; 491fc79d4b1STom Tucker 492fc79d4b1STom Tucker c->busa = ib_dma_map_single(rdma->cm_id->device, 493fc79d4b1STom Tucker c->req->tc->sdata, c->req->tc->size, 
494fc79d4b1STom Tucker DMA_TO_DEVICE); 4952f52d07cSSimon Derr if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) { 4962f52d07cSSimon Derr err = -EIO; 4972f52d07cSSimon Derr goto send_error; 4982f52d07cSSimon Derr } 499fc79d4b1STom Tucker 5007cf20fc6SChristoph Hellwig c->cqe.done = send_done; 5017cf20fc6SChristoph Hellwig 502fc79d4b1STom Tucker sge.addr = c->busa; 503fc79d4b1STom Tucker sge.length = c->req->tc->size; 5042f31fa88SJason Gunthorpe sge.lkey = rdma->pd->local_dma_lkey; 505fc79d4b1STom Tucker 506fc79d4b1STom Tucker wr.next = NULL; 5077cf20fc6SChristoph Hellwig wr.wr_cqe = &c->cqe; 508fc79d4b1STom Tucker wr.opcode = IB_WR_SEND; 509fc79d4b1STom Tucker wr.send_flags = IB_SEND_SIGNALED; 510fc79d4b1STom Tucker wr.sg_list = &sge; 511fc79d4b1STom Tucker wr.num_sge = 1; 512fc79d4b1STom Tucker 5132f52d07cSSimon Derr if (down_interruptible(&rdma->sq_sem)) { 5142f52d07cSSimon Derr err = -EINTR; 5152f52d07cSSimon Derr goto send_error; 5162f52d07cSSimon Derr } 517fc79d4b1STom Tucker 5183f9d5b8dSSimon Derr /* Mark request as `sent' *before* we actually send it, 5193f9d5b8dSSimon Derr * because doing if after could erase the REQ_STATUS_RCVD 5203f9d5b8dSSimon Derr * status in case of a very fast reply. 5213f9d5b8dSSimon Derr */ 5223f9d5b8dSSimon Derr req->status = REQ_STATUS_SENT; 523*72bc4d37SBart Van Assche err = ib_post_send(rdma->qp, &wr, NULL); 5242f52d07cSSimon Derr if (err) 5252f52d07cSSimon Derr goto send_error; 526fc79d4b1STom Tucker 5272f52d07cSSimon Derr /* Success */ 5282f52d07cSSimon Derr return 0; 5292f52d07cSSimon Derr 5302f52d07cSSimon Derr /* Handle errors that happened during or while preparing the send: */ 5312f52d07cSSimon Derr send_error: 5323f9d5b8dSSimon Derr req->status = REQ_STATUS_ERROR; 5331d6400c7SDavidlohr Bueso kfree(c); 5342f52d07cSSimon Derr p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err); 5351cff3306SSimon Derr 5361cff3306SSimon Derr /* Ach. 5371cff3306SSimon Derr * We did recv_post(), but not send. 
We have one recv_post in excess. 5381cff3306SSimon Derr */ 5391cff3306SSimon Derr atomic_inc(&rdma->excess_rc); 5402f52d07cSSimon Derr return err; 5412f52d07cSSimon Derr 5422f52d07cSSimon Derr /* Handle errors that happened during or while preparing post_recv(): */ 5432f52d07cSSimon Derr recv_error: 5441d6400c7SDavidlohr Bueso kfree(rpl_context); 545fc79d4b1STom Tucker spin_lock_irqsave(&rdma->req_lock, flags); 546fc79d4b1STom Tucker if (rdma->state < P9_RDMA_CLOSING) { 547fc79d4b1STom Tucker rdma->state = P9_RDMA_CLOSING; 548fc79d4b1STom Tucker spin_unlock_irqrestore(&rdma->req_lock, flags); 549fc79d4b1STom Tucker rdma_disconnect(rdma->cm_id); 550fc79d4b1STom Tucker } else 551fc79d4b1STom Tucker spin_unlock_irqrestore(&rdma->req_lock, flags); 552fc79d4b1STom Tucker return err; 553fc79d4b1STom Tucker } 554fc79d4b1STom Tucker 555fc79d4b1STom Tucker static void rdma_close(struct p9_client *client) 556fc79d4b1STom Tucker { 557fc79d4b1STom Tucker struct p9_trans_rdma *rdma; 558fc79d4b1STom Tucker 559fc79d4b1STom Tucker if (!client) 560fc79d4b1STom Tucker return; 561fc79d4b1STom Tucker 562fc79d4b1STom Tucker rdma = client->trans; 563fc79d4b1STom Tucker if (!rdma) 564fc79d4b1STom Tucker return; 565fc79d4b1STom Tucker 566fc79d4b1STom Tucker client->status = Disconnected; 567fc79d4b1STom Tucker rdma_disconnect(rdma->cm_id); 568fc79d4b1STom Tucker rdma_destroy_trans(rdma); 569fc79d4b1STom Tucker } 570fc79d4b1STom Tucker 571fc79d4b1STom Tucker /** 572fc79d4b1STom Tucker * alloc_rdma - Allocate and initialize the rdma transport structure 573fc79d4b1STom Tucker * @opts: Mount options structure 574fc79d4b1STom Tucker */ 575fc79d4b1STom Tucker static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts) 576fc79d4b1STom Tucker { 577fc79d4b1STom Tucker struct p9_trans_rdma *rdma; 578fc79d4b1STom Tucker 579fc79d4b1STom Tucker rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL); 580fc79d4b1STom Tucker if (!rdma) 581fc79d4b1STom Tucker return NULL; 582fc79d4b1STom Tucker 
583c4fac910SDavid Howells rdma->port = opts->port; 584c4fac910SDavid Howells rdma->privport = opts->privport; 585fc79d4b1STom Tucker rdma->sq_depth = opts->sq_depth; 586fc79d4b1STom Tucker rdma->rq_depth = opts->rq_depth; 587fc79d4b1STom Tucker rdma->timeout = opts->timeout; 588fc79d4b1STom Tucker spin_lock_init(&rdma->req_lock); 589fc79d4b1STom Tucker init_completion(&rdma->cm_done); 590fc79d4b1STom Tucker sema_init(&rdma->sq_sem, rdma->sq_depth); 591fd453d0eSSimon Derr sema_init(&rdma->rq_sem, rdma->rq_depth); 5921cff3306SSimon Derr atomic_set(&rdma->excess_rc, 0); 593fc79d4b1STom Tucker 594fc79d4b1STom Tucker return rdma; 595fc79d4b1STom Tucker } 596fc79d4b1STom Tucker 597fc79d4b1STom Tucker static int rdma_cancel(struct p9_client *client, struct p9_req_t *req) 598fc79d4b1STom Tucker { 599931700d2SSimon Derr /* Nothing to do here. 600931700d2SSimon Derr * We will take care of it (if we have to) in rdma_cancelled() 601931700d2SSimon Derr */ 602fc79d4b1STom Tucker return 1; 603fc79d4b1STom Tucker } 604fc79d4b1STom Tucker 605931700d2SSimon Derr /* A request has been fully flushed without a reply. 606931700d2SSimon Derr * That means we have posted one buffer in excess. 
607931700d2SSimon Derr */ 608931700d2SSimon Derr static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req) 609931700d2SSimon Derr { 610931700d2SSimon Derr struct p9_trans_rdma *rdma = client->trans; 611931700d2SSimon Derr atomic_inc(&rdma->excess_rc); 612931700d2SSimon Derr return 0; 613931700d2SSimon Derr } 614931700d2SSimon Derr 615f569d3efSDominique Martinet static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma) 616f569d3efSDominique Martinet { 617f569d3efSDominique Martinet struct sockaddr_in cl = { 618f569d3efSDominique Martinet .sin_family = AF_INET, 619f569d3efSDominique Martinet .sin_addr.s_addr = htonl(INADDR_ANY), 620f569d3efSDominique Martinet }; 621f569d3efSDominique Martinet int port, err = -EINVAL; 622f569d3efSDominique Martinet 623f569d3efSDominique Martinet for (port = P9_DEF_MAX_RESVPORT; port >= P9_DEF_MIN_RESVPORT; port--) { 624f569d3efSDominique Martinet cl.sin_port = htons((ushort)port); 625f569d3efSDominique Martinet err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl); 626f569d3efSDominique Martinet if (err != -EADDRINUSE) 627f569d3efSDominique Martinet break; 628f569d3efSDominique Martinet } 629f569d3efSDominique Martinet return err; 630f569d3efSDominique Martinet } 631f569d3efSDominique Martinet 632fc79d4b1STom Tucker /** 6334a026da9SSun Lianwen * rdma_create_trans - Transport method for creating a transport instance 634fc79d4b1STom Tucker * @client: client instance 635fc79d4b1STom Tucker * @addr: IP address string 636fc79d4b1STom Tucker * @args: Mount options string 637fc79d4b1STom Tucker */ 638fc79d4b1STom Tucker static int 639fc79d4b1STom Tucker rdma_create_trans(struct p9_client *client, const char *addr, char *args) 640fc79d4b1STom Tucker { 641fc79d4b1STom Tucker int err; 642fc79d4b1STom Tucker struct p9_rdma_opts opts; 643fc79d4b1STom Tucker struct p9_trans_rdma *rdma; 644fc79d4b1STom Tucker struct rdma_conn_param conn_param; 645fc79d4b1STom Tucker struct ib_qp_init_attr qp_attr; 646fc79d4b1STom Tucker 
647fc79d4b1STom Tucker /* Parse the transport specific mount options */ 648fc79d4b1STom Tucker err = parse_opts(args, &opts); 649fc79d4b1STom Tucker if (err < 0) 650fc79d4b1STom Tucker return err; 651fc79d4b1STom Tucker 652fc79d4b1STom Tucker /* Create and initialize the RDMA transport structure */ 653fc79d4b1STom Tucker rdma = alloc_rdma(&opts); 654fc79d4b1STom Tucker if (!rdma) 655fc79d4b1STom Tucker return -ENOMEM; 656fc79d4b1STom Tucker 657fc79d4b1STom Tucker /* Create the RDMA CM ID */ 658fa20105eSGuy Shapiro rdma->cm_id = rdma_create_id(&init_net, p9_cm_event_handler, client, 659fa20105eSGuy Shapiro RDMA_PS_TCP, IB_QPT_RC); 660fc79d4b1STom Tucker if (IS_ERR(rdma->cm_id)) 661fc79d4b1STom Tucker goto error; 662fc79d4b1STom Tucker 663517ac45aSTom Tucker /* Associate the client with the transport */ 664517ac45aSTom Tucker client->trans = rdma; 665517ac45aSTom Tucker 666f569d3efSDominique Martinet /* Bind to a privileged port if we need to */ 667f569d3efSDominique Martinet if (opts.privport) { 668f569d3efSDominique Martinet err = p9_rdma_bind_privport(rdma); 669f569d3efSDominique Martinet if (err < 0) { 670f569d3efSDominique Martinet pr_err("%s (%d): problem binding to privport: %d\n", 671f569d3efSDominique Martinet __func__, task_pid_nr(current), -err); 672f569d3efSDominique Martinet goto error; 673f569d3efSDominique Martinet } 674f569d3efSDominique Martinet } 675f569d3efSDominique Martinet 676fc79d4b1STom Tucker /* Resolve the server's address */ 677fc79d4b1STom Tucker rdma->addr.sin_family = AF_INET; 678fc79d4b1STom Tucker rdma->addr.sin_addr.s_addr = in_aton(addr); 679fc79d4b1STom Tucker rdma->addr.sin_port = htons(opts.port); 680fc79d4b1STom Tucker err = rdma_resolve_addr(rdma->cm_id, NULL, 681fc79d4b1STom Tucker (struct sockaddr *)&rdma->addr, 682fc79d4b1STom Tucker rdma->timeout); 683fc79d4b1STom Tucker if (err) 684fc79d4b1STom Tucker goto error; 685fc79d4b1STom Tucker err = wait_for_completion_interruptible(&rdma->cm_done); 686fc79d4b1STom Tucker if (err 
|| (rdma->state != P9_RDMA_ADDR_RESOLVED)) 687fc79d4b1STom Tucker goto error; 688fc79d4b1STom Tucker 689fc79d4b1STom Tucker /* Resolve the route to the server */ 690fc79d4b1STom Tucker err = rdma_resolve_route(rdma->cm_id, rdma->timeout); 691fc79d4b1STom Tucker if (err) 692fc79d4b1STom Tucker goto error; 693fc79d4b1STom Tucker err = wait_for_completion_interruptible(&rdma->cm_done); 694fc79d4b1STom Tucker if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED)) 695fc79d4b1STom Tucker goto error; 696fc79d4b1STom Tucker 697fc79d4b1STom Tucker /* Create the Completion Queue */ 6987cf20fc6SChristoph Hellwig rdma->cq = ib_alloc_cq(rdma->cm_id->device, client, 6997cf20fc6SChristoph Hellwig opts.sq_depth + opts.rq_depth + 1, 7007cf20fc6SChristoph Hellwig 0, IB_POLL_SOFTIRQ); 701fc79d4b1STom Tucker if (IS_ERR(rdma->cq)) 702fc79d4b1STom Tucker goto error; 703fc79d4b1STom Tucker 704fc79d4b1STom Tucker /* Create the Protection Domain */ 705ed082d36SChristoph Hellwig rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0); 706fc79d4b1STom Tucker if (IS_ERR(rdma->pd)) 707fc79d4b1STom Tucker goto error; 708fc79d4b1STom Tucker 709fc79d4b1STom Tucker /* Create the Queue Pair */ 710fc79d4b1STom Tucker memset(&qp_attr, 0, sizeof qp_attr); 711fc79d4b1STom Tucker qp_attr.event_handler = qp_event_handler; 712fc79d4b1STom Tucker qp_attr.qp_context = client; 713fc79d4b1STom Tucker qp_attr.cap.max_send_wr = opts.sq_depth; 714fc79d4b1STom Tucker qp_attr.cap.max_recv_wr = opts.rq_depth; 715fc79d4b1STom Tucker qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE; 716fc79d4b1STom Tucker qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE; 717fc79d4b1STom Tucker qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 718fc79d4b1STom Tucker qp_attr.qp_type = IB_QPT_RC; 719fc79d4b1STom Tucker qp_attr.send_cq = rdma->cq; 720fc79d4b1STom Tucker qp_attr.recv_cq = rdma->cq; 721fc79d4b1STom Tucker err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr); 722fc79d4b1STom Tucker if (err) 723fc79d4b1STom Tucker goto error; 724fc79d4b1STom Tucker 
rdma->qp = rdma->cm_id->qp; 725fc79d4b1STom Tucker 726fc79d4b1STom Tucker /* Request a connection */ 727fc79d4b1STom Tucker memset(&conn_param, 0, sizeof(conn_param)); 728fc79d4b1STom Tucker conn_param.private_data = NULL; 729fc79d4b1STom Tucker conn_param.private_data_len = 0; 730fc79d4b1STom Tucker conn_param.responder_resources = P9_RDMA_IRD; 731fc79d4b1STom Tucker conn_param.initiator_depth = P9_RDMA_ORD; 732fc79d4b1STom Tucker err = rdma_connect(rdma->cm_id, &conn_param); 733fc79d4b1STom Tucker if (err) 734fc79d4b1STom Tucker goto error; 735fc79d4b1STom Tucker err = wait_for_completion_interruptible(&rdma->cm_done); 736fc79d4b1STom Tucker if (err || (rdma->state != P9_RDMA_CONNECTED)) 737fc79d4b1STom Tucker goto error; 738fc79d4b1STom Tucker 739fc79d4b1STom Tucker client->status = Connected; 740fc79d4b1STom Tucker 741fc79d4b1STom Tucker return 0; 742fc79d4b1STom Tucker 743fc79d4b1STom Tucker error: 744fc79d4b1STom Tucker rdma_destroy_trans(rdma); 745fc79d4b1STom Tucker return -ENOTCONN; 746fc79d4b1STom Tucker } 747fc79d4b1STom Tucker 748fc79d4b1STom Tucker static struct p9_trans_module p9_rdma_trans = { 749fc79d4b1STom Tucker .name = "rdma", 750fc79d4b1STom Tucker .maxsize = P9_RDMA_MAXSIZE, 751fc79d4b1STom Tucker .def = 0, 752fc79d4b1STom Tucker .owner = THIS_MODULE, 753fc79d4b1STom Tucker .create = rdma_create_trans, 754fc79d4b1STom Tucker .close = rdma_close, 755fc79d4b1STom Tucker .request = rdma_request, 756fc79d4b1STom Tucker .cancel = rdma_cancel, 757931700d2SSimon Derr .cancelled = rdma_cancelled, 758c4fac910SDavid Howells .show_options = p9_rdma_show_options, 759fc79d4b1STom Tucker }; 760fc79d4b1STom Tucker 761fc79d4b1STom Tucker /** 762fc79d4b1STom Tucker * p9_trans_rdma_init - Register the 9P RDMA transport driver 763fc79d4b1STom Tucker */ 764fc79d4b1STom Tucker static int __init p9_trans_rdma_init(void) 765fc79d4b1STom Tucker { 766fc79d4b1STom Tucker v9fs_register_trans(&p9_rdma_trans); 767fc79d4b1STom Tucker return 0; 768fc79d4b1STom Tucker } 

/**
 * p9_trans_rdma_exit - Unregister the 9P RDMA transport driver
 */
static void __exit p9_trans_rdma_exit(void)
{
	v9fs_unregister_trans(&p9_rdma_trans);
}

module_init(p9_trans_rdma_init);
module_exit(p9_trans_rdma_exit);

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("RDMA Transport for 9P");
MODULE_LICENSE("Dual BSD/GPL");