/*
 * net/9p/trans_rdma.c
 *
 * RDMA transport layer based on the trans_fd.c implementation.
 *
 *  Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com>
 *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#define P9_PORT			5640
#define P9_RDMA_SQ_DEPTH	32
#define P9_RDMA_RQ_DEPTH	32
#define P9_RDMA_SEND_SGE	4
#define P9_RDMA_RECV_SGE	4
#define P9_RDMA_IRD		0
#define P9_RDMA_ORD		0
#define P9_RDMA_TIMEOUT		30000		/* 30 seconds */
#define P9_RDMA_MAXSIZE		(4*4096)	/* Min SGE is 4, so we can
						 * safely advertise a maxsize
						 * of 16k (4 * 4096 bytes) */

/**
 * struct p9_trans_rdma - RDMA transport instance
 *
 * @state: tracks the transport state machine for connection setup and tear down
 * @cm_id: The RDMA CM ID
 * @pd: Protection Domain pointer
 * @qp: Queue Pair pointer
 * @cq: Completion Queue pointer
 * @dma_mr: DMA Memory Region pointer
 * @lkey: The local access only memory region key
 * @timeout: Number of msecs to wait for connection management events
 * @sq_depth: The depth of the Send Queue
 * @sq_sem: Semaphore for the SQ
 * @rq_depth: The depth of the Receive Queue.
 * @rq_count: Count of requests in the Receive Queue.
 * @addr: The remote peer's address
 * @req_lock: Protects the active request list
 * @cm_done: Completion event for connection management tracking
 */
struct p9_trans_rdma {
	enum {
		P9_RDMA_INIT,
		P9_RDMA_ADDR_RESOLVED,
		P9_RDMA_ROUTE_RESOLVED,
		P9_RDMA_CONNECTED,
		P9_RDMA_FLUSHING,
		P9_RDMA_CLOSING,
		P9_RDMA_CLOSED,
	} state;
	struct rdma_cm_id *cm_id;
	struct ib_pd *pd;
	struct ib_qp *qp;
	struct ib_cq *cq;
	struct ib_mr *dma_mr;
	u32 lkey;
	long timeout;
	int sq_depth;
	struct semaphore sq_sem;
	int rq_depth;
	atomic_t rq_count;
	struct sockaddr_in addr;
	spinlock_t req_lock;

	struct completion cm_done;
};

/**
 * struct p9_rdma_context - Keeps track of in-process WR
 *
 * @wc_op: The original WR op for when the CQE completes in error.
 * @busa: Bus address to unmap when the WR completes
 * @req: Keeps track of requests (send)
 * @rc: Keeps track of replies (receive)
 */
struct p9_rdma_req;
struct p9_rdma_context {
	enum ib_wc_opcode wc_op;
	dma_addr_t busa;
	union {
		struct p9_req_t *req;
		struct p9_fcall *rc;
	};
};

/**
 * struct p9_rdma_opts - Collection of mount options
 * @port: port of connection
 * @sq_depth: The requested depth of the SQ. This really doesn't need
 * to be any deeper than the number of threads used in the client
 * @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
 * @timeout: Time to wait in msecs for CM events
 */
struct p9_rdma_opts {
	short port;
	int sq_depth;
	int rq_depth;
	long timeout;
};

/*
 * Option Parsing (code inspired by NFS code)
 */
enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout, Opt_err,
};

static match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_sq_depth, "sq=%u"},
	{Opt_rq_depth, "rq=%u"},
	{Opt_timeout, "timeout=%u"},
	{Opt_err, NULL},
};

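/*
 * For example, a client might pass "port=5640,sq=16,rq=32,timeout=30000"
 * as the transport portion of the mount options (illustrative values);
 * any token that does not match the table above falls through to
 * Opt_err and is skipped by parse_opts() below.
 */
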
/**
 * parse_opts - parse mount options into rdma options structure
 * @params: options string passed from mount
 * @opts: rdma transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */
static int parse_opts(char *params, struct p9_rdma_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->sq_depth = P9_RDMA_SQ_DEPTH;
	opts->rq_depth = P9_RDMA_RQ_DEPTH;
	opts->timeout = P9_RDMA_TIMEOUT;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		P9_DPRINTK(P9_DEBUG_ERROR,
			   "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;
		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		/* Only attempt integer conversion for recognized tokens;
		 * args[] is not populated for Opt_err.
		 */
		if (token == Opt_err)
			continue;
		r = match_int(&args[0], &option);
		if (r < 0) {
			P9_DPRINTK(P9_DEBUG_ERROR,
				   "integer field, but no integer?\n");
			continue;
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_sq_depth:
			opts->sq_depth = option;
			break;
		case Opt_rq_depth:
			opts->rq_depth = option;
			break;
		case Opt_timeout:
			opts->timeout = option;
			break;
		default:
			continue;
		}
	}
	/* RQ must be at least as large as the SQ */
	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
	kfree(tmp_options);
	return 0;
}

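/*
 * Connection Manager event callback: walks the state machine declared
 * in struct p9_trans_rdma forward as address resolution, route
 * resolution and connection establishment complete, and marks the
 * client Disconnected on error or disconnect events.
 */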
static int
p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct p9_client *c = id->context;
	struct p9_trans_rdma *rdma = c->trans;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_INIT);
		rdma->state = P9_RDMA_ADDR_RESOLVED;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
		rdma->state = P9_RDMA_ROUTE_RESOLVED;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
		rdma->state = P9_RDMA_CONNECTED;
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		if (rdma)
			rdma->state = P9_RDMA_CLOSED;
		if (c)
			c->status = Disconnected;
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		break;

	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
	case RDMA_CM_EVENT_REJECTED:
	case RDMA_CM_EVENT_CONNECT_REQUEST:
	case RDMA_CM_EVENT_CONNECT_RESPONSE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		c->status = Disconnected;
		rdma_disconnect(rdma->cm_id);
		break;
	default:
		BUG();
	}
	complete(&rdma->cm_done);
	return 0;
}

static void
handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
							 DMA_FROM_DEVICE);

	if (status != IB_WC_SUCCESS)
		goto err_out;

	err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	req->rc = c->rc;
	req->status = REQ_STATUS_RCVD;
	p9_client_cb(client, req);

	return;

 err_out:
	P9_DPRINTK(P9_DEBUG_ERROR, "req %p err %d status %d\n",
		   req, err, status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
}

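/*
 * Send completions have nothing to hand back to the client core; the
 * only cleanup needed is releasing the DMA mapping of the request
 * buffer.
 */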
static void
handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	ib_dma_unmap_single(rdma->cm_id->device,
			    c->busa, c->req->tc->size,
			    DMA_TO_DEVICE);
}

static void qp_event_handler(struct ib_event *event, void *context)
{
	P9_DPRINTK(P9_DEBUG_ERROR, "QP event %d context %p\n", event->event,
								context);
}

static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct p9_client *client = cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	int ret;
	struct ib_wc wc;

	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id;

		switch (c->wc_op) {
		case IB_WC_RECV:
			atomic_dec(&rdma->rq_count);
			handle_recv(client, rdma, c, wc.status, wc.byte_len);
			break;

		case IB_WC_SEND:
			handle_send(client, rdma, c, wc.status, wc.byte_len);
			up(&rdma->sq_sem);
			break;

		default:
			printk(KERN_ERR "9prdma: unexpected completion type, "
			       "c->wc_op=%d, wc.opcode=%d, status=%d\n",
			       c->wc_op, wc.opcode, wc.status);
			break;
		}
		kfree(c);
	}
}

static void cq_event_handler(struct ib_event *e, void *v)
{
	P9_DPRINTK(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
}

static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
	if (!rdma)
		return;

	if (rdma->dma_mr && !IS_ERR(rdma->dma_mr))
		ib_dereg_mr(rdma->dma_mr);

	if (rdma->qp && !IS_ERR(rdma->qp))
		ib_destroy_qp(rdma->qp);

	if (rdma->pd && !IS_ERR(rdma->pd))
		ib_dealloc_pd(rdma->pd);

	if (rdma->cq && !IS_ERR(rdma->cq))
		ib_destroy_cq(rdma->cq);

	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
		rdma_destroy_id(rdma->cm_id);

	kfree(rdma);
}

static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge sge;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc->sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	sge.addr = c->busa;
	sge.length = client->msize;
	sge.lkey = rdma->lkey;

	wr.next = NULL;
	c->wc_op = IB_WC_RECV;
	wr.wr_id = (unsigned long) c;
	wr.sg_list = &sge;
	wr.num_sge = 1;
	return ib_post_recv(rdma->qp, &wr, &bad_wr);

 error:
	P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
}

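/*
 * rdma_request() below first posts a receive buffer for the eventual
 * reply (every outstanding request needs one, since a flushed request
 * can leave a posted receive unconsumed and rq_count guards against
 * overflowing the RQ), then maps and posts the request itself as a
 * send WR. The sq_sem semaphore, initialized to sq_depth, throttles
 * senders so the Send Queue never overflows.
 */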
static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_send_wr wr, *bad_wr;
	struct ib_sge sge;
	int err = 0;
	unsigned long flags;
	struct p9_rdma_context *c = NULL;
	struct p9_rdma_context *rpl_context = NULL;

	/* Allocate an fcall for the reply */
	rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
	if (!rpl_context) {
		err = -ENOMEM;
		goto err_close;
	}

	/*
	 * If the request has a buffer, steal it, otherwise
	 * allocate a new one.  Typically, requests should already
	 * have receive buffers allocated and just swap them around
	 */
	if (!req->rc) {
		req->rc = kmalloc(sizeof(struct p9_fcall) + client->msize,
				  GFP_NOFS);
		if (req->rc) {
			req->rc->sdata = (char *) req->rc +
						sizeof(struct p9_fcall);
			req->rc->capacity = client->msize;
		}
	}
	rpl_context->rc = req->rc;
	if (!rpl_context->rc) {
		err = -ENOMEM;
		goto err_free2;
	}

	/*
	 * Post a receive buffer for this request. We need to ensure
	 * there is a reply buffer available for every outstanding
	 * request. A flushed request can result in no reply for an
	 * outstanding request, so we must keep a count to avoid
	 * overflowing the RQ.
	 */
	if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
		err = post_recv(client, rpl_context);
		if (err)
			goto err_free1;
	} else
		atomic_dec(&rdma->rq_count);

	/* remove posted receive buffer from request structure */
	req->rc = NULL;

	/* Post the request */
	c = kmalloc(sizeof *c, GFP_NOFS);
	if (!c) {
		err = -ENOMEM;
		goto error;
	}
	c->req = req;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->req->tc->sdata, c->req->tc->size,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	sge.addr = c->busa;
	sge.length = c->req->tc->size;
	sge.lkey = rdma->lkey;

	wr.next = NULL;
	c->wc_op = IB_WC_SEND;
	wr.wr_id = (unsigned long) c;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	if (down_interruptible(&rdma->sq_sem)) {
		ib_dma_unmap_single(rdma->cm_id->device, c->busa,
				    c->req->tc->size, DMA_TO_DEVICE);
		goto error;
	}

	return ib_post_send(rdma->qp, &wr, &bad_wr);

 error:
	/*
	 * The receive buffer has (usually) already been posted by this
	 * point, so rpl_context and its fcall are owned by the RQ and
	 * will be reclaimed by the completion handler; only the send
	 * context may be freed here.
	 */
	kfree(c);
	P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
 err_free1:
	kfree(rpl_context->rc);
	req->rc = NULL;
 err_free2:
	kfree(rpl_context);
 err_close:
	spin_lock_irqsave(&rdma->req_lock, flags);
	if (rdma->state < P9_RDMA_CLOSING) {
		rdma->state = P9_RDMA_CLOSING;
		spin_unlock_irqrestore(&rdma->req_lock, flags);
		rdma_disconnect(rdma->cm_id);
	} else
		spin_unlock_irqrestore(&rdma->req_lock, flags);
	return err;
}

static void rdma_close(struct p9_client *client)
{
	struct p9_trans_rdma *rdma;

	if (!client)
		return;

	rdma = client->trans;
	if (!rdma)
		return;

	client->status = Disconnected;
	rdma_disconnect(rdma->cm_id);
	rdma_destroy_trans(rdma);
}

/**
 * alloc_rdma - Allocate and initialize the rdma transport structure
 * @opts: Mount options structure
 */
static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
{
	struct p9_trans_rdma *rdma;

	rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
	if (!rdma)
		return NULL;

	rdma->sq_depth = opts->sq_depth;
	rdma->rq_depth = opts->rq_depth;
	rdma->timeout = opts->timeout;
	spin_lock_init(&rdma->req_lock);
	init_completion(&rdma->cm_done);
	sema_init(&rdma->sq_sem, rdma->sq_depth);
	atomic_set(&rdma->rq_count, 0);

	return rdma;
}

/* it's not clear to me we can do anything after send has been posted */
static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
{
	return 1;
}

/**
 * rdma_create_trans - Transport method for creating a transport instance
 * @client: client instance
 * @addr: IP address string
 * @args: Mount options string
 */
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_rdma_opts opts;
	struct p9_trans_rdma *rdma;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;

	/* Parse the transport specific mount options */
	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	/* Create and initialize the RDMA transport structure */
	rdma = alloc_rdma(&opts);
	if (!rdma)
		return -ENOMEM;

	/* Create the RDMA CM ID */
	rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP);
	if (IS_ERR(rdma->cm_id))
		goto error;

	/* Associate the client with the transport */
	client->trans = rdma;

	/* Resolve the server's address */
	rdma->addr.sin_family = AF_INET;
	rdma->addr.sin_addr.s_addr = in_aton(addr);
	rdma->addr.sin_port = htons(opts.port);
	err = rdma_resolve_addr(rdma->cm_id, NULL,
				(struct sockaddr *)&rdma->addr,
				rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
		goto error;

	/* Resolve the route to the server */
	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
		goto error;

	/* Query the device attributes */
	err = ib_query_device(rdma->cm_id->device, &devattr);
	if (err)
		goto error;

	/* Create the Completion Queue */
	rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
				cq_event_handler, client,
				opts.sq_depth + opts.rq_depth + 1, 0);
	if (IS_ERR(rdma->cq))
		goto error;
	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);

	/* Create the Protection Domain */
	rdma->pd = ib_alloc_pd(rdma->cm_id->device);
	if (IS_ERR(rdma->pd))
		goto error;

	/* Cache the DMA lkey in the transport */
	rdma->dma_mr = NULL;
	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		rdma->lkey = rdma->cm_id->device->local_dma_lkey;
	else {
		rdma->dma_mr = ib_get_dma_mr(rdma->pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(rdma->dma_mr))
			goto error;
		rdma->lkey = rdma->dma_mr->lkey;
	}

	/* Create the Queue Pair */
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = client;
	qp_attr.cap.max_send_wr = opts.sq_depth;
	qp_attr.cap.max_recv_wr = opts.rq_depth;
	qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
	qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = rdma->cq;
	qp_attr.recv_cq = rdma->cq;
	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
	if (err)
		goto error;
	rdma->qp = rdma->cm_id->qp;

	/* Request a connection */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.private_data = NULL;
	conn_param.private_data_len = 0;
	conn_param.responder_resources = P9_RDMA_IRD;
	conn_param.initiator_depth = P9_RDMA_ORD;
	err = rdma_connect(rdma->cm_id, &conn_param);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_CONNECTED))
		goto error;

	client->status = Connected;

	return 0;

error:
	rdma_destroy_trans(rdma);
	return -ENOTCONN;
}

static struct p9_trans_module p9_rdma_trans = {
	.name = "rdma",
	.maxsize = P9_RDMA_MAXSIZE,
	.def = 0,
	.owner = THIS_MODULE,
	.create = rdma_create_trans,
	.close = rdma_close,
	.request = rdma_request,
	.cancel = rdma_cancel,
};

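/*
 * With this module loaded, a 9P filesystem can be mounted over RDMA by
 * selecting the transport by name, e.g. (hypothetical server address):
 *
 *	mount -t 9p -o trans=rdma,port=5640 192.168.1.10 /mnt/9p
 */
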
/**
 * p9_trans_rdma_init - Register the 9P RDMA transport driver
 */
static int __init p9_trans_rdma_init(void)
{
	v9fs_register_trans(&p9_rdma_trans);
	return 0;
}

static void __exit p9_trans_rdma_exit(void)
{
	v9fs_unregister_trans(&p9_rdma_trans);
}

module_init(p9_trans_rdma_init);
module_exit(p9_trans_rdma_exit);

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("RDMA Transport for 9P");
MODULE_LICENSE("Dual BSD/GPL");