xref: /openbmc/linux/drivers/infiniband/core/ucma.c (revision fd589a8f)
/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	UCMA_MAX_BACKLOG	= 128
};

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
};

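/*
 * 'mut' serializes updates to the two module-global idr tables below;
 * per-file state (ctx_list, event_list, poll_wait) is protected by the
 * owning ucma_file's mutex instead.
 */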
static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

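/*
 * A context is created with a single reference (see ucma_alloc_ctx below).
 * ucma_destroy_id() drops that initial reference and then sleeps on
 * ctx->comp, so the complete() above fires exactly when the last
 * concurrent user returns the context via ucma_put_ctx().
 */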
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	do {
		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;
	int ret;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	do {
		ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&multicast_idr, mc, &mc->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

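/*
 * For multicast events the rdma_cm echoes back the opaque context that
 * ucma_join_multicast() registered (the ucma_multicast itself) through
 * event->param.ud.private_data; it is recovered below so the event is
 * reported under the multicast's uid/id instead of the connection's.
 */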
static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	mutex_lock(&ctx->file->mut);
	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later.
		 */
		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

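/*
 * Note the allocation-failure path above: returning nonzero for a
 * CONNECT_REQUEST tells the rdma_cm core to destroy the newly created
 * id, which is correct because no ucma_event will ever surface it to
 * userspace.  For events on an already-known id we return 0 and simply
 * drop the event instead, leaving the userspace-owned id intact.
 */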
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;
	DEFINE_WAIT(wait);

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

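/*
 * ucma_get_event() above is also where contexts for incoming connections
 * are minted: the kernel id arrived with the CONNECT_REQUEST event, so it
 * is wrapped in a fresh ucma_context there and the listener's backlog
 * slot (consumed in ucma_event_handler) is returned.
 */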
static ssize_t ucma_create_id(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	/*
	 * ucma_alloc_ctx() linked the context onto file->ctx_list; unlink
	 * it before freeing so ucma_close() cannot walk freed memory.
	 */
	mutex_lock(&file->mut);
	list_del(&ctx->list);
	mutex_unlock(&file->mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_events(struct ucma_context *ctx)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx != ctx)
			continue;

		list_del(&uevent->list);

		/* clear incoming connections. */
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);

		kfree(uevent);
	}
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;

	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	ucma_cleanup_events(ctx);
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	resp.events_reported = ucma_free_ctx(ctx);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

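/*
 * The destroy sequence above: removing the id from ctx_idr stops new
 * lookups, ucma_put_ctx() drops the allocation-time reference, and
 * wait_for_completion() blocks until every ucma_get_ctx() user still
 * in flight has dropped its reference, after which ucma_free_ctx() can
 * tear everything down without racing other commands.
 */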
static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_bind_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

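/*
 * route->num_paths encodes resolution progress: 0 means no path records
 * exist yet, so the GIDs and pkey are synthesized from the bound device
 * address; 1 is a resolved primary path; 2 adds an alternate path
 * record on top of it.
 */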
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		ib_addr_get_dgid(dev_addr,
				 (union ib_gid *) &resp->ib_route[0].dgid);
		ib_addr_get_sgid(dev_addr,
				 (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query_route cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
		       cmd.backlog : UCMA_MAX_BACKLOG;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ctx->uid = cmd.uid;
		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
		ret = rdma_accept(ctx->cm_id, &conn_param);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

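/*
 * The QP itself lives in userspace (created through libibverbs), so the
 * command above only reports which attributes rdma_cm requires for a
 * given transition; the library applies them itself.  A rough,
 * hypothetical userspace sketch (local variable names are illustrative):
 *
 *	struct rdma_ucm_init_qp_attr cmd = {
 *		.id	  = ctx_id,
 *		.qp_state = IBV_QPS_RTR,
 *		.response = (uintptr_t) &uresp,
 *	};
 *	// ... wrap in a rdma_ucm_cmd_hdr and write() to the device,
 *	// then translate uresp into ibv_modify_qp() arguments.
 */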
static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* cmd.optlen is user controlled; bound it before allocating. */
	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = kmalloc(cmd.optlen, GFP_KERNEL);
	if (!optval) {
		ret = -ENOMEM;
		goto out1;
	}

	if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
			   cmd.optlen)) {
		ret = -EFAULT;
		goto out2;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
out2:
	kfree(optval);
out1:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd.uid;
	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else {
		idr_remove(&multicast_idr, mc->id);
		atomic_inc(&mc->ctx->ref);
	}
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire the mutexes in address order to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock(&file2->mut);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock(&file1->mut);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

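/*
 * Locking both files in address order gives every thread the same global
 * order: if one task migrates an id from file A to file B while another
 * migrates from B to A, both grab the lower-addressed mutex first, so
 * the classic ABBA deadlock cannot form.
 */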
static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct file *filp;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	filp = fget(cmd.fd);
	if (!filp)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(filp->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fds, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fput(filp);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	= ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	= ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_ADDR]	= ucma_bind_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	= ucma_resolve_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	= ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	= ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	= ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	= ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	= ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	= ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	= ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	= ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	= ucma_migrate_id
};

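/*
 * The userspace ABI is write()-based: a struct rdma_ucm_cmd_hdr (the
 * opcode plus in/out payload sizes) is followed immediately by the
 * command struct, and any response is copied back through a user pointer
 * embedded in the command.  A minimal illustrative sketch, with error
 * handling omitted and the device node path depending on udev:
 *
 *	int fd = open("/dev/infiniband/rdma_cm", O_RDWR);
 *	struct {
 *		struct rdma_ucm_cmd_hdr hdr;
 *		struct rdma_ucm_create_id cmd;
 *	} req = {};
 *	struct rdma_ucm_create_id_resp resp;
 *
 *	req.hdr.cmd	 = RDMA_USER_CM_CMD_CREATE_ID;
 *	req.hdr.in	 = sizeof(req.cmd);
 *	req.hdr.out	 = sizeof(resp);
 *	req.cmd.uid	 = (uintptr_t) my_context;	// caller's cookie
 *	req.cmd.response = (uintptr_t) &resp;
 *	req.cmd.ps	 = RDMA_PS_TCP;
 *	write(fd, &req, sizeof(req));	// resp.id now names the new id
 */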
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;
	return 0;
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		/*
		 * ucma_free_ctx() takes file->mut itself while purging
		 * this context's events, so drop the mutex across the
		 * call and retake it before advancing.
		 */
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
};

static struct miscdevice ucma_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "rdma_cm",
	.fops	= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err;
	}
	return 0;
err:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	/* Release the multicast table's cached layers as well. */
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);