/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	UCMA_MAX_BACKLOG	= 128
};

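/*
 * Per-open-file state: the contexts created on this fd, the queue of
 * events waiting to be read, and the wait queue that backs poll() and
 * blocking reads.  ->mut protects both lists.
 */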
struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
};

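/*
 * One instance per multicast join on a context.  The pad after ->addr
 * sizes the embedded storage so that it can hold a struct sockaddr_in6
 * as well as a struct sockaddr_in.
 */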
struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr		addr;
	u8			pad[sizeof(struct sockaddr_in6) -
				    sizeof(struct sockaddr)];
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
};

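/*
 * ctx_idr and multicast_idr map the ids handed out to userspace back to
 * their kernel objects; both tables are protected by the global mutex
 * 'mut'.
 */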
static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

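/*
 * Lookup takes a reference on the context.  ucma_put_ctx() drops it and
 * completes ctx->comp on the final put, which is what ucma_destroy_id()
 * waits for before freeing the context.
 */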
static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

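/*
 * Id allocation uses the two-step idr API of this kernel generation:
 * idr_pre_get() preloads memory, idr_get_new() assigns the id, and the
 * loop retries when a concurrent allocation has consumed the preload
 * (-EAGAIN).
 */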
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	do {
		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;
	int ret;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	do {
		ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&multicast_idr, mc, &mc->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

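/*
 * Multicast events are reported against the ucma_multicast rather than
 * the context: ucma_join_multicast() passes the mc pointer as the join
 * context, and the CM hands it back in param.ud.private_data.
 */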
static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

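/*
 * rdma_cm event callback.  A non-zero return value tells the CM to
 * destroy the id, which is only safe for a connect request whose id
 * userspace has never seen; hence the allocation-failure path returns
 * non-zero only for RDMA_CM_EVENT_CONNECT_REQUEST.
 */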
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	mutex_lock(&ctx->file->mut);
	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later.
		 */
		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;
	DEFINE_WAIT(wait);

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static ssize_t ucma_create_id(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_events(struct ucma_context *ctx)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx != ctx)
			continue;

		list_del(&uevent->list);

		/* clear incoming connections. */
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);

		kfree(uevent);
	}
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;

	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	ucma_cleanup_events(ctx);
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

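/*
 * Destroy sequence: remove the id from the idr so that no new lookups
 * can find it, drop this caller's reference, wait for all outstanding
 * references to drain (ctx->comp), and only then tear the context down.
 */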
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	resp.events_reported = ucma_free_ctx(ctx);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_bind_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

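/*
 * num_paths encodes how far route resolution has progressed: 0 means no
 * path records yet, so only the GIDs and pkey of the bound device
 * address are returned; 1 returns the primary path; 2 adds the
 * alternate path.
 */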
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		ib_addr_get_dgid(dev_addr,
				 (union ib_gid *) &resp->ib_route[0].dgid);
		ib_addr_get_sgid(dev_addr,
				 (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query_route cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
		       cmd.backlog : UCMA_MAX_BACKLOG;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ctx->uid = cmd.uid;
		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
		ret = rdma_accept(ctx->cm_id, &conn_param);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = kmalloc(cmd.optlen, GFP_KERNEL);
	if (!optval) {
		ret = -ENOMEM;
		goto out1;
	}

	if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
			   cmd.optlen)) {
		ret = -EFAULT;
		goto out2;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
out2:
	kfree(optval);
out1:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd.uid;
	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
	ret = rdma_join_multicast(ctx->cm_id, &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

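/*
 * Leaving a multicast group mirrors ucma_destroy_id() for mc objects:
 * remove the id under 'mut', take a context reference so the cm_id
 * stays alive across rdma_leave_multicast(), then flush any events
 * still queued for this join before freeing it.
 */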
static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else {
		idr_remove(&multicast_idr, mc->id);
		atomic_inc(&mc->ctx->ref);
	}
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/*
	 * Acquire the mutexes in address order so that two threads
	 * locking the same pair of files cannot deadlock.
	 */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock(&file2->mut);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock(&file1->mut);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct file *filp;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	filp = fget(cmd.fd);
	if (!filp)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(filp->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fput(filp);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	= ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	= ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_ADDR]	= ucma_bind_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	= ucma_resolve_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	= ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	= ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	= ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	= ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	= ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	= ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	= ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	= ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	= ucma_migrate_id
};

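/*
 * Command dispatch.  Userspace drives the device with one write() per
 * command: a struct rdma_ucm_cmd_hdr giving the command index and the
 * payload sizes, immediately followed by the command-specific struct.
 * A minimal userspace sketch (illustrative only, not part of this
 * file; it assumes the rdma_cm misc device node is open as 'fd', and
 * 'resp' is a hypothetical local struct rdma_ucm_create_id_resp):
 *
 *	struct {
 *		struct rdma_ucm_cmd_hdr hdr;
 *		struct rdma_ucm_create_id cmd;
 *	} msg = {
 *		.hdr = {
 *			.cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *			.in  = sizeof(msg.cmd),
 *			.out = sizeof(struct rdma_ucm_create_id_resp),
 *		},
 *		.cmd = {
 *			.response = (uintptr_t) &resp,
 *			.ps	  = RDMA_PS_TCP,
 *		},
 *	};
 *
 *	if (write(fd, &msg, sizeof(msg)) != sizeof(msg))
 *		// error: errno holds the reason
 *
 * On success ucma_write() below returns the full length written.
 */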
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;
	return 0;
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
};

static struct miscdevice ucma_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "rdma_cm",
	.fops	= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err;
	}
	return 0;
err:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);