xref: /openbmc/linux/drivers/infiniband/core/ucma.c (revision f7d84fa7)
1 /*
2  * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *	copyright notice, this list of conditions and the following
16  *	disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *	copyright notice, this list of conditions and the following
20  *	disclaimer in the documentation and/or other materials
21  *	provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/completion.h>
34 #include <linux/file.h>
35 #include <linux/mutex.h>
36 #include <linux/poll.h>
37 #include <linux/sched.h>
38 #include <linux/idr.h>
39 #include <linux/in.h>
40 #include <linux/in6.h>
41 #include <linux/miscdevice.h>
42 #include <linux/slab.h>
43 #include <linux/sysctl.h>
44 #include <linux/module.h>
45 #include <linux/nsproxy.h>
46 
47 #include <rdma/rdma_user_cm.h>
48 #include <rdma/ib_marshall.h>
49 #include <rdma/rdma_cm.h>
50 #include <rdma/rdma_cm_ib.h>
51 #include <rdma/ib_addr.h>
52 #include <rdma/ib.h>
53 
54 MODULE_AUTHOR("Sean Hefty");
55 MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
56 MODULE_LICENSE("Dual BSD/GPL");
57 
58 static unsigned int max_backlog = 1024;
59 
60 static struct ctl_table_header *ucma_ctl_table_hdr;
61 static struct ctl_table ucma_ctl_table[] = {
62 	{
63 		.procname	= "max_backlog",
64 		.data		= &max_backlog,
65 		.maxlen		= sizeof max_backlog,
66 		.mode		= 0644,
67 		.proc_handler	= proc_dointvec,
68 	},
69 	{ }
70 };
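
/*
 * The table above is registered under "net/rdma_ucm" in ucma_init() below,
 * so the connection backlog limit is tunable at runtime. A shell example
 * (assuming the module is loaded):
 *
 *	sysctl -w net.rdma_ucm.max_backlog=2048
 */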
71 
72 struct ucma_file {
73 	struct mutex		mut;
74 	struct file		*filp;
75 	struct list_head	ctx_list;
76 	struct list_head	event_list;
77 	wait_queue_head_t	poll_wait;
78 	struct workqueue_struct	*close_wq;
79 };
80 
81 struct ucma_context {
82 	int			id;
83 	struct completion	comp;
84 	atomic_t		ref;
85 	int			events_reported;
86 	int			backlog;
87 
88 	struct ucma_file	*file;
89 	struct rdma_cm_id	*cm_id;
90 	u64			uid;
91 
92 	struct list_head	list;
93 	struct list_head	mc_list;
94 	/* Mark that the device is in the process of destroying its internal
95 	 * HW resources; protected by the global mutex (mut).
96 	 */
97 	int			closing;
98 	/* Sync between the removal event and ID destruction; protected by file->mut. */
99 	int			destroying;
100 	struct work_struct	close_work;
101 };
102 
103 struct ucma_multicast {
104 	struct ucma_context	*ctx;
105 	int			id;
106 	int			events_reported;
107 
108 	u64			uid;
109 	u8			join_state;
110 	struct list_head	list;
111 	struct sockaddr_storage	addr;
112 };
113 
114 struct ucma_event {
115 	struct ucma_context	*ctx;
116 	struct ucma_multicast	*mc;
117 	struct list_head	list;
118 	struct rdma_cm_id	*cm_id;
119 	struct rdma_ucm_event_resp resp;
120 	struct work_struct	close_work;
121 };
122 
123 static DEFINE_MUTEX(mut);
124 static DEFINE_IDR(ctx_idr);
125 static DEFINE_IDR(multicast_idr);
126 
127 static inline struct ucma_context *_ucma_find_context(int id,
128 						      struct ucma_file *file)
129 {
130 	struct ucma_context *ctx;
131 
132 	ctx = idr_find(&ctx_idr, id);
133 	if (!ctx)
134 		ctx = ERR_PTR(-ENOENT);
135 	else if (ctx->file != file)
136 		ctx = ERR_PTR(-EINVAL);
137 	return ctx;
138 }
139 
140 static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
141 {
142 	struct ucma_context *ctx;
143 
144 	mutex_lock(&mut);
145 	ctx = _ucma_find_context(id, file);
146 	if (!IS_ERR(ctx)) {
147 		if (ctx->closing)
148 			ctx = ERR_PTR(-EIO);
149 		else
150 			atomic_inc(&ctx->ref);
151 	}
152 	mutex_unlock(&mut);
153 	return ctx;
154 }
155 
156 static void ucma_put_ctx(struct ucma_context *ctx)
157 {
158 	if (atomic_dec_and_test(&ctx->ref))
159 		complete(&ctx->comp);
160 }
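
/*
 * Lifetime pattern used throughout this file: ucma_alloc_ctx() starts the
 * refcount at 1, each successful ucma_get_ctx() takes a reference, and the
 * final ucma_put_ctx() completes ctx->comp. A destroy path drops its own
 * reference and then waits on ctx->comp before calling rdma_destroy_id(),
 * which guarantees that no other task still references the context.
 */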
161 
162 static void ucma_close_event_id(struct work_struct *work)
163 {
164 	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);
165 
166 	rdma_destroy_id(uevent_close->cm_id);
167 	kfree(uevent_close);
168 }
169 
170 static void ucma_close_id(struct work_struct *work)
171 {
172 	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);
173 
174 	/* Once all inflight tasks are finished, we close all underlying
175 	 * resources. The context is still alive until it is explicitly
176 	 * destroyed by its creator.
177 	 */
178 	ucma_put_ctx(ctx);
179 	wait_for_completion(&ctx->comp);
180 	/* No new events will be generated after destroying the id. */
181 	rdma_destroy_id(ctx->cm_id);
182 }
183 
184 static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
185 {
186 	struct ucma_context *ctx;
187 
188 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
189 	if (!ctx)
190 		return NULL;
191 
192 	INIT_WORK(&ctx->close_work, ucma_close_id);
193 	atomic_set(&ctx->ref, 1);
194 	init_completion(&ctx->comp);
195 	INIT_LIST_HEAD(&ctx->mc_list);
196 	ctx->file = file;
197 
198 	mutex_lock(&mut);
199 	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
200 	mutex_unlock(&mut);
201 	if (ctx->id < 0)
202 		goto error;
203 
204 	list_add_tail(&ctx->list, &file->ctx_list);
205 	return ctx;
206 
207 error:
208 	kfree(ctx);
209 	return NULL;
210 }
211 
212 static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
213 {
214 	struct ucma_multicast *mc;
215 
216 	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
217 	if (!mc)
218 		return NULL;
219 
220 	mutex_lock(&mut);
221 	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
222 	mutex_unlock(&mut);
223 	if (mc->id < 0)
224 		goto error;
225 
226 	mc->ctx = ctx;
227 	list_add_tail(&mc->list, &ctx->mc_list);
228 	return mc;
229 
230 error:
231 	kfree(mc);
232 	return NULL;
233 }
234 
235 static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
236 				 struct rdma_conn_param *src)
237 {
238 	if (src->private_data_len)
239 		memcpy(dst->private_data, src->private_data,
240 		       src->private_data_len);
241 	dst->private_data_len = src->private_data_len;
242 	dst->responder_resources = src->responder_resources;
243 	dst->initiator_depth = src->initiator_depth;
244 	dst->flow_control = src->flow_control;
245 	dst->retry_count = src->retry_count;
246 	dst->rnr_retry_count = src->rnr_retry_count;
247 	dst->srq = src->srq;
248 	dst->qp_num = src->qp_num;
249 }
250 
251 static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
252 			       struct rdma_ud_param *src)
253 {
254 	if (src->private_data_len)
255 		memcpy(dst->private_data, src->private_data,
256 		       src->private_data_len);
257 	dst->private_data_len = src->private_data_len;
258 	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
259 	dst->qp_num = src->qp_num;
260 	dst->qkey = src->qkey;
261 }
262 
263 static void ucma_set_event_context(struct ucma_context *ctx,
264 				   struct rdma_cm_event *event,
265 				   struct ucma_event *uevent)
266 {
267 	uevent->ctx = ctx;
268 	switch (event->event) {
269 	case RDMA_CM_EVENT_MULTICAST_JOIN:
270 	case RDMA_CM_EVENT_MULTICAST_ERROR:
271 		uevent->mc = (struct ucma_multicast *)
272 			     event->param.ud.private_data;
273 		uevent->resp.uid = uevent->mc->uid;
274 		uevent->resp.id = uevent->mc->id;
275 		break;
276 	default:
277 		uevent->resp.uid = ctx->uid;
278 		uevent->resp.id = ctx->id;
279 		break;
280 	}
281 }
282 
283 /* Called with file->mut locked for the relevant context. */
284 static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
285 {
286 	struct ucma_context *ctx = cm_id->context;
287 	struct ucma_event *con_req_eve;
288 	int event_found = 0;
289 
290 	if (ctx->destroying)
291 		return;
292 
293 	/* The context can be queued for closing only when it owns this
294 	 * cm_id. Otherwise the cm_id is an inflight one that sits on the
295 	 * context's event list, waiting to be detached and reattached to a
296 	 * new context in ucma_get_event; that case is handled separately
297 	 * below.
298 	 */
299 	if (ctx->cm_id == cm_id) {
300 		mutex_lock(&mut);
301 		ctx->closing = 1;
302 		mutex_unlock(&mut);
303 		queue_work(ctx->file->close_wq, &ctx->close_work);
304 		return;
305 	}
306 
307 	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
308 		if (con_req_eve->cm_id == cm_id &&
309 		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
310 			list_del(&con_req_eve->list);
311 			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
312 			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
313 			event_found = 1;
314 			break;
315 		}
316 	}
317 	if (!event_found)
318 		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
319 }
320 
321 static int ucma_event_handler(struct rdma_cm_id *cm_id,
322 			      struct rdma_cm_event *event)
323 {
324 	struct ucma_event *uevent;
325 	struct ucma_context *ctx = cm_id->context;
326 	int ret = 0;
327 
328 	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
329 	if (!uevent)
330 		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
331 
332 	mutex_lock(&ctx->file->mut);
333 	uevent->cm_id = cm_id;
334 	ucma_set_event_context(ctx, event, uevent);
335 	uevent->resp.event = event->event;
336 	uevent->resp.status = event->status;
337 	if (cm_id->qp_type == IB_QPT_UD)
338 		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
339 	else
340 		ucma_copy_conn_event(&uevent->resp.param.conn,
341 				     &event->param.conn);
342 
343 	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
344 		if (!ctx->backlog) {
345 			ret = -ENOMEM;
346 			kfree(uevent);
347 			goto out;
348 		}
349 		ctx->backlog--;
350 	} else if (!ctx->uid || ctx->cm_id != cm_id) {
351 		/*
352 		 * We ignore events for new connections until userspace has set
353 		 * their context.  This can only happen if an error occurs on a
354 		 * new connection before the user accepts it.  This is okay,
355 		 * since the accept will just fail later. However, we do need
356 		 * to release the underlying HW resources in case of a device
357 		 * removal event.
358 		 */
359 		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
360 			ucma_removal_event_handler(cm_id);
361 
362 		kfree(uevent);
363 		goto out;
364 	}
365 
366 	list_add_tail(&uevent->list, &ctx->file->event_list);
367 	wake_up_interruptible(&ctx->file->poll_wait);
368 	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
369 		ucma_removal_event_handler(cm_id);
370 out:
371 	mutex_unlock(&ctx->file->mut);
372 	return ret;
373 }
374 
375 static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
376 			      int in_len, int out_len)
377 {
378 	struct ucma_context *ctx;
379 	struct rdma_ucm_get_event cmd;
380 	struct ucma_event *uevent;
381 	int ret = 0;
382 
383 	if (out_len < sizeof uevent->resp)
384 		return -ENOSPC;
385 
386 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
387 		return -EFAULT;
388 
389 	mutex_lock(&file->mut);
390 	while (list_empty(&file->event_list)) {
391 		mutex_unlock(&file->mut);
392 
393 		if (file->filp->f_flags & O_NONBLOCK)
394 			return -EAGAIN;
395 
396 		if (wait_event_interruptible(file->poll_wait,
397 					     !list_empty(&file->event_list)))
398 			return -ERESTARTSYS;
399 
400 		mutex_lock(&file->mut);
401 	}
402 
403 	uevent = list_entry(file->event_list.next, struct ucma_event, list);
404 
405 	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
406 		ctx = ucma_alloc_ctx(file);
407 		if (!ctx) {
408 			ret = -ENOMEM;
409 			goto done;
410 		}
411 		uevent->ctx->backlog++;
412 		ctx->cm_id = uevent->cm_id;
413 		ctx->cm_id->context = ctx;
414 		uevent->resp.id = ctx->id;
415 	}
416 
417 	if (copy_to_user((void __user *)(unsigned long)cmd.response,
418 			 &uevent->resp, sizeof uevent->resp)) {
419 		ret = -EFAULT;
420 		goto done;
421 	}
422 
423 	list_del(&uevent->list);
424 	uevent->ctx->events_reported++;
425 	if (uevent->mc)
426 		uevent->mc->events_reported++;
427 	kfree(uevent);
428 done:
429 	mutex_unlock(&file->mut);
430 	return ret;
431 }
432 
433 static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
434 {
435 	switch (cmd->ps) {
436 	case RDMA_PS_TCP:
437 		*qp_type = IB_QPT_RC;
438 		return 0;
439 	case RDMA_PS_UDP:
440 	case RDMA_PS_IPOIB:
441 		*qp_type = IB_QPT_UD;
442 		return 0;
443 	case RDMA_PS_IB:
444 		*qp_type = cmd->qp_type;
445 		return 0;
446 	default:
447 		return -EINVAL;
448 	}
449 }
450 
451 static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
452 			      int in_len, int out_len)
453 {
454 	struct rdma_ucm_create_id cmd;
455 	struct rdma_ucm_create_id_resp resp;
456 	struct ucma_context *ctx;
457 	enum ib_qp_type qp_type;
458 	int ret;
459 
460 	if (out_len < sizeof(resp))
461 		return -ENOSPC;
462 
463 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
464 		return -EFAULT;
465 
466 	ret = ucma_get_qp_type(&cmd, &qp_type);
467 	if (ret)
468 		return ret;
469 
470 	mutex_lock(&file->mut);
471 	ctx = ucma_alloc_ctx(file);
472 	mutex_unlock(&file->mut);
473 	if (!ctx)
474 		return -ENOMEM;
475 
476 	ctx->uid = cmd.uid;
477 	ctx->cm_id = rdma_create_id(current->nsproxy->net_ns,
478 				    ucma_event_handler, ctx, cmd.ps, qp_type);
479 	if (IS_ERR(ctx->cm_id)) {
480 		ret = PTR_ERR(ctx->cm_id);
481 		goto err1;
482 	}
483 
484 	resp.id = ctx->id;
485 	if (copy_to_user((void __user *)(unsigned long)cmd.response,
486 			 &resp, sizeof(resp))) {
487 		ret = -EFAULT;
488 		goto err2;
489 	}
490 	return 0;
491 
492 err2:
493 	rdma_destroy_id(ctx->cm_id);
494 err1:
495 	mutex_lock(&mut);
496 	idr_remove(&ctx_idr, ctx->id);
497 	mutex_unlock(&mut);
498 	kfree(ctx);
499 	return ret;
500 }
501 
502 static void ucma_cleanup_multicast(struct ucma_context *ctx)
503 {
504 	struct ucma_multicast *mc, *tmp;
505 
506 	mutex_lock(&mut);
507 	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
508 		list_del(&mc->list);
509 		idr_remove(&multicast_idr, mc->id);
510 		kfree(mc);
511 	}
512 	mutex_unlock(&mut);
513 }
514 
515 static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
516 {
517 	struct ucma_event *uevent, *tmp;
518 
519 	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
520 		if (uevent->mc != mc)
521 			continue;
522 
523 		list_del(&uevent->list);
524 		kfree(uevent);
525 	}
526 }
527 
528 /*
529  * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
530  * this point, no new events will be reported from the hardware. However, we
531  * still need to clean up the UCMA context for this ID. Specifically, there
532  * might be events that have not yet been consumed by the user space software.
533  * These might include pending connect requests which we have not completed
534  * processing.  We cannot call rdma_destroy_id while holding the lock of the
535  * context (file->mut), as it might cause a deadlock. We therefore extract all
536  * relevant events from the context's pending event list while holding the
537  * mutex. After that we release them as needed.
538  */
539 static int ucma_free_ctx(struct ucma_context *ctx)
540 {
541 	int events_reported;
542 	struct ucma_event *uevent, *tmp;
543 	LIST_HEAD(list);
544 
545 
546 	ucma_cleanup_multicast(ctx);
547 
548 	/* Cleanup events not yet reported to the user. */
549 	mutex_lock(&ctx->file->mut);
550 	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
551 		if (uevent->ctx == ctx)
552 			list_move_tail(&uevent->list, &list);
553 	}
554 	list_del(&ctx->list);
555 	mutex_unlock(&ctx->file->mut);
556 
557 	list_for_each_entry_safe(uevent, tmp, &list, list) {
558 		list_del(&uevent->list);
559 		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
560 			rdma_destroy_id(uevent->cm_id);
561 		kfree(uevent);
562 	}
563 
564 	events_reported = ctx->events_reported;
565 	kfree(ctx);
566 	return events_reported;
567 }
568 
569 static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
570 			       int in_len, int out_len)
571 {
572 	struct rdma_ucm_destroy_id cmd;
573 	struct rdma_ucm_destroy_id_resp resp;
574 	struct ucma_context *ctx;
575 	int ret = 0;
576 
577 	if (out_len < sizeof(resp))
578 		return -ENOSPC;
579 
580 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
581 		return -EFAULT;
582 
583 	mutex_lock(&mut);
584 	ctx = _ucma_find_context(cmd.id, file);
585 	if (!IS_ERR(ctx))
586 		idr_remove(&ctx_idr, ctx->id);
587 	mutex_unlock(&mut);
588 
589 	if (IS_ERR(ctx))
590 		return PTR_ERR(ctx);
591 
592 	mutex_lock(&ctx->file->mut);
593 	ctx->destroying = 1;
594 	mutex_unlock(&ctx->file->mut);
595 
596 	flush_workqueue(ctx->file->close_wq);
597 	/* At this point it's guaranteed that there is no inflight
598 	 * closing task. */
599 	mutex_lock(&mut);
600 	if (!ctx->closing) {
601 		mutex_unlock(&mut);
602 		ucma_put_ctx(ctx);
603 		wait_for_completion(&ctx->comp);
604 		rdma_destroy_id(ctx->cm_id);
605 	} else {
606 		mutex_unlock(&mut);
607 	}
608 
609 	resp.events_reported = ucma_free_ctx(ctx);
610 	if (copy_to_user((void __user *)(unsigned long)cmd.response,
611 			 &resp, sizeof(resp)))
612 		ret = -EFAULT;
613 
614 	return ret;
615 }
616 
617 static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
618 			      int in_len, int out_len)
619 {
620 	struct rdma_ucm_bind_ip cmd;
621 	struct ucma_context *ctx;
622 	int ret;
623 
624 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
625 		return -EFAULT;
626 
627 	ctx = ucma_get_ctx(file, cmd.id);
628 	if (IS_ERR(ctx))
629 		return PTR_ERR(ctx);
630 
631 	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
632 	ucma_put_ctx(ctx);
633 	return ret;
634 }
635 
636 static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
637 			 int in_len, int out_len)
638 {
639 	struct rdma_ucm_bind cmd;
640 	struct sockaddr *addr;
641 	struct ucma_context *ctx;
642 	int ret;
643 
644 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
645 		return -EFAULT;
646 
647 	addr = (struct sockaddr *) &cmd.addr;
648 	if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
649 		return -EINVAL;
650 
651 	ctx = ucma_get_ctx(file, cmd.id);
652 	if (IS_ERR(ctx))
653 		return PTR_ERR(ctx);
654 
655 	ret = rdma_bind_addr(ctx->cm_id, addr);
656 	ucma_put_ctx(ctx);
657 	return ret;
658 }
659 
660 static ssize_t ucma_resolve_ip(struct ucma_file *file,
661 			       const char __user *inbuf,
662 			       int in_len, int out_len)
663 {
664 	struct rdma_ucm_resolve_ip cmd;
665 	struct ucma_context *ctx;
666 	int ret;
667 
668 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
669 		return -EFAULT;
670 
671 	ctx = ucma_get_ctx(file, cmd.id);
672 	if (IS_ERR(ctx))
673 		return PTR_ERR(ctx);
674 
675 	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
676 				(struct sockaddr *) &cmd.dst_addr,
677 				cmd.timeout_ms);
678 	ucma_put_ctx(ctx);
679 	return ret;
680 }
681 
682 static ssize_t ucma_resolve_addr(struct ucma_file *file,
683 				 const char __user *inbuf,
684 				 int in_len, int out_len)
685 {
686 	struct rdma_ucm_resolve_addr cmd;
687 	struct sockaddr *src, *dst;
688 	struct ucma_context *ctx;
689 	int ret;
690 
691 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
692 		return -EFAULT;
693 
694 	src = (struct sockaddr *) &cmd.src_addr;
695 	dst = (struct sockaddr *) &cmd.dst_addr;
696 	if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
697 	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
698 		return -EINVAL;
699 
700 	ctx = ucma_get_ctx(file, cmd.id);
701 	if (IS_ERR(ctx))
702 		return PTR_ERR(ctx);
703 
704 	ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
705 	ucma_put_ctx(ctx);
706 	return ret;
707 }
708 
709 static ssize_t ucma_resolve_route(struct ucma_file *file,
710 				  const char __user *inbuf,
711 				  int in_len, int out_len)
712 {
713 	struct rdma_ucm_resolve_route cmd;
714 	struct ucma_context *ctx;
715 	int ret;
716 
717 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
718 		return -EFAULT;
719 
720 	ctx = ucma_get_ctx(file, cmd.id);
721 	if (IS_ERR(ctx))
722 		return PTR_ERR(ctx);
723 
724 	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
725 	ucma_put_ctx(ctx);
726 	return ret;
727 }
728 
729 static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
730 			       struct rdma_route *route)
731 {
732 	struct rdma_dev_addr *dev_addr;
733 
734 	resp->num_paths = route->num_paths;
735 	switch (route->num_paths) {
736 	case 0:
737 		dev_addr = &route->addr.dev_addr;
738 		rdma_addr_get_dgid(dev_addr,
739 				   (union ib_gid *) &resp->ib_route[0].dgid);
740 		rdma_addr_get_sgid(dev_addr,
741 				   (union ib_gid *) &resp->ib_route[0].sgid);
742 		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
743 		break;
744 	case 2:
745 		ib_copy_path_rec_to_user(&resp->ib_route[1],
746 					 &route->path_rec[1]);
747 		/* fall through */
748 	case 1:
749 		ib_copy_path_rec_to_user(&resp->ib_route[0],
750 					 &route->path_rec[0]);
751 		break;
752 	default:
753 		break;
754 	}
755 }
756 
757 static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
758 				 struct rdma_route *route)
759 {
760 
761 	resp->num_paths = route->num_paths;
762 	switch (route->num_paths) {
763 	case 0:
764 		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
765 			    (union ib_gid *)&resp->ib_route[0].dgid);
766 		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
767 			    (union ib_gid *)&resp->ib_route[0].sgid);
768 		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
769 		break;
770 	case 2:
771 		ib_copy_path_rec_to_user(&resp->ib_route[1],
772 					 &route->path_rec[1]);
773 		/* fall through */
774 	case 1:
775 		ib_copy_path_rec_to_user(&resp->ib_route[0],
776 					 &route->path_rec[0]);
777 		break;
778 	default:
779 		break;
780 	}
781 }
782 
783 static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
784 			       struct rdma_route *route)
785 {
786 	struct rdma_dev_addr *dev_addr;
787 
788 	dev_addr = &route->addr.dev_addr;
789 	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
790 	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
791 }
792 
793 static ssize_t ucma_query_route(struct ucma_file *file,
794 				const char __user *inbuf,
795 				int in_len, int out_len)
796 {
797 	struct rdma_ucm_query cmd;
798 	struct rdma_ucm_query_route_resp resp;
799 	struct ucma_context *ctx;
800 	struct sockaddr *addr;
801 	int ret = 0;
802 
803 	if (out_len < sizeof(resp))
804 		return -ENOSPC;
805 
806 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
807 		return -EFAULT;
808 
809 	ctx = ucma_get_ctx(file, cmd.id);
810 	if (IS_ERR(ctx))
811 		return PTR_ERR(ctx);
812 
813 	memset(&resp, 0, sizeof resp);
814 	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
815 	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
816 				     sizeof(struct sockaddr_in) :
817 				     sizeof(struct sockaddr_in6));
818 	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
819 	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
820 				     sizeof(struct sockaddr_in) :
821 				     sizeof(struct sockaddr_in6));
822 	if (!ctx->cm_id->device)
823 		goto out;
824 
825 	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
826 	resp.port_num = ctx->cm_id->port_num;
827 
828 	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
829 		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
830 	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
831 		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
832 	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
833 		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
834 
835 out:
836 	if (copy_to_user((void __user *)(unsigned long)cmd.response,
837 			 &resp, sizeof(resp)))
838 		ret = -EFAULT;
839 
840 	ucma_put_ctx(ctx);
841 	return ret;
842 }
843 
844 static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
845 				   struct rdma_ucm_query_addr_resp *resp)
846 {
847 	if (!cm_id->device)
848 		return;
849 
850 	resp->node_guid = (__force __u64) cm_id->device->node_guid;
851 	resp->port_num = cm_id->port_num;
852 	resp->pkey = (__force __u16) cpu_to_be16(
853 		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
854 }
855 
856 static ssize_t ucma_query_addr(struct ucma_context *ctx,
857 			       void __user *response, int out_len)
858 {
859 	struct rdma_ucm_query_addr_resp resp;
860 	struct sockaddr *addr;
861 	int ret = 0;
862 
863 	if (out_len < sizeof(resp))
864 		return -ENOSPC;
865 
866 	memset(&resp, 0, sizeof resp);
867 
868 	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
869 	resp.src_size = rdma_addr_size(addr);
870 	memcpy(&resp.src_addr, addr, resp.src_size);
871 
872 	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
873 	resp.dst_size = rdma_addr_size(addr);
874 	memcpy(&resp.dst_addr, addr, resp.dst_size);
875 
876 	ucma_query_device_addr(ctx->cm_id, &resp);
877 
878 	if (copy_to_user(response, &resp, sizeof(resp)))
879 		ret = -EFAULT;
880 
881 	return ret;
882 }
883 
884 static ssize_t ucma_query_path(struct ucma_context *ctx,
885 			       void __user *response, int out_len)
886 {
887 	struct rdma_ucm_query_path_resp *resp;
888 	int i, ret = 0;
889 
890 	if (out_len < sizeof(*resp))
891 		return -ENOSPC;
892 
893 	resp = kzalloc(out_len, GFP_KERNEL);
894 	if (!resp)
895 		return -ENOMEM;
896 
897 	resp->num_paths = ctx->cm_id->route.num_paths;
898 	for (i = 0, out_len -= sizeof(*resp);
899 	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
900 	     i++, out_len -= sizeof(struct ib_path_rec_data)) {
901 		struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];
902 
903 		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
904 					   IB_PATH_BIDIRECTIONAL;
905 		if (rec->rec_type == SA_PATH_REC_TYPE_IB) {
906 			ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
907 		} else {
908 			struct sa_path_rec ib;
909 
910 			sa_convert_path_opa_to_ib(&ib, rec);
911 			ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
912 		}
913 	}
914 
915 	if (copy_to_user(response, resp,
916 			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
917 		ret = -EFAULT;
918 
919 	kfree(resp);
920 	return ret;
921 }
922 
923 static ssize_t ucma_query_gid(struct ucma_context *ctx,
924 			      void __user *response, int out_len)
925 {
926 	struct rdma_ucm_query_addr_resp resp;
927 	struct sockaddr_ib *addr;
928 	int ret = 0;
929 
930 	if (out_len < sizeof(resp))
931 		return -ENOSPC;
932 
933 	memset(&resp, 0, sizeof resp);
934 
935 	ucma_query_device_addr(ctx->cm_id, &resp);
936 
937 	addr = (struct sockaddr_ib *) &resp.src_addr;
938 	resp.src_size = sizeof(*addr);
939 	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
940 		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
941 	} else {
942 		addr->sib_family = AF_IB;
943 		addr->sib_pkey = (__force __be16) resp.pkey;
944 		rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
945 				   (union ib_gid *) &addr->sib_addr);
946 		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
947 						    &ctx->cm_id->route.addr.src_addr);
948 	}
949 
950 	addr = (struct sockaddr_ib *) &resp.dst_addr;
951 	resp.dst_size = sizeof(*addr);
952 	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
953 		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
954 	} else {
955 		addr->sib_family = AF_IB;
956 		addr->sib_pkey = (__force __be16) resp.pkey;
957 		rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
958 				   (union ib_gid *) &addr->sib_addr);
959 		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
960 						    &ctx->cm_id->route.addr.dst_addr);
961 	}
962 
963 	if (copy_to_user(response, &resp, sizeof(resp)))
964 		ret = -EFAULT;
965 
966 	return ret;
967 }
968 
969 static ssize_t ucma_query(struct ucma_file *file,
970 			  const char __user *inbuf,
971 			  int in_len, int out_len)
972 {
973 	struct rdma_ucm_query cmd;
974 	struct ucma_context *ctx;
975 	void __user *response;
976 	int ret;
977 
978 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
979 		return -EFAULT;
980 
981 	response = (void __user *)(unsigned long) cmd.response;
982 	ctx = ucma_get_ctx(file, cmd.id);
983 	if (IS_ERR(ctx))
984 		return PTR_ERR(ctx);
985 
986 	switch (cmd.option) {
987 	case RDMA_USER_CM_QUERY_ADDR:
988 		ret = ucma_query_addr(ctx, response, out_len);
989 		break;
990 	case RDMA_USER_CM_QUERY_PATH:
991 		ret = ucma_query_path(ctx, response, out_len);
992 		break;
993 	case RDMA_USER_CM_QUERY_GID:
994 		ret = ucma_query_gid(ctx, response, out_len);
995 		break;
996 	default:
997 		ret = -ENOSYS;
998 		break;
999 	}
1000 
1001 	ucma_put_ctx(ctx);
1002 	return ret;
1003 }
1004 
1005 static void ucma_copy_conn_param(struct rdma_cm_id *id,
1006 				 struct rdma_conn_param *dst,
1007 				 struct rdma_ucm_conn_param *src)
1008 {
1009 	dst->private_data = src->private_data;
1010 	dst->private_data_len = src->private_data_len;
1011 	dst->responder_resources = src->responder_resources;
1012 	dst->initiator_depth = src->initiator_depth;
1013 	dst->flow_control = src->flow_control;
1014 	dst->retry_count = src->retry_count;
1015 	dst->rnr_retry_count = src->rnr_retry_count;
1016 	dst->srq = src->srq;
1017 	dst->qp_num = src->qp_num;
1018 	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
1019 }
1020 
1021 static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
1022 			    int in_len, int out_len)
1023 {
1024 	struct rdma_ucm_connect cmd;
1025 	struct rdma_conn_param conn_param;
1026 	struct ucma_context *ctx;
1027 	int ret;
1028 
1029 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1030 		return -EFAULT;
1031 
1032 	if (!cmd.conn_param.valid)
1033 		return -EINVAL;
1034 
1035 	ctx = ucma_get_ctx(file, cmd.id);
1036 	if (IS_ERR(ctx))
1037 		return PTR_ERR(ctx);
1038 
1039 	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
1040 	ret = rdma_connect(ctx->cm_id, &conn_param);
1041 	ucma_put_ctx(ctx);
1042 	return ret;
1043 }
1044 
1045 static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
1046 			   int in_len, int out_len)
1047 {
1048 	struct rdma_ucm_listen cmd;
1049 	struct ucma_context *ctx;
1050 	int ret;
1051 
1052 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1053 		return -EFAULT;
1054 
1055 	ctx = ucma_get_ctx(file, cmd.id);
1056 	if (IS_ERR(ctx))
1057 		return PTR_ERR(ctx);
1058 
1059 	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
1060 		       cmd.backlog : max_backlog;
1061 	ret = rdma_listen(ctx->cm_id, ctx->backlog);
1062 	ucma_put_ctx(ctx);
1063 	return ret;
1064 }
1065 
1066 static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
1067 			   int in_len, int out_len)
1068 {
1069 	struct rdma_ucm_accept cmd;
1070 	struct rdma_conn_param conn_param;
1071 	struct ucma_context *ctx;
1072 	int ret;
1073 
1074 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1075 		return -EFAULT;
1076 
1077 	ctx = ucma_get_ctx(file, cmd.id);
1078 	if (IS_ERR(ctx))
1079 		return PTR_ERR(ctx);
1080 
1081 	if (cmd.conn_param.valid) {
1082 		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
1083 		mutex_lock(&file->mut);
1084 		ret = rdma_accept(ctx->cm_id, &conn_param);
1085 		if (!ret)
1086 			ctx->uid = cmd.uid;
1087 		mutex_unlock(&file->mut);
1088 	} else
1089 		ret = rdma_accept(ctx->cm_id, NULL);
1090 
1091 	ucma_put_ctx(ctx);
1092 	return ret;
1093 }
1094 
1095 static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
1096 			   int in_len, int out_len)
1097 {
1098 	struct rdma_ucm_reject cmd;
1099 	struct ucma_context *ctx;
1100 	int ret;
1101 
1102 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1103 		return -EFAULT;
1104 
1105 	ctx = ucma_get_ctx(file, cmd.id);
1106 	if (IS_ERR(ctx))
1107 		return PTR_ERR(ctx);
1108 
1109 	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
1110 	ucma_put_ctx(ctx);
1111 	return ret;
1112 }
1113 
1114 static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
1115 			       int in_len, int out_len)
1116 {
1117 	struct rdma_ucm_disconnect cmd;
1118 	struct ucma_context *ctx;
1119 	int ret;
1120 
1121 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1122 		return -EFAULT;
1123 
1124 	ctx = ucma_get_ctx(file, cmd.id);
1125 	if (IS_ERR(ctx))
1126 		return PTR_ERR(ctx);
1127 
1128 	ret = rdma_disconnect(ctx->cm_id);
1129 	ucma_put_ctx(ctx);
1130 	return ret;
1131 }
1132 
1133 static ssize_t ucma_init_qp_attr(struct ucma_file *file,
1134 				 const char __user *inbuf,
1135 				 int in_len, int out_len)
1136 {
1137 	struct rdma_ucm_init_qp_attr cmd;
1138 	struct ib_uverbs_qp_attr resp;
1139 	struct ucma_context *ctx;
1140 	struct ib_qp_attr qp_attr;
1141 	int ret;
1142 
1143 	if (out_len < sizeof(resp))
1144 		return -ENOSPC;
1145 
1146 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1147 		return -EFAULT;
1148 
1149 	ctx = ucma_get_ctx(file, cmd.id);
1150 	if (IS_ERR(ctx))
1151 		return PTR_ERR(ctx);
1152 
1153 	resp.qp_attr_mask = 0;
1154 	memset(&qp_attr, 0, sizeof qp_attr);
1155 	qp_attr.qp_state = cmd.qp_state;
1156 	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
1157 	if (ret)
1158 		goto out;
1159 
1160 	ib_copy_qp_attr_to_user(&resp, &qp_attr);
1161 	if (copy_to_user((void __user *)(unsigned long)cmd.response,
1162 			 &resp, sizeof(resp)))
1163 		ret = -EFAULT;
1164 
1165 out:
1166 	ucma_put_ctx(ctx);
1167 	return ret;
1168 }
1169 
1170 static int ucma_set_option_id(struct ucma_context *ctx, int optname,
1171 			      void *optval, size_t optlen)
1172 {
1173 	int ret = 0;
1174 
1175 	switch (optname) {
1176 	case RDMA_OPTION_ID_TOS:
1177 		if (optlen != sizeof(u8)) {
1178 			ret = -EINVAL;
1179 			break;
1180 		}
1181 		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
1182 		break;
1183 	case RDMA_OPTION_ID_REUSEADDR:
1184 		if (optlen != sizeof(int)) {
1185 			ret = -EINVAL;
1186 			break;
1187 		}
1188 		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
1189 		break;
1190 	case RDMA_OPTION_ID_AFONLY:
1191 		if (optlen != sizeof(int)) {
1192 			ret = -EINVAL;
1193 			break;
1194 		}
1195 		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
1196 		break;
1197 	default:
1198 		ret = -ENOSYS;
1199 	}
1200 
1201 	return ret;
1202 }
1203 
1204 static int ucma_set_ib_path(struct ucma_context *ctx,
1205 			    struct ib_path_rec_data *path_data, size_t optlen)
1206 {
1207 	struct sa_path_rec sa_path;
1208 	struct rdma_cm_event event;
1209 	int ret;
1210 
1211 	if (optlen % sizeof(*path_data))
1212 		return -EINVAL;
1213 
1214 	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
1215 		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
1216 					 IB_PATH_BIDIRECTIONAL))
1217 			break;
1218 	}
1219 
1220 	if (!optlen)
1221 		return -EINVAL;
1222 
1223 	memset(&sa_path, 0, sizeof(sa_path));
1224 
1225 	sa_path.rec_type = SA_PATH_REC_TYPE_IB;
1226 	ib_sa_unpack_path(path_data->path_rec, &sa_path);
1227 
1228 	if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
1229 		struct sa_path_rec opa;
1230 
1231 		sa_convert_path_ib_to_opa(&opa, &sa_path);
1232 		ret = rdma_set_ib_paths(ctx->cm_id, &opa, 1);
1233 	} else {
1234 		ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
1235 	}
1236 	if (ret)
1237 		return ret;
1238 
1239 	memset(&event, 0, sizeof event);
1240 	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1241 	return ucma_event_handler(ctx->cm_id, &event);
1242 }
1243 
1244 static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
1245 			      void *optval, size_t optlen)
1246 {
1247 	int ret;
1248 
1249 	switch (optname) {
1250 	case RDMA_OPTION_IB_PATH:
1251 		ret = ucma_set_ib_path(ctx, optval, optlen);
1252 		break;
1253 	default:
1254 		ret = -ENOSYS;
1255 	}
1256 
1257 	return ret;
1258 }
1259 
1260 static int ucma_set_option_level(struct ucma_context *ctx, int level,
1261 				 int optname, void *optval, size_t optlen)
1262 {
1263 	int ret;
1264 
1265 	switch (level) {
1266 	case RDMA_OPTION_ID:
1267 		ret = ucma_set_option_id(ctx, optname, optval, optlen);
1268 		break;
1269 	case RDMA_OPTION_IB:
1270 		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
1271 		break;
1272 	default:
1273 		ret = -ENOSYS;
1274 	}
1275 
1276 	return ret;
1277 }
1278 
1279 static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
1280 			       int in_len, int out_len)
1281 {
1282 	struct rdma_ucm_set_option cmd;
1283 	struct ucma_context *ctx;
1284 	void *optval;
1285 	int ret;
1286 
1287 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1288 		return -EFAULT;
1289 
1290 	ctx = ucma_get_ctx(file, cmd.id);
1291 	if (IS_ERR(ctx))
1292 		return PTR_ERR(ctx);
1293 
1294 	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
1295 			     cmd.optlen);
1296 	if (IS_ERR(optval)) {
1297 		ret = PTR_ERR(optval);
1298 		goto out;
1299 	}
1300 
1301 	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
1302 				    cmd.optlen);
1303 	kfree(optval);
1304 
1305 out:
1306 	ucma_put_ctx(ctx);
1307 	return ret;
1308 }
1309 
1310 static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
1311 			   int in_len, int out_len)
1312 {
1313 	struct rdma_ucm_notify cmd;
1314 	struct ucma_context *ctx;
1315 	int ret;
1316 
1317 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1318 		return -EFAULT;
1319 
1320 	ctx = ucma_get_ctx(file, cmd.id);
1321 	if (IS_ERR(ctx))
1322 		return PTR_ERR(ctx);
1323 
1324 	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
1325 	ucma_put_ctx(ctx);
1326 	return ret;
1327 }
1328 
1329 static ssize_t ucma_process_join(struct ucma_file *file,
1330 				 struct rdma_ucm_join_mcast *cmd,  int out_len)
1331 {
1332 	struct rdma_ucm_create_id_resp resp;
1333 	struct ucma_context *ctx;
1334 	struct ucma_multicast *mc;
1335 	struct sockaddr *addr;
1336 	int ret;
1337 	u8 join_state;
1338 
1339 	if (out_len < sizeof(resp))
1340 		return -ENOSPC;
1341 
1342 	addr = (struct sockaddr *) &cmd->addr;
1343 	if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
1344 		return -EINVAL;
1345 
1346 	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
1347 		join_state = BIT(FULLMEMBER_JOIN);
1348 	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
1349 		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
1350 	else
1351 		return -EINVAL;
1352 
1353 	ctx = ucma_get_ctx(file, cmd->id);
1354 	if (IS_ERR(ctx))
1355 		return PTR_ERR(ctx);
1356 
1357 	mutex_lock(&file->mut);
1358 	mc = ucma_alloc_multicast(ctx);
1359 	if (!mc) {
1360 		ret = -ENOMEM;
1361 		goto err1;
1362 	}
1363 	mc->join_state = join_state;
1364 	mc->uid = cmd->uid;
1365 	memcpy(&mc->addr, addr, cmd->addr_size);
1366 	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
1367 				  join_state, mc);
1368 	if (ret)
1369 		goto err2;
1370 
1371 	resp.id = mc->id;
1372 	if (copy_to_user((void __user *)(unsigned long) cmd->response,
1373 			 &resp, sizeof(resp))) {
1374 		ret = -EFAULT;
1375 		goto err3;
1376 	}
1377 
1378 	mutex_unlock(&file->mut);
1379 	ucma_put_ctx(ctx);
1380 	return 0;
1381 
1382 err3:
1383 	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
1384 	ucma_cleanup_mc_events(mc);
1385 err2:
1386 	mutex_lock(&mut);
1387 	idr_remove(&multicast_idr, mc->id);
1388 	mutex_unlock(&mut);
1389 	list_del(&mc->list);
1390 	kfree(mc);
1391 err1:
1392 	mutex_unlock(&file->mut);
1393 	ucma_put_ctx(ctx);
1394 	return ret;
1395 }
1396 
1397 static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
1398 				      const char __user *inbuf,
1399 				      int in_len, int out_len)
1400 {
1401 	struct rdma_ucm_join_ip_mcast cmd;
1402 	struct rdma_ucm_join_mcast join_cmd;
1403 
1404 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1405 		return -EFAULT;
1406 
1407 	join_cmd.response = cmd.response;
1408 	join_cmd.uid = cmd.uid;
1409 	join_cmd.id = cmd.id;
1410 	join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
1411 	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
1412 	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
1413 
1414 	return ucma_process_join(file, &join_cmd, out_len);
1415 }
1416 
1417 static ssize_t ucma_join_multicast(struct ucma_file *file,
1418 				   const char __user *inbuf,
1419 				   int in_len, int out_len)
1420 {
1421 	struct rdma_ucm_join_mcast cmd;
1422 
1423 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1424 		return -EFAULT;
1425 
1426 	return ucma_process_join(file, &cmd, out_len);
1427 }
1428 
1429 static ssize_t ucma_leave_multicast(struct ucma_file *file,
1430 				    const char __user *inbuf,
1431 				    int in_len, int out_len)
1432 {
1433 	struct rdma_ucm_destroy_id cmd;
1434 	struct rdma_ucm_destroy_id_resp resp;
1435 	struct ucma_multicast *mc;
1436 	int ret = 0;
1437 
1438 	if (out_len < sizeof(resp))
1439 		return -ENOSPC;
1440 
1441 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1442 		return -EFAULT;
1443 
1444 	mutex_lock(&mut);
1445 	mc = idr_find(&multicast_idr, cmd.id);
1446 	if (!mc)
1447 		mc = ERR_PTR(-ENOENT);
1448 	else if (mc->ctx->file != file)
1449 		mc = ERR_PTR(-EINVAL);
1450 	else if (!atomic_inc_not_zero(&mc->ctx->ref))
1451 		mc = ERR_PTR(-ENXIO);
1452 	else
1453 		idr_remove(&multicast_idr, mc->id);
1454 	mutex_unlock(&mut);
1455 
1456 	if (IS_ERR(mc)) {
1457 		ret = PTR_ERR(mc);
1458 		goto out;
1459 	}
1460 
1461 	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
1462 	mutex_lock(&mc->ctx->file->mut);
1463 	ucma_cleanup_mc_events(mc);
1464 	list_del(&mc->list);
1465 	mutex_unlock(&mc->ctx->file->mut);
1466 
1467 	ucma_put_ctx(mc->ctx);
1468 	resp.events_reported = mc->events_reported;
1469 	kfree(mc);
1470 
1471 	if (copy_to_user((void __user *)(unsigned long)cmd.response,
1472 			 &resp, sizeof(resp)))
1473 		ret = -EFAULT;
1474 out:
1475 	return ret;
1476 }
1477 
1478 static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
1479 {
1480 	/* Acquire mutexes based on pointer comparison to prevent deadlock. */
1481 	if (file1 < file2) {
1482 		mutex_lock(&file1->mut);
1483 		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
1484 	} else {
1485 		mutex_lock(&file2->mut);
1486 		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
1487 	}
1488 }
1489 
1490 static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
1491 {
1492 	if (file1 < file2) {
1493 		mutex_unlock(&file2->mut);
1494 		mutex_unlock(&file1->mut);
1495 	} else {
1496 		mutex_unlock(&file1->mut);
1497 		mutex_unlock(&file2->mut);
1498 	}
1499 }
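
/*
 * Taking the two file mutexes in pointer order gives every task a single
 * global lock order for any pair of files, so two concurrent MIGRATE_ID
 * calls between the same pair of files cannot deadlock against each other.
 */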
1500 
1501 static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
1502 {
1503 	struct ucma_event *uevent, *tmp;
1504 
1505 	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
1506 		if (uevent->ctx == ctx)
1507 			list_move_tail(&uevent->list, &file->event_list);
1508 }
1509 
1510 static ssize_t ucma_migrate_id(struct ucma_file *new_file,
1511 			       const char __user *inbuf,
1512 			       int in_len, int out_len)
1513 {
1514 	struct rdma_ucm_migrate_id cmd;
1515 	struct rdma_ucm_migrate_resp resp;
1516 	struct ucma_context *ctx;
1517 	struct fd f;
1518 	struct ucma_file *cur_file;
1519 	int ret = 0;
1520 
1521 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1522 		return -EFAULT;
1523 
1524 	/* Get current fd to protect against it being closed */
1525 	f = fdget(cmd.fd);
1526 	if (!f.file)
1527 		return -ENOENT;
1528 
1529 	/* Validate current fd and prevent destruction of id. */
1530 	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
1531 	if (IS_ERR(ctx)) {
1532 		ret = PTR_ERR(ctx);
1533 		goto file_put;
1534 	}
1535 
1536 	cur_file = ctx->file;
1537 	if (cur_file == new_file) {
1538 		resp.events_reported = ctx->events_reported;
1539 		goto response;
1540 	}
1541 
1542 	/*
1543 	 * Migrate events between fds, maintaining order, and avoiding new
1544 	 * events being added before existing events.
1545 	 */
1546 	ucma_lock_files(cur_file, new_file);
1547 	mutex_lock(&mut);
1548 
1549 	list_move_tail(&ctx->list, &new_file->ctx_list);
1550 	ucma_move_events(ctx, new_file);
1551 	ctx->file = new_file;
1552 	resp.events_reported = ctx->events_reported;
1553 
1554 	mutex_unlock(&mut);
1555 	ucma_unlock_files(cur_file, new_file);
1556 
1557 response:
1558 	if (copy_to_user((void __user *)(unsigned long)cmd.response,
1559 			 &resp, sizeof(resp)))
1560 		ret = -EFAULT;
1561 
1562 	ucma_put_ctx(ctx);
1563 file_put:
1564 	fdput(f);
1565 	return ret;
1566 }
1567 
1568 static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
1569 				   const char __user *inbuf,
1570 				   int in_len, int out_len) = {
1571 	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
1572 	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
1573 	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
1574 	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
1575 	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
1576 	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
1577 	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
1578 	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
1579 	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
1580 	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
1581 	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
1582 	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
1583 	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
1584 	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
1585 	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
1586 	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
1587 	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
1588 	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
1589 	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
1590 	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
1591 	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
1592 	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
1593 	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
1594 };
1595 
1596 static ssize_t ucma_write(struct file *filp, const char __user *buf,
1597 			  size_t len, loff_t *pos)
1598 {
1599 	struct ucma_file *file = filp->private_data;
1600 	struct rdma_ucm_cmd_hdr hdr;
1601 	ssize_t ret;
1602 
1603 	if (!ib_safe_file_access(filp)) {
1604 		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
1605 			    task_tgid_vnr(current), current->comm);
1606 		return -EACCES;
1607 	}
1608 
1609 	if (len < sizeof(hdr))
1610 		return -EINVAL;
1611 
1612 	if (copy_from_user(&hdr, buf, sizeof(hdr)))
1613 		return -EFAULT;
1614 
1615 	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
1616 		return -EINVAL;
1617 
1618 	if (hdr.in + sizeof(hdr) > len)
1619 		return -EINVAL;
1620 
1621 	if (!ucma_cmd_table[hdr.cmd])
1622 		return -ENOSYS;
1623 
1624 	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
1625 	if (!ret)
1626 		ret = len;
1627 
1628 	return ret;
1629 }
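
/*
 * For illustration only, a minimal userspace sketch (not part of this file):
 * each request written to /dev/infiniband/rdma_cm is a struct
 * rdma_ucm_cmd_hdr immediately followed by the command payload, and the
 * kernel writes the reply back through the user pointer carried in the
 * payload's "response" field. Field layout follows <rdma/rdma_user_cm.h>;
 * error handling is omitted.
 *
 *	struct rdma_ucm_create_id_resp resp;
 *	struct {
 *		struct rdma_ucm_cmd_hdr hdr;
 *		struct rdma_ucm_create_id cmd;
 *	} msg = {
 *		.hdr = {
 *			.cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *			.in  = sizeof(msg.cmd),
 *			.out = sizeof(resp),
 *		},
 *		.cmd = {
 *			.uid = 1,			// caller-chosen cookie
 *			.response = (uintptr_t) &resp,
 *			.ps = RDMA_PS_TCP,		// ucma_get_qp_type() maps this to IB_QPT_RC
 *		},
 *	};
 *
 *	if (write(fd, &msg, sizeof(msg)) == sizeof(msg))
 *		printf("new id %u\n", resp.id);	// id allocated by ucma_alloc_ctx()
 */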
1630 
1631 static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
1632 {
1633 	struct ucma_file *file = filp->private_data;
1634 	unsigned int mask = 0;
1635 
1636 	poll_wait(filp, &file->poll_wait, wait);
1637 
1638 	if (!list_empty(&file->event_list))
1639 		mask = POLLIN | POLLRDNORM;
1640 
1641 	return mask;
1642 }
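
/*
 * A typical consumer (hedged sketch) polls the fd for POLLIN and then issues
 * RDMA_USER_CM_CMD_GET_EVENT via write(), as in the sketch above. Without
 * O_NONBLOCK, the get-event write itself blocks in ucma_get_event() until an
 * event is queued:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		;	// write the GET_EVENT command here
 */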
1643 
1644 /*
1645  * ucma_open() does not need the BKL:
1646  *
1647  *  - no global state is referred to;
1648  *  - there is no ioctl method to race against;
1649  *  - no further module initialization is required for open to work
1650  *    after the device is registered.
1651  */
1652 static int ucma_open(struct inode *inode, struct file *filp)
1653 {
1654 	struct ucma_file *file;
1655 
1656 	file = kmalloc(sizeof *file, GFP_KERNEL);
1657 	if (!file)
1658 		return -ENOMEM;
1659 
1660 	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
1661 						 WQ_MEM_RECLAIM);
1662 	if (!file->close_wq) {
1663 		kfree(file);
1664 		return -ENOMEM;
1665 	}
1666 
1667 	INIT_LIST_HEAD(&file->event_list);
1668 	INIT_LIST_HEAD(&file->ctx_list);
1669 	init_waitqueue_head(&file->poll_wait);
1670 	mutex_init(&file->mut);
1671 
1672 	filp->private_data = file;
1673 	file->filp = filp;
1674 
1675 	return nonseekable_open(inode, filp);
1676 }
1677 
1678 static int ucma_close(struct inode *inode, struct file *filp)
1679 {
1680 	struct ucma_file *file = filp->private_data;
1681 	struct ucma_context *ctx, *tmp;
1682 
1683 	mutex_lock(&file->mut);
1684 	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
1685 		ctx->destroying = 1;
1686 		mutex_unlock(&file->mut);
1687 
1688 		mutex_lock(&mut);
1689 		idr_remove(&ctx_idr, ctx->id);
1690 		mutex_unlock(&mut);
1691 
1692 		flush_workqueue(file->close_wq);
1693 		/* At this point, ctx has been marked as destroying and the
1694 		 * workqueue has been flushed, so we are safe from any inflight
1695 		 * handlers that might queue another closing task.
1696 		 */
1697 		mutex_lock(&mut);
1698 		if (!ctx->closing) {
1699 			mutex_unlock(&mut);
1700 			/* rdma_destroy_id ensures that no event handlers are
1701 			 * inflight for that id before releasing it.
1702 			 */
1703 			rdma_destroy_id(ctx->cm_id);
1704 		} else {
1705 			mutex_unlock(&mut);
1706 		}
1707 
1708 		ucma_free_ctx(ctx);
1709 		mutex_lock(&file->mut);
1710 	}
1711 	mutex_unlock(&file->mut);
1712 	destroy_workqueue(file->close_wq);
1713 	kfree(file);
1714 	return 0;
1715 }
1716 
1717 static const struct file_operations ucma_fops = {
1718 	.owner 	 = THIS_MODULE,
1719 	.open 	 = ucma_open,
1720 	.release = ucma_close,
1721 	.write	 = ucma_write,
1722 	.poll    = ucma_poll,
1723 	.llseek	 = no_llseek,
1724 };
1725 
1726 static struct miscdevice ucma_misc = {
1727 	.minor		= MISC_DYNAMIC_MINOR,
1728 	.name		= "rdma_cm",
1729 	.nodename	= "infiniband/rdma_cm",
1730 	.mode		= 0666,
1731 	.fops		= &ucma_fops,
1732 };
1733 
1734 static ssize_t show_abi_version(struct device *dev,
1735 				struct device_attribute *attr,
1736 				char *buf)
1737 {
1738 	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
1739 }
1740 static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
1741 
1742 static int __init ucma_init(void)
1743 {
1744 	int ret;
1745 
1746 	ret = misc_register(&ucma_misc);
1747 	if (ret)
1748 		return ret;
1749 
1750 	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
1751 	if (ret) {
1752 		pr_err("rdma_ucm: couldn't create abi_version attr\n");
1753 		goto err1;
1754 	}
1755 
1756 	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
1757 	if (!ucma_ctl_table_hdr) {
1758 		pr_err("rdma_ucm: couldn't register sysctl paths\n");
1759 		ret = -ENOMEM;
1760 		goto err2;
1761 	}
1762 	return 0;
1763 err2:
1764 	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
1765 err1:
1766 	misc_deregister(&ucma_misc);
1767 	return ret;
1768 }
1769 
1770 static void __exit ucma_cleanup(void)
1771 {
1772 	unregister_net_sysctl_table(ucma_ctl_table_hdr);
1773 	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
1774 	misc_deregister(&ucma_misc);
1775 	idr_destroy(&ctx_idr);
1776 	idr_destroy(&multicast_idr);
1777 }
1778 
1779 module_init(ucma_init);
1780 module_exit(ucma_cleanup);
1781