/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof(max_backlog),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

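/*
 * max_backlog is tunable at runtime: register_net_sysctl() in ucma_init()
 * below exposes it under net/rdma_ucm, so with a standard procfs layout the
 * knob can be adjusted roughly like:
 *
 *	# echo 2048 > /proc/sys/net/rdma_ucm/max_backlog
 */

/*
 * Per-open state for the rdma_cm device node.  "mut" serializes access to
 * the context and event lists; close_wq is an ordered workqueue used to
 * destroy rdma_cm ids out of line (see ucma_close_id()).
 */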
struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
	struct workqueue_struct	*close_wq;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	/* mark that the device is in the process of destroying its internal
	 * HW resources, protected by the global mut
	 */
	int			closing;
	/* sync between removal event and id destroy, protected by file mut */
	int			destroying;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	u8			join_state;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
	struct work_struct	close_work;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
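
/*
 * The global "mut" protects both IDRs as well as ctx->closing; the per-file
 * lists (ctx_list, event_list) are protected by the owning file's mut.
 */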

static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file || !ctx->cm_id)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}
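
/*
 * ucma_get_ctx()/ucma_put_ctx() implement a refcount-plus-completion
 * lifetime scheme: dropping the last reference completes ctx->comp, which
 * the destroy paths wait on before tearing down the rdma_cm id.
 */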

static void ucma_close_event_id(struct work_struct *work)
{
	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

	rdma_destroy_id(uevent_close->cm_id);
	kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/* Once all inflight tasks are finished, we close all underlying
	 * resources. The context stays alive until it is explicitly
	 * destroyed by its creator.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct ib_device *device,
			       struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
	struct ucma_context *ctx = cm_id->context;
	struct ucma_event *con_req_eve;
	int event_found = 0;

	if (ctx->destroying)
		return;

	/* Only while the context still points to this cm_id does it own it,
	 * and only then can the cm_id be queued for closing. Otherwise this
	 * is an inflight cm_id sitting on the context's event list, pending
	 * detachment and reattachment to a new context in ucma_get_event;
	 * that case is handled separately below.
	 */
	if (ctx->cm_id == cm_id) {
		mutex_lock(&mut);
		ctx->closing = 1;
		mutex_unlock(&mut);
		queue_work(ctx->file->close_wq, &ctx->close_work);
		return;
	}

	list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
		if (con_req_eve->cm_id == cm_id &&
		    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
			list_del(&con_req_eve->list);
			INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
			queue_work(ctx->file->close_wq, &con_req_eve->close_work);
			event_found = 1;
			break;
		}
	}
	if (!event_found)
		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud,
				   &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid || ctx->cm_id != cm_id) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later. However, we do need
		 * to release the underlying HW resources in case of a device
		 * removal event.
		 */
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ucma_removal_event_handler(cm_id);

		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		ucma_removal_event_handler(cm_id);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}
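
/*
 * Event flow in short: the rdma_cm core calls ucma_event_handler(), which
 * queues a ucma_event on the owning file's event_list and wakes any
 * pollers; user space then consumes it through ucma_get_event() below.
 * A connect request additionally consumes one slot of the listen backlog,
 * which ucma_get_event() gives back once a new context has been allocated
 * for the incoming id.
 */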

static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	/*
	 * Old 32 bit user space does not send the 4 byte padding in the
	 * reserved field. We don't care, allow it to keep working.
	 */
	if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &uevent->resp,
			 min_t(size_t, out_len, sizeof(uevent->resp)))) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}
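
/*
 * The mapping above follows the semantics of each port space: RDMA_PS_TCP
 * is connection oriented (RC), RDMA_PS_UDP and RDMA_PS_IPOIB are datagram
 * based (UD), and RDMA_PS_IB lets user space choose the QP type itself.
 */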

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct rdma_cm_id *cm_id;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	cm_id = __rdma_create_id(current->nsproxy->net_ns,
				 ucma_event_handler, ctx, cmd.ps, qp_type, NULL);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}

	ctx->cm_id = cm_id;
	return 0;

err2:
	rdma_destroy_id(cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	mutex_lock(&file->mut);
	list_del(&ctx->list);
	mutex_unlock(&file->mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to clean up the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing.  We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context's pending event list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->file->mut);
	ctx->destroying = 1;
	mutex_unlock(&ctx->file->mut);

	flush_workqueue(ctx->file->close_wq);
	/*
	 * At this point it's guaranteed that there is no inflight closing
	 * task.
	 */
	mutex_lock(&mut);
	if (!ctx->closing) {
		mutex_unlock(&mut);
		ucma_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
		rdma_destroy_id(ctx->cm_id);
	} else {
		mutex_unlock(&mut);
	}

	resp.events_reported = ucma_free_ctx(ctx);
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved || !cmd.addr_size ||
	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.src_addr) ||
	    !rdma_addr_size_in6(&cmd.dst_addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved ||
	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof(resp));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof(resp));

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {
		struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
			struct sa_path_rec ib;

			sa_convert_path_opa_to_ib(&ib, rec);
			ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
		} else {
			ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
		}
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}

static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof(resp));

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
			       NULL);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, NULL,
			       (union ib_gid *)&addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = u64_to_user_ptr(cmd.response);
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = __rdma_accept(ctx->cm_id, NULL, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.qp_state > IB_QPS_ERR)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (!ctx->cm_id->device) {
		ret = -EINVAL;
		goto out;
	}

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	if (!ctx->cm_id->device)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	sa_path.rec_type = SA_PATH_REC_TYPE_IB;
	ib_sa_unpack_path(path_data->path_rec, &sa_path);

	if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
		struct sa_path_rec opa;

		sa_convert_path_ib_to_opa(&opa, &sa_path);
		ret = rdma_set_ib_path(ctx->cm_id, &opa);
	} else {
		ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
	}
	if (ret)
		return ret;

	memset(&event, 0, sizeof(event));
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Validate optlen before taking a context reference, so the early
	 * return cannot leak a reference acquired by ucma_get_ctx().
	 */
	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user(u64_to_user_ptr(cmd.optval),
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->cm_id->device)
		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->addr_size != rdma_addr_size(addr))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user(u64_to_user_ptr(cmd->response),
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
	if (!join_cmd.addr_size)
		return -EINVAL;

	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else if (!atomic_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);
	else
		idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutexes based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

/* Defined below; declared here so the fd type check can reference it. */
static const struct file_operations ucma_fops;

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Only file descriptors from this driver are accepted; otherwise
	 * f.file->private_data would be misinterpreted as a ucma_file.
	 */
	if (f.file->f_op != &ucma_fops) {
		ret = -EINVAL;
		goto file_put;
	}

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};
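
/*
 * Illustrative sketch of the write()-based ABI (not part of this file's
 * build): user space issues a command by writing a struct rdma_ucm_cmd_hdr
 * followed by the command payload to the device node, e.g. roughly:
 *
 *	struct rdma_ucm_cmd_hdr hdr = {
 *		.cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *		.in  = sizeof(struct rdma_ucm_create_id),
 *		.out = sizeof(struct rdma_ucm_create_id_resp),
 *	};
 *	write(fd, buf, sizeof(hdr) + hdr.in);	(buf = header then payload)
 *
 * ucma_write() below validates the header and dispatches via this table.
 */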

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = EPOLLIN | EPOLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	file->close_wq = alloc_ordered_workqueue("ucma_close_id",
						 WQ_MEM_RECLAIM);
	if (!file->close_wq) {
		kfree(file);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		ctx->destroying = 1;
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		flush_workqueue(file->close_wq);
		/* Once the ctx has been marked as destroying and the
		 * workqueue has been flushed, we are safe from any inflight
		 * handlers that might queue another closing task.
		 */
		mutex_lock(&mut);
		if (!ctx->closing) {
			mutex_unlock(&mut);
			/* rdma_destroy_id ensures that no event handlers are
			 * inflight for that id before releasing it.
			 */
			rdma_destroy_id(ctx->cm_id);
		} else {
			mutex_unlock(&mut);
		}

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	destroy_workqueue(file->close_wq);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
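
/*
 * The attribute above makes the ABI version readable through sysfs; with
 * the default misc class layout that is typically (exact path depends on
 * the sysfs/udev setup):
 *
 *	$ cat /sys/class/misc/rdma_cm/abi_version
 */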

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		pr_err("rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);