xref: /openbmc/linux/drivers/infiniband/core/ucma.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
175216638SSean Hefty /*
275216638SSean Hefty  * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
375216638SSean Hefty  *
475216638SSean Hefty  * This software is available to you under a choice of one of two
575216638SSean Hefty  * licenses.  You may choose to be licensed under the terms of the GNU
675216638SSean Hefty  * General Public License (GPL) Version 2, available from the file
775216638SSean Hefty  * COPYING in the main directory of this source tree, or the
875216638SSean Hefty  * OpenIB.org BSD license below:
975216638SSean Hefty  *
1075216638SSean Hefty  *     Redistribution and use in source and binary forms, with or
1175216638SSean Hefty  *     without modification, are permitted provided that the following
1275216638SSean Hefty  *     conditions are met:
1375216638SSean Hefty  *
1475216638SSean Hefty  *      - Redistributions of source code must retain the above
1575216638SSean Hefty  *	copyright notice, this list of conditions and the following
1675216638SSean Hefty  *	disclaimer.
1775216638SSean Hefty  *
1875216638SSean Hefty  *      - Redistributions in binary form must reproduce the above
1975216638SSean Hefty  *	copyright notice, this list of conditions and the following
2075216638SSean Hefty  *	disclaimer in the documentation and/or other materials
2175216638SSean Hefty  *	provided with the distribution.
2275216638SSean Hefty  *
2375216638SSean Hefty  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
2475216638SSean Hefty  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
2575216638SSean Hefty  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
2675216638SSean Hefty  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
2775216638SSean Hefty  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
2875216638SSean Hefty  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
2975216638SSean Hefty  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
3075216638SSean Hefty  * SOFTWARE.
3175216638SSean Hefty  */
3275216638SSean Hefty 
3375216638SSean Hefty #include <linux/completion.h>
3488314e4dSSean Hefty #include <linux/file.h>
3575216638SSean Hefty #include <linux/mutex.h>
3675216638SSean Hefty #include <linux/poll.h>
37d43c36dcSAlexey Dobriyan #include <linux/sched.h>
3875216638SSean Hefty #include <linux/idr.h>
3975216638SSean Hefty #include <linux/in.h>
4075216638SSean Hefty #include <linux/in6.h>
4175216638SSean Hefty #include <linux/miscdevice.h>
425a0e3ad6STejun Heo #include <linux/slab.h>
4397cb7e40SSteve Wise #include <linux/sysctl.h>
44e4dd23d7SPaul Gortmaker #include <linux/module.h>
4595893ddeSGuy Shapiro #include <linux/nsproxy.h>
4675216638SSean Hefty 
47a3671a4fSGustavo A. R. Silva #include <linux/nospec.h>
48a3671a4fSGustavo A. R. Silva 
4975216638SSean Hefty #include <rdma/rdma_user_cm.h>
5075216638SSean Hefty #include <rdma/ib_marshall.h>
5175216638SSean Hefty #include <rdma/rdma_cm.h>
52a7ca1f00SSean Hefty #include <rdma/rdma_cm_ib.h>
53ee7aed45SSean Hefty #include <rdma/ib_addr.h>
54edaa7a55SSean Hefty #include <rdma/ib.h>
558094ba0aSLeon Romanovsky #include <rdma/ib_cm.h>
568f71bb00SJason Gunthorpe #include <rdma/rdma_netlink.h>
578f71bb00SJason Gunthorpe #include "core_priv.h"
5875216638SSean Hefty 
5975216638SSean Hefty MODULE_AUTHOR("Sean Hefty");
6075216638SSean Hefty MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
6175216638SSean Hefty MODULE_LICENSE("Dual BSD/GPL");
6275216638SSean Hefty 
6397cb7e40SSteve Wise static unsigned int max_backlog = 1024;
6497cb7e40SSteve Wise 
6597cb7e40SSteve Wise static struct ctl_table_header *ucma_ctl_table_hdr;
66f3a5e3e3SJoe Perches static struct ctl_table ucma_ctl_table[] = {
6797cb7e40SSteve Wise 	{
6897cb7e40SSteve Wise 		.procname	= "max_backlog",
6997cb7e40SSteve Wise 		.data		= &max_backlog,
7097cb7e40SSteve Wise 		.maxlen		= sizeof max_backlog,
7197cb7e40SSteve Wise 		.mode		= 0644,
7297cb7e40SSteve Wise 		.proc_handler	= proc_dointvec,
7397cb7e40SSteve Wise 	},
7497cb7e40SSteve Wise 	{ }
7597cb7e40SSteve Wise };
7697cb7e40SSteve Wise 
7775216638SSean Hefty struct ucma_file {
7875216638SSean Hefty 	struct mutex		mut;
7975216638SSean Hefty 	struct file		*filp;
8075216638SSean Hefty 	struct list_head	ctx_list;
8175216638SSean Hefty 	struct list_head	event_list;
8275216638SSean Hefty 	wait_queue_head_t	poll_wait;
8375216638SSean Hefty };
8475216638SSean Hefty 
8575216638SSean Hefty struct ucma_context {
86afcafe07SMatthew Wilcox 	u32			id;
8775216638SSean Hefty 	struct completion	comp;
88167b95ecSJason Gunthorpe 	refcount_t		ref;
8975216638SSean Hefty 	int			events_reported;
9026c15decSJason Gunthorpe 	atomic_t		backlog;
9175216638SSean Hefty 
9275216638SSean Hefty 	struct ucma_file	*file;
9375216638SSean Hefty 	struct rdma_cm_id	*cm_id;
947c119107SJason Gunthorpe 	struct mutex		mutex;
9575216638SSean Hefty 	u64			uid;
9675216638SSean Hefty 
9775216638SSean Hefty 	struct list_head	list;
9836e8169eSLeon Romanovsky 	struct list_head	mc_list;
99e1c30298SYishai Hadas 	struct work_struct	close_work;
100c8f6a362SSean Hefty };
101c8f6a362SSean Hefty 
102c8f6a362SSean Hefty struct ucma_multicast {
103c8f6a362SSean Hefty 	struct ucma_context	*ctx;
1044dfd5321SMatthew Wilcox 	u32			id;
105c8f6a362SSean Hefty 	int			events_reported;
106c8f6a362SSean Hefty 
107c8f6a362SSean Hefty 	u64			uid;
108ab15c95aSAlex Vesker 	u8			join_state;
10936e8169eSLeon Romanovsky 	struct list_head	list;
1103f446754SRoland Dreier 	struct sockaddr_storage	addr;
11175216638SSean Hefty };
11275216638SSean Hefty 
11375216638SSean Hefty struct ucma_event {
11475216638SSean Hefty 	struct ucma_context	*ctx;
115c7a198c7SMaor Gottlieb 	struct ucma_context	*conn_req_ctx;
116c8f6a362SSean Hefty 	struct ucma_multicast	*mc;
11775216638SSean Hefty 	struct list_head	list;
11875216638SSean Hefty 	struct rdma_ucm_event_resp resp;
11975216638SSean Hefty };
12075216638SSean Hefty 
121afcafe07SMatthew Wilcox static DEFINE_XARRAY_ALLOC(ctx_table);
1224dfd5321SMatthew Wilcox static DEFINE_XARRAY_ALLOC(multicast_table);
12375216638SSean Hefty 
1240d23ba60SJann Horn static const struct file_operations ucma_fops;
1258ae291ccSJason Gunthorpe static int ucma_destroy_private_ctx(struct ucma_context *ctx);
1260d23ba60SJann Horn 
12775216638SSean Hefty static inline struct ucma_context *_ucma_find_context(int id,
12875216638SSean Hefty 						      struct ucma_file *file)
12975216638SSean Hefty {
13075216638SSean Hefty 	struct ucma_context *ctx;
13175216638SSean Hefty 
132afcafe07SMatthew Wilcox 	ctx = xa_load(&ctx_table, id);
13375216638SSean Hefty 	if (!ctx)
13475216638SSean Hefty 		ctx = ERR_PTR(-ENOENT);
135620db1a1SJason Gunthorpe 	else if (ctx->file != file)
13675216638SSean Hefty 		ctx = ERR_PTR(-EINVAL);
13775216638SSean Hefty 	return ctx;
13875216638SSean Hefty }
13975216638SSean Hefty 
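/* Look up the context for @id belonging to @file and take a reference on it.
 * Callers must drop the reference with ucma_put_ctx().
 */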
14075216638SSean Hefty static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
14175216638SSean Hefty {
14275216638SSean Hefty 	struct ucma_context *ctx;
14375216638SSean Hefty 
144afcafe07SMatthew Wilcox 	xa_lock(&ctx_table);
14575216638SSean Hefty 	ctx = _ucma_find_context(id, file);
146657360d6SJason Gunthorpe 	if (!IS_ERR(ctx))
147657360d6SJason Gunthorpe 		if (!refcount_inc_not_zero(&ctx->ref))
148ca2968c1SJason Gunthorpe 			ctx = ERR_PTR(-ENXIO);
149afcafe07SMatthew Wilcox 	xa_unlock(&ctx_table);
15075216638SSean Hefty 	return ctx;
15175216638SSean Hefty }
15275216638SSean Hefty 
15375216638SSean Hefty static void ucma_put_ctx(struct ucma_context *ctx)
15475216638SSean Hefty {
155167b95ecSJason Gunthorpe 	if (refcount_dec_and_test(&ctx->ref))
15675216638SSean Hefty 		complete(&ctx->comp);
15775216638SSean Hefty }
15875216638SSean Hefty 
1598b77586bSJason Gunthorpe /*
1608b77586bSJason Gunthorpe  * Same as ucma_get_ctx() but requires that ->cm_id->device is valid, i.e. that the
1618b77586bSJason Gunthorpe  * CM_ID is bound.
1628b77586bSJason Gunthorpe  */
1638b77586bSJason Gunthorpe static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
1648b77586bSJason Gunthorpe {
1658b77586bSJason Gunthorpe 	struct ucma_context *ctx = ucma_get_ctx(file, id);
1668b77586bSJason Gunthorpe 
1678b77586bSJason Gunthorpe 	if (IS_ERR(ctx))
1688b77586bSJason Gunthorpe 		return ctx;
1698b77586bSJason Gunthorpe 	if (!ctx->cm_id->device) {
1708b77586bSJason Gunthorpe 		ucma_put_ctx(ctx);
1718b77586bSJason Gunthorpe 		return ERR_PTR(-EINVAL);
1728b77586bSJason Gunthorpe 	}
1738b77586bSJason Gunthorpe 	return ctx;
1748b77586bSJason Gunthorpe }
1758b77586bSJason Gunthorpe 
176e1c30298SYishai Hadas static void ucma_close_id(struct work_struct *work)
177e1c30298SYishai Hadas {
178e1c30298SYishai Hadas 	struct ucma_context *ctx =  container_of(work, struct ucma_context, close_work);
179e1c30298SYishai Hadas 
180e1c30298SYishai Hadas 	/* Once all inflight tasks are finished, we close all underlying
181e1c30298SYishai Hadas 	 * resources. The context stays alive until it is explicitly destroyed
1828ae291ccSJason Gunthorpe 	 * by its creator. This puts back the xarray's reference.
183e1c30298SYishai Hadas 	 */
184e1c30298SYishai Hadas 	ucma_put_ctx(ctx);
185e1c30298SYishai Hadas 	wait_for_completion(&ctx->comp);
186e1c30298SYishai Hadas 	/* No new events will be generated after destroying the id. */
187e1c30298SYishai Hadas 	rdma_destroy_id(ctx->cm_id);
188657360d6SJason Gunthorpe 
1898ae291ccSJason Gunthorpe 	/* Reading the cm_id without holding a positive ref is not allowed */
190657360d6SJason Gunthorpe 	ctx->cm_id = NULL;
191e1c30298SYishai Hadas }
192e1c30298SYishai Hadas 
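/* Allocate a context and reserve an id for it in ctx_table. The slot is left
 * NULL, so the id is invisible to lookups until ucma_finish_ctx() publishes
 * the pointer.
 */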
19375216638SSean Hefty static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
19475216638SSean Hefty {
19575216638SSean Hefty 	struct ucma_context *ctx;
19675216638SSean Hefty 
19775216638SSean Hefty 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
19875216638SSean Hefty 	if (!ctx)
19975216638SSean Hefty 		return NULL;
20075216638SSean Hefty 
201e1c30298SYishai Hadas 	INIT_WORK(&ctx->close_work, ucma_close_id);
20275216638SSean Hefty 	init_completion(&ctx->comp);
20336e8169eSLeon Romanovsky 	INIT_LIST_HEAD(&ctx->mc_list);
204620db1a1SJason Gunthorpe 	/* So list_del() will work if we don't do ucma_finish_ctx() */
205620db1a1SJason Gunthorpe 	INIT_LIST_HEAD(&ctx->list);
20675216638SSean Hefty 	ctx->file = file;
2077c119107SJason Gunthorpe 	mutex_init(&ctx->mutex);
20875216638SSean Hefty 
209620db1a1SJason Gunthorpe 	if (xa_alloc(&ctx_table, &ctx->id, NULL, xa_limit_32b, GFP_KERNEL)) {
21075216638SSean Hefty 		kfree(ctx);
21175216638SSean Hefty 		return NULL;
21275216638SSean Hefty 	}
213620db1a1SJason Gunthorpe 	return ctx;
214620db1a1SJason Gunthorpe }
215620db1a1SJason Gunthorpe 
2168ae291ccSJason Gunthorpe static void ucma_set_ctx_cm_id(struct ucma_context *ctx,
2178ae291ccSJason Gunthorpe 			       struct rdma_cm_id *cm_id)
2188ae291ccSJason Gunthorpe {
2198ae291ccSJason Gunthorpe 	refcount_set(&ctx->ref, 1);
2208ae291ccSJason Gunthorpe 	ctx->cm_id = cm_id;
2218ae291ccSJason Gunthorpe }
2228ae291ccSJason Gunthorpe 
223620db1a1SJason Gunthorpe static void ucma_finish_ctx(struct ucma_context *ctx)
224620db1a1SJason Gunthorpe {
225620db1a1SJason Gunthorpe 	lockdep_assert_held(&ctx->file->mut);
226620db1a1SJason Gunthorpe 	list_add_tail(&ctx->list, &ctx->file->ctx_list);
227620db1a1SJason Gunthorpe 	xa_store(&ctx_table, ctx->id, ctx, GFP_KERNEL);
228620db1a1SJason Gunthorpe }
22975216638SSean Hefty 
23075216638SSean Hefty static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
23175216638SSean Hefty 				 struct rdma_conn_param *src)
23275216638SSean Hefty {
23375216638SSean Hefty 	if (src->private_data_len)
23475216638SSean Hefty 		memcpy(dst->private_data, src->private_data,
23575216638SSean Hefty 		       src->private_data_len);
23675216638SSean Hefty 	dst->private_data_len = src->private_data_len;
23775216638SSean Hefty 	dst->responder_resources = src->responder_resources;
23875216638SSean Hefty 	dst->initiator_depth = src->initiator_depth;
23975216638SSean Hefty 	dst->flow_control = src->flow_control;
24075216638SSean Hefty 	dst->retry_count = src->retry_count;
24175216638SSean Hefty 	dst->rnr_retry_count = src->rnr_retry_count;
24275216638SSean Hefty 	dst->srq = src->srq;
24375216638SSean Hefty 	dst->qp_num = src->qp_num;
24475216638SSean Hefty }
24575216638SSean Hefty 
246d541e455SDasaratharaman Chandramouli static void ucma_copy_ud_event(struct ib_device *device,
247d541e455SDasaratharaman Chandramouli 			       struct rdma_ucm_ud_param *dst,
24875216638SSean Hefty 			       struct rdma_ud_param *src)
24975216638SSean Hefty {
25075216638SSean Hefty 	if (src->private_data_len)
25175216638SSean Hefty 		memcpy(dst->private_data, src->private_data,
25275216638SSean Hefty 		       src->private_data_len);
25375216638SSean Hefty 	dst->private_data_len = src->private_data_len;
254d541e455SDasaratharaman Chandramouli 	ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
25575216638SSean Hefty 	dst->qp_num = src->qp_num;
25675216638SSean Hefty 	dst->qkey = src->qkey;
25775216638SSean Hefty }
25875216638SSean Hefty 
259a1d33b70SJason Gunthorpe static struct ucma_event *ucma_create_uevent(struct ucma_context *ctx,
260a1d33b70SJason Gunthorpe 					     struct rdma_cm_event *event)
26175216638SSean Hefty {
262a1d33b70SJason Gunthorpe 	struct ucma_event *uevent;
263a1d33b70SJason Gunthorpe 
264a1d33b70SJason Gunthorpe 	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
265a1d33b70SJason Gunthorpe 	if (!uevent)
266a1d33b70SJason Gunthorpe 		return NULL;
267a1d33b70SJason Gunthorpe 
26875216638SSean Hefty 	uevent->ctx = ctx;
269c8f6a362SSean Hefty 	switch (event->event) {
270c8f6a362SSean Hefty 	case RDMA_CM_EVENT_MULTICAST_JOIN:
271c8f6a362SSean Hefty 	case RDMA_CM_EVENT_MULTICAST_ERROR:
272c8f6a362SSean Hefty 		uevent->mc = (struct ucma_multicast *)
273c8f6a362SSean Hefty 			     event->param.ud.private_data;
274c8f6a362SSean Hefty 		uevent->resp.uid = uevent->mc->uid;
275c8f6a362SSean Hefty 		uevent->resp.id = uevent->mc->id;
276c8f6a362SSean Hefty 		break;
277c8f6a362SSean Hefty 	default:
27875216638SSean Hefty 		uevent->resp.uid = ctx->uid;
27975216638SSean Hefty 		uevent->resp.id = ctx->id;
280c8f6a362SSean Hefty 		break;
281c8f6a362SSean Hefty 	}
282a1d33b70SJason Gunthorpe 	uevent->resp.event = event->event;
283a1d33b70SJason Gunthorpe 	uevent->resp.status = event->status;
284a1d33b70SJason Gunthorpe 	if (ctx->cm_id->qp_type == IB_QPT_UD)
285a1d33b70SJason Gunthorpe 		ucma_copy_ud_event(ctx->cm_id->device, &uevent->resp.param.ud,
286a1d33b70SJason Gunthorpe 				   &event->param.ud);
287a1d33b70SJason Gunthorpe 	else
288a1d33b70SJason Gunthorpe 		ucma_copy_conn_event(&uevent->resp.param.conn,
289a1d33b70SJason Gunthorpe 				     &event->param.conn);
290a1d33b70SJason Gunthorpe 
291a1d33b70SJason Gunthorpe 	uevent->resp.ece.vendor_id = event->ece.vendor_id;
292a1d33b70SJason Gunthorpe 	uevent->resp.ece.attr_mod = event->ece.attr_mod;
293a1d33b70SJason Gunthorpe 	return uevent;
29475216638SSean Hefty }
29575216638SSean Hefty 
296a1d33b70SJason Gunthorpe static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
297a1d33b70SJason Gunthorpe 				      struct rdma_cm_event *event)
298e1c30298SYishai Hadas {
299a1d33b70SJason Gunthorpe 	struct ucma_context *listen_ctx = cm_id->context;
300a1d33b70SJason Gunthorpe 	struct ucma_context *ctx;
301a1d33b70SJason Gunthorpe 	struct ucma_event *uevent;
302e1c30298SYishai Hadas 
303a1d33b70SJason Gunthorpe 	if (!atomic_add_unless(&listen_ctx->backlog, -1, 0))
304a1d33b70SJason Gunthorpe 		return -ENOMEM;
305a1d33b70SJason Gunthorpe 	ctx = ucma_alloc_ctx(listen_ctx->file);
306a1d33b70SJason Gunthorpe 	if (!ctx)
307a1d33b70SJason Gunthorpe 		goto err_backlog;
3088ae291ccSJason Gunthorpe 	ucma_set_ctx_cm_id(ctx, cm_id);
309e1c30298SYishai Hadas 
310a1d33b70SJason Gunthorpe 	uevent = ucma_create_uevent(listen_ctx, event);
311a1d33b70SJason Gunthorpe 	if (!uevent)
312a1d33b70SJason Gunthorpe 		goto err_alloc;
313c7a198c7SMaor Gottlieb 	uevent->conn_req_ctx = ctx;
314a1d33b70SJason Gunthorpe 	uevent->resp.id = ctx->id;
315a1d33b70SJason Gunthorpe 
316a1d33b70SJason Gunthorpe 	ctx->cm_id->context = ctx;
317e1c30298SYishai Hadas 
318310ca1a7SJason Gunthorpe 	mutex_lock(&ctx->file->mut);
319a1d33b70SJason Gunthorpe 	ucma_finish_ctx(ctx);
320a1d33b70SJason Gunthorpe 	list_add_tail(&uevent->list, &ctx->file->event_list);
321310ca1a7SJason Gunthorpe 	mutex_unlock(&ctx->file->mut);
322a1d33b70SJason Gunthorpe 	wake_up_interruptible(&ctx->file->poll_wait);
323a1d33b70SJason Gunthorpe 	return 0;
324a1d33b70SJason Gunthorpe 
325a1d33b70SJason Gunthorpe err_alloc:
3268ae291ccSJason Gunthorpe 	ucma_destroy_private_ctx(ctx);
327a1d33b70SJason Gunthorpe err_backlog:
328a1d33b70SJason Gunthorpe 	atomic_inc(&listen_ctx->backlog);
329a1d33b70SJason Gunthorpe 	/* Returning error causes the new ID to be destroyed */
330a1d33b70SJason Gunthorpe 	return -ENOMEM;
331e1c30298SYishai Hadas }
332e1c30298SYishai Hadas 
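/* rdma_cm event handler registered for every cm_id created by this module.
 * Connection requests spawn a new context; other events are queued on the
 * owning file's event list (once userspace has supplied a uid) and pollers
 * are woken. A device-removal event also schedules close_work to tear down
 * the underlying cm_id.
 */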
33375216638SSean Hefty static int ucma_event_handler(struct rdma_cm_id *cm_id,
33475216638SSean Hefty 			      struct rdma_cm_event *event)
33575216638SSean Hefty {
33675216638SSean Hefty 	struct ucma_event *uevent;
33775216638SSean Hefty 	struct ucma_context *ctx = cm_id->context;
33875216638SSean Hefty 
339a1d33b70SJason Gunthorpe 	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST)
340a1d33b70SJason Gunthorpe 		return ucma_connect_event_handler(cm_id, event);
34175216638SSean Hefty 
3420cefcf0bSSean Hefty 	/*
343a1d33b70SJason Gunthorpe 	 * We ignore events for new connections until userspace has set their
344a1d33b70SJason Gunthorpe 	 * context.  This can only happen if an error occurs on a new connection
345a1d33b70SJason Gunthorpe 	 * before the user accepts it.  This is okay, since the accept will just
346a1d33b70SJason Gunthorpe 	 * fail later. However, we do need to release the underlying HW
347a1d33b70SJason Gunthorpe 	 * resources in case of a device removal event.
3480cefcf0bSSean Hefty 	 */
349a1d33b70SJason Gunthorpe 	if (ctx->uid) {
350a1d33b70SJason Gunthorpe 		uevent = ucma_create_uevent(ctx, event);
351a1d33b70SJason Gunthorpe 		if (!uevent)
352310ca1a7SJason Gunthorpe 			return 0;
3530cefcf0bSSean Hefty 
354310ca1a7SJason Gunthorpe 		mutex_lock(&ctx->file->mut);
35575216638SSean Hefty 		list_add_tail(&uevent->list, &ctx->file->event_list);
356310ca1a7SJason Gunthorpe 		mutex_unlock(&ctx->file->mut);
35775216638SSean Hefty 		wake_up_interruptible(&ctx->file->poll_wait);
358a1d33b70SJason Gunthorpe 	}
359a1d33b70SJason Gunthorpe 
3608ae291ccSJason Gunthorpe 	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
3618ae291ccSJason Gunthorpe 		xa_lock(&ctx_table);
3628ae291ccSJason Gunthorpe 		if (xa_load(&ctx_table, ctx->id) == ctx)
363657360d6SJason Gunthorpe 			queue_work(system_unbound_wq, &ctx->close_work);
3648ae291ccSJason Gunthorpe 		xa_unlock(&ctx_table);
3658ae291ccSJason Gunthorpe 	}
366310ca1a7SJason Gunthorpe 	return 0;
36775216638SSean Hefty }
36875216638SSean Hefty 
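/* Deliver the oldest queued event to userspace, blocking until one arrives
 * unless the file was opened with O_NONBLOCK.
 */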
36975216638SSean Hefty static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
37075216638SSean Hefty 			      int in_len, int out_len)
37175216638SSean Hefty {
37275216638SSean Hefty 	struct rdma_ucm_get_event cmd;
37375216638SSean Hefty 	struct ucma_event *uevent;
37475216638SSean Hefty 
375611cb92bSJason Gunthorpe 	/*
376611cb92bSJason Gunthorpe 	 * Old 32-bit user space does not send the 4 byte padding in the
377611cb92bSJason Gunthorpe 	 * reserved field. We don't care; allow it to keep working.
378611cb92bSJason Gunthorpe 	 */
37993531ee7SLeon Romanovsky 	if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved) -
38093531ee7SLeon Romanovsky 			      sizeof(uevent->resp.ece))
38175216638SSean Hefty 		return -ENOSPC;
38275216638SSean Hefty 
38375216638SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
38475216638SSean Hefty 		return -EFAULT;
38575216638SSean Hefty 
38675216638SSean Hefty 	mutex_lock(&file->mut);
38775216638SSean Hefty 	while (list_empty(&file->event_list)) {
38875216638SSean Hefty 		mutex_unlock(&file->mut);
38975216638SSean Hefty 
390d92f7644SSean Hefty 		if (file->filp->f_flags & O_NONBLOCK)
391d92f7644SSean Hefty 			return -EAGAIN;
392d92f7644SSean Hefty 
393d92f7644SSean Hefty 		if (wait_event_interruptible(file->poll_wait,
394d92f7644SSean Hefty 					     !list_empty(&file->event_list)))
395d92f7644SSean Hefty 			return -ERESTARTSYS;
396d92f7644SSean Hefty 
397d92f7644SSean Hefty 		mutex_lock(&file->mut);
398d92f7644SSean Hefty 	}
39975216638SSean Hefty 
400620db1a1SJason Gunthorpe 	uevent = list_first_entry(&file->event_list, struct ucma_event, list);
40175216638SSean Hefty 
4026f57c933SJason Gunthorpe 	if (copy_to_user(u64_to_user_ptr(cmd.response),
403611cb92bSJason Gunthorpe 			 &uevent->resp,
404611cb92bSJason Gunthorpe 			 min_t(size_t, out_len, sizeof(uevent->resp)))) {
405a1d33b70SJason Gunthorpe 		mutex_unlock(&file->mut);
406a1d33b70SJason Gunthorpe 		return -EFAULT;
40775216638SSean Hefty 	}
40875216638SSean Hefty 
40975216638SSean Hefty 	list_del(&uevent->list);
41075216638SSean Hefty 	uevent->ctx->events_reported++;
411c8f6a362SSean Hefty 	if (uevent->mc)
412c8f6a362SSean Hefty 		uevent->mc->events_reported++;
413a1d33b70SJason Gunthorpe 	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
414a1d33b70SJason Gunthorpe 		atomic_inc(&uevent->ctx->backlog);
415620db1a1SJason Gunthorpe 	mutex_unlock(&file->mut);
416620db1a1SJason Gunthorpe 
41775216638SSean Hefty 	kfree(uevent);
418620db1a1SJason Gunthorpe 	return 0;
41975216638SSean Hefty }
42075216638SSean Hefty 
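/* Map the user-requested RDMA port space to the QP type used when creating
 * the cm_id.
 */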
421b26f9b99SSean Hefty static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
422b26f9b99SSean Hefty {
423b26f9b99SSean Hefty 	switch (cmd->ps) {
424b26f9b99SSean Hefty 	case RDMA_PS_TCP:
425b26f9b99SSean Hefty 		*qp_type = IB_QPT_RC;
426b26f9b99SSean Hefty 		return 0;
427b26f9b99SSean Hefty 	case RDMA_PS_UDP:
428b26f9b99SSean Hefty 	case RDMA_PS_IPOIB:
429b26f9b99SSean Hefty 		*qp_type = IB_QPT_UD;
430b26f9b99SSean Hefty 		return 0;
431638ef7a6SSean Hefty 	case RDMA_PS_IB:
432638ef7a6SSean Hefty 		*qp_type = cmd->qp_type;
433638ef7a6SSean Hefty 		return 0;
434b26f9b99SSean Hefty 	default:
435b26f9b99SSean Hefty 		return -EINVAL;
436b26f9b99SSean Hefty 	}
437b26f9b99SSean Hefty }
438b26f9b99SSean Hefty 
439b26f9b99SSean Hefty static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
44075216638SSean Hefty 			      int in_len, int out_len)
44175216638SSean Hefty {
44275216638SSean Hefty 	struct rdma_ucm_create_id cmd;
44375216638SSean Hefty 	struct rdma_ucm_create_id_resp resp;
44475216638SSean Hefty 	struct ucma_context *ctx;
445e8980d67SLeon Romanovsky 	struct rdma_cm_id *cm_id;
446b26f9b99SSean Hefty 	enum ib_qp_type qp_type;
44775216638SSean Hefty 	int ret;
44875216638SSean Hefty 
44975216638SSean Hefty 	if (out_len < sizeof(resp))
45075216638SSean Hefty 		return -ENOSPC;
45175216638SSean Hefty 
45275216638SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
45375216638SSean Hefty 		return -EFAULT;
45475216638SSean Hefty 
455b26f9b99SSean Hefty 	ret = ucma_get_qp_type(&cmd, &qp_type);
456b26f9b99SSean Hefty 	if (ret)
457b26f9b99SSean Hefty 		return ret;
458b26f9b99SSean Hefty 
45975216638SSean Hefty 	ctx = ucma_alloc_ctx(file);
46075216638SSean Hefty 	if (!ctx)
46175216638SSean Hefty 		return -ENOMEM;
46275216638SSean Hefty 
46375216638SSean Hefty 	ctx->uid = cmd.uid;
464b09c4d70SLeon Romanovsky 	cm_id = rdma_create_user_id(ucma_event_handler, ctx, cmd.ps, qp_type);
465e8980d67SLeon Romanovsky 	if (IS_ERR(cm_id)) {
466e8980d67SLeon Romanovsky 		ret = PTR_ERR(cm_id);
46775216638SSean Hefty 		goto err1;
46875216638SSean Hefty 	}
4698ae291ccSJason Gunthorpe 	ucma_set_ctx_cm_id(ctx, cm_id);
47075216638SSean Hefty 
47175216638SSean Hefty 	resp.id = ctx->id;
4726f57c933SJason Gunthorpe 	if (copy_to_user(u64_to_user_ptr(cmd.response),
47375216638SSean Hefty 			 &resp, sizeof(resp))) {
474e3d65124SXiaofei Tan 		ret = -EFAULT;
475e3d65124SXiaofei Tan 		goto err1;
47675216638SSean Hefty 	}
477e8980d67SLeon Romanovsky 
478620db1a1SJason Gunthorpe 	mutex_lock(&file->mut);
479620db1a1SJason Gunthorpe 	ucma_finish_ctx(ctx);
480620db1a1SJason Gunthorpe 	mutex_unlock(&file->mut);
48175216638SSean Hefty 	return 0;
48275216638SSean Hefty 
48375216638SSean Hefty err1:
4848ae291ccSJason Gunthorpe 	ucma_destroy_private_ctx(ctx);
48575216638SSean Hefty 	return ret;
48675216638SSean Hefty }
48775216638SSean Hefty 
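/* Erase every multicast join still attached to the context from
 * multicast_table and free it.
 */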
488c8f6a362SSean Hefty static void ucma_cleanup_multicast(struct ucma_context *ctx)
489c8f6a362SSean Hefty {
49036e8169eSLeon Romanovsky 	struct ucma_multicast *mc, *tmp;
491c8f6a362SSean Hefty 
49236e8169eSLeon Romanovsky 	xa_lock(&multicast_table);
49336e8169eSLeon Romanovsky 	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
49436e8169eSLeon Romanovsky 		list_del(&mc->list);
49595fe5109SJason Gunthorpe 		/*
49695fe5109SJason Gunthorpe 		 * At this point mc->ctx->ref is 0, so a reader cannot keep the mc
49795fe5109SJason Gunthorpe 		 * alive outside the lock; holding the xarray lock is enough serialization.
49895fe5109SJason Gunthorpe 		 */
49936e8169eSLeon Romanovsky 		__xa_erase(&multicast_table, mc->id);
500c8f6a362SSean Hefty 		kfree(mc);
501c8f6a362SSean Hefty 	}
50236e8169eSLeon Romanovsky 	xa_unlock(&multicast_table);
503c8f6a362SSean Hefty }
504c8f6a362SSean Hefty 
505c8f6a362SSean Hefty static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
506c8f6a362SSean Hefty {
507c8f6a362SSean Hefty 	struct ucma_event *uevent, *tmp;
508c8f6a362SSean Hefty 
50909e328e4SJason Gunthorpe 	rdma_lock_handler(mc->ctx->cm_id);
51095fe5109SJason Gunthorpe 	mutex_lock(&mc->ctx->file->mut);
511c8f6a362SSean Hefty 	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
512c8f6a362SSean Hefty 		if (uevent->mc != mc)
513c8f6a362SSean Hefty 			continue;
514c8f6a362SSean Hefty 
515c8f6a362SSean Hefty 		list_del(&uevent->list);
516c8f6a362SSean Hefty 		kfree(uevent);
517c8f6a362SSean Hefty 	}
51895fe5109SJason Gunthorpe 	mutex_unlock(&mc->ctx->file->mut);
51909e328e4SJason Gunthorpe 	rdma_unlock_handler(mc->ctx->cm_id);
520c8f6a362SSean Hefty }
521c8f6a362SSean Hefty 
5228ae291ccSJason Gunthorpe static int ucma_cleanup_ctx_events(struct ucma_context *ctx)
52375216638SSean Hefty {
52475216638SSean Hefty 	int events_reported;
525186834b5SHefty, Sean 	struct ucma_event *uevent, *tmp;
526186834b5SHefty, Sean 	LIST_HEAD(list);
52775216638SSean Hefty 
52875216638SSean Hefty 	/* Clean up events not yet reported to the user. */
52975216638SSean Hefty 	mutex_lock(&ctx->file->mut);
530186834b5SHefty, Sean 	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
5318ae291ccSJason Gunthorpe 		if (uevent->ctx != ctx)
5328ae291ccSJason Gunthorpe 			continue;
5338ae291ccSJason Gunthorpe 
5348ae291ccSJason Gunthorpe 		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
5358ae291ccSJason Gunthorpe 		    xa_cmpxchg(&ctx_table, uevent->conn_req_ctx->id,
5368ae291ccSJason Gunthorpe 			       uevent->conn_req_ctx, XA_ZERO_ENTRY,
5378ae291ccSJason Gunthorpe 			       GFP_KERNEL) == uevent->conn_req_ctx) {
538186834b5SHefty, Sean 			list_move_tail(&uevent->list, &list);
5398ae291ccSJason Gunthorpe 			continue;
5408ae291ccSJason Gunthorpe 		}
5418ae291ccSJason Gunthorpe 		list_del(&uevent->list);
5428ae291ccSJason Gunthorpe 		kfree(uevent);
543186834b5SHefty, Sean 	}
54475216638SSean Hefty 	list_del(&ctx->list);
54598837c6cSJason Gunthorpe 	events_reported = ctx->events_reported;
54675216638SSean Hefty 	mutex_unlock(&ctx->file->mut);
54775216638SSean Hefty 
548a1d33b70SJason Gunthorpe 	/*
5498ae291ccSJason Gunthorpe 	 * If this was a listening ID then any connections spawned from it that
5508ae291ccSJason Gunthorpe 	 * have not been delivered to userspace are cleaned up too. Must be done
5518ae291ccSJason Gunthorpe 	 * outside any locks.
552a1d33b70SJason Gunthorpe 	 */
553186834b5SHefty, Sean 	list_for_each_entry_safe(uevent, tmp, &list, list) {
5548ae291ccSJason Gunthorpe 		ucma_destroy_private_ctx(uevent->conn_req_ctx);
555186834b5SHefty, Sean 		kfree(uevent);
556186834b5SHefty, Sean 	}
55775216638SSean Hefty 	return events_reported;
55875216638SSean Hefty }
55975216638SSean Hefty 
560308571deSJason Gunthorpe /*
5618ae291ccSJason Gunthorpe  * When this is called the xarray must have an XA_ZERO_ENTRY at ctx->id (i.e.
5628ae291ccSJason Gunthorpe  * the ctx is not public to the user). This is either because:
5638ae291ccSJason Gunthorpe  *  - ucma_finish_ctx() hasn't been called
5648ae291ccSJason Gunthorpe  *  - xa_cmpxchg() succeeded in removing the entry (only one thread can succeed)
565308571deSJason Gunthorpe  */
5668ae291ccSJason Gunthorpe static int ucma_destroy_private_ctx(struct ucma_context *ctx)
5678ae291ccSJason Gunthorpe {
5688ae291ccSJason Gunthorpe 	int events_reported;
569c07e12d8SJason Gunthorpe 
5708ae291ccSJason Gunthorpe 	/*
5718ae291ccSJason Gunthorpe 	 * Destroy the underlying cm_id. New work queuing is prevented now by
5728ae291ccSJason Gunthorpe 	 * the removal from the xarray. Once the work is cancelled, the ref will
5738ae291ccSJason Gunthorpe 	 * either be 0 because the work ran to completion and consumed the ref from the
5748ae291ccSJason Gunthorpe 	 * xarray, or it will be positive because we still have the ref from the
5758ae291ccSJason Gunthorpe 	 * xarray. It can also be 0 in cases where the cm_id was never set.
5768ae291ccSJason Gunthorpe 	 */
577657360d6SJason Gunthorpe 	cancel_work_sync(&ctx->close_work);
5788ae291ccSJason Gunthorpe 	if (refcount_read(&ctx->ref))
579657360d6SJason Gunthorpe 		ucma_close_id(&ctx->close_work);
5808ae291ccSJason Gunthorpe 
5818ae291ccSJason Gunthorpe 	events_reported = ucma_cleanup_ctx_events(ctx);
5828ae291ccSJason Gunthorpe 	ucma_cleanup_multicast(ctx);
5838ae291ccSJason Gunthorpe 
5848ae291ccSJason Gunthorpe 	WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, XA_ZERO_ENTRY, NULL,
5858ae291ccSJason Gunthorpe 			   GFP_KERNEL) != NULL);
5868ae291ccSJason Gunthorpe 	mutex_destroy(&ctx->mutex);
5878ae291ccSJason Gunthorpe 	kfree(ctx);
5888ae291ccSJason Gunthorpe 	return events_reported;
589c07e12d8SJason Gunthorpe }
590c07e12d8SJason Gunthorpe 
59175216638SSean Hefty static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
59275216638SSean Hefty 			       int in_len, int out_len)
59375216638SSean Hefty {
59475216638SSean Hefty 	struct rdma_ucm_destroy_id cmd;
59575216638SSean Hefty 	struct rdma_ucm_destroy_id_resp resp;
59675216638SSean Hefty 	struct ucma_context *ctx;
59775216638SSean Hefty 	int ret = 0;
59875216638SSean Hefty 
59975216638SSean Hefty 	if (out_len < sizeof(resp))
60075216638SSean Hefty 		return -ENOSPC;
60175216638SSean Hefty 
60275216638SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
60375216638SSean Hefty 		return -EFAULT;
60475216638SSean Hefty 
605afcafe07SMatthew Wilcox 	xa_lock(&ctx_table);
60675216638SSean Hefty 	ctx = _ucma_find_context(cmd.id, file);
6078ae291ccSJason Gunthorpe 	if (!IS_ERR(ctx)) {
6088ae291ccSJason Gunthorpe 		if (__xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
6098ae291ccSJason Gunthorpe 				 GFP_KERNEL) != ctx)
6108ae291ccSJason Gunthorpe 			ctx = ERR_PTR(-ENOENT);
6118ae291ccSJason Gunthorpe 	}
612afcafe07SMatthew Wilcox 	xa_unlock(&ctx_table);
61375216638SSean Hefty 
61475216638SSean Hefty 	if (IS_ERR(ctx))
61575216638SSean Hefty 		return PTR_ERR(ctx);
61675216638SSean Hefty 
6178ae291ccSJason Gunthorpe 	resp.events_reported = ucma_destroy_private_ctx(ctx);
6186f57c933SJason Gunthorpe 	if (copy_to_user(u64_to_user_ptr(cmd.response),
61975216638SSean Hefty 			 &resp, sizeof(resp)))
62075216638SSean Hefty 		ret = -EFAULT;
62175216638SSean Hefty 
62275216638SSean Hefty 	return ret;
62375216638SSean Hefty }
62475216638SSean Hefty 
62605ad9457SSean Hefty static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
62675216638SSean Hefty 			      int in_len, int out_len)
62775216638SSean Hefty {
62805ad9457SSean Hefty 	struct rdma_ucm_bind_ip cmd;
62975216638SSean Hefty 	struct ucma_context *ctx;
63075216638SSean Hefty 	int ret;
63175216638SSean Hefty 
63275216638SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
63375216638SSean Hefty 		return -EFAULT;
63475216638SSean Hefty 
63584652aefSRoland Dreier 	if (!rdma_addr_size_in6(&cmd.addr))
63684652aefSRoland Dreier 		return -EINVAL;
63784652aefSRoland Dreier 
63875216638SSean Hefty 	ctx = ucma_get_ctx(file, cmd.id);
63975216638SSean Hefty 	if (IS_ERR(ctx))
64075216638SSean Hefty 		return PTR_ERR(ctx);
64175216638SSean Hefty 
6427c119107SJason Gunthorpe 	mutex_lock(&ctx->mutex);
64375216638SSean Hefty 	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
6447c119107SJason Gunthorpe 	mutex_unlock(&ctx->mutex);
6457c119107SJason Gunthorpe 
64675216638SSean Hefty 	ucma_put_ctx(ctx);
64775216638SSean Hefty 	return ret;
64875216638SSean Hefty }
64975216638SSean Hefty 
650eebe4c3aSSean Hefty static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
651eebe4c3aSSean Hefty 			 int in_len, int out_len)
652eebe4c3aSSean Hefty {
653eebe4c3aSSean Hefty 	struct rdma_ucm_bind cmd;
654eebe4c3aSSean Hefty 	struct ucma_context *ctx;
655eebe4c3aSSean Hefty 	int ret;
656eebe4c3aSSean Hefty 
657eebe4c3aSSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
658eebe4c3aSSean Hefty 		return -EFAULT;
659eebe4c3aSSean Hefty 
66084652aefSRoland Dreier 	if (cmd.reserved || !cmd.addr_size ||
66184652aefSRoland Dreier 	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
662eebe4c3aSSean Hefty 		return -EINVAL;
663eebe4c3aSSean Hefty 
664eebe4c3aSSean Hefty 	ctx = ucma_get_ctx(file, cmd.id);
665eebe4c3aSSean Hefty 	if (IS_ERR(ctx))
666eebe4c3aSSean Hefty 		return PTR_ERR(ctx);
667eebe4c3aSSean Hefty 
6687c119107SJason Gunthorpe 	mutex_lock(&ctx->mutex);
66984652aefSRoland Dreier 	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
6707c119107SJason Gunthorpe 	mutex_unlock(&ctx->mutex);
671eebe4c3aSSean Hefty 	ucma_put_ctx(ctx);
672eebe4c3aSSean Hefty 	return ret;
673eebe4c3aSSean Hefty }
674eebe4c3aSSean Hefty 
67505ad9457SSean Hefty static ssize_t ucma_resolve_ip(struct ucma_file *file,
67675216638SSean Hefty 			       const char __user *inbuf,
67775216638SSean Hefty 			       int in_len, int out_len)
67875216638SSean Hefty {
67905ad9457SSean Hefty 	struct rdma_ucm_resolve_ip cmd;
68075216638SSean Hefty 	struct ucma_context *ctx;
68175216638SSean Hefty 	int ret;
68275216638SSean Hefty 
68375216638SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
68475216638SSean Hefty 		return -EFAULT;
68575216638SSean Hefty 
68609abfe7bSRoland Dreier 	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
68784652aefSRoland Dreier 	    !rdma_addr_size_in6(&cmd.dst_addr))
6882975d5deSLeon Romanovsky 		return -EINVAL;
6892975d5deSLeon Romanovsky 
69075216638SSean Hefty 	ctx = ucma_get_ctx(file, cmd.id);
69175216638SSean Hefty 	if (IS_ERR(ctx))
69275216638SSean Hefty 		return PTR_ERR(ctx);
69375216638SSean Hefty 
6947c119107SJason Gunthorpe 	mutex_lock(&ctx->mutex);
69584652aefSRoland Dreier 	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
69684652aefSRoland Dreier 				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
6977c119107SJason Gunthorpe 	mutex_unlock(&ctx->mutex);
69875216638SSean Hefty 	ucma_put_ctx(ctx);
69975216638SSean Hefty 	return ret;
70075216638SSean Hefty }
70175216638SSean Hefty 
702209cf2a7SSean Hefty static ssize_t ucma_resolve_addr(struct ucma_file *file,
703209cf2a7SSean Hefty 				 const char __user *inbuf,
704209cf2a7SSean Hefty 				 int in_len, int out_len)
705209cf2a7SSean Hefty {
706209cf2a7SSean Hefty 	struct rdma_ucm_resolve_addr cmd;
707209cf2a7SSean Hefty 	struct ucma_context *ctx;
708209cf2a7SSean Hefty 	int ret;
709209cf2a7SSean Hefty 
710209cf2a7SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
711209cf2a7SSean Hefty 		return -EFAULT;
712209cf2a7SSean Hefty 
71384652aefSRoland Dreier 	if (cmd.reserved ||
71484652aefSRoland Dreier 	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
71584652aefSRoland Dreier 	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
716209cf2a7SSean Hefty 		return -EINVAL;
717209cf2a7SSean Hefty 
718209cf2a7SSean Hefty 	ctx = ucma_get_ctx(file, cmd.id);
719209cf2a7SSean Hefty 	if (IS_ERR(ctx))
720209cf2a7SSean Hefty 		return PTR_ERR(ctx);
721209cf2a7SSean Hefty 
7227c119107SJason Gunthorpe 	mutex_lock(&ctx->mutex);
72384652aefSRoland Dreier 	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
72484652aefSRoland Dreier 				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
7257c119107SJason Gunthorpe 	mutex_unlock(&ctx->mutex);
726209cf2a7SSean Hefty 	ucma_put_ctx(ctx);
727209cf2a7SSean Hefty 	return ret;
728209cf2a7SSean Hefty }
729209cf2a7SSean Hefty 
73075216638SSean Hefty static ssize_t ucma_resolve_route(struct ucma_file *file,
73175216638SSean Hefty 				  const char __user *inbuf,
73275216638SSean Hefty 				  int in_len, int out_len)
73375216638SSean Hefty {
73475216638SSean Hefty 	struct rdma_ucm_resolve_route cmd;
73575216638SSean Hefty 	struct ucma_context *ctx;
73675216638SSean Hefty 	int ret;
73775216638SSean Hefty 
73875216638SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
73975216638SSean Hefty 		return -EFAULT;
74075216638SSean Hefty 
7418b77586bSJason Gunthorpe 	ctx = ucma_get_ctx_dev(file, cmd.id);
74275216638SSean Hefty 	if (IS_ERR(ctx))
74375216638SSean Hefty 		return PTR_ERR(ctx);
74475216638SSean Hefty 
7457c119107SJason Gunthorpe 	mutex_lock(&ctx->mutex);
74675216638SSean Hefty 	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
7477c119107SJason Gunthorpe 	mutex_unlock(&ctx->mutex);
74875216638SSean Hefty 	ucma_put_ctx(ctx);
74975216638SSean Hefty 	return ret;
75075216638SSean Hefty }
75175216638SSean Hefty 
75275216638SSean Hefty static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
75375216638SSean Hefty 			       struct rdma_route *route)
75475216638SSean Hefty {
75575216638SSean Hefty 	struct rdma_dev_addr *dev_addr;
75675216638SSean Hefty 
757*bf9a9928SMark Zhang 	resp->num_paths = route->num_pri_alt_paths;
758*bf9a9928SMark Zhang 	switch (route->num_pri_alt_paths) {
75975216638SSean Hefty 	case 0:
76075216638SSean Hefty 		dev_addr = &route->addr.dev_addr;
7616f8372b6SSean Hefty 		rdma_addr_get_dgid(dev_addr,
76275216638SSean Hefty 				   (union ib_gid *) &resp->ib_route[0].dgid);
7636f8372b6SSean Hefty 		rdma_addr_get_sgid(dev_addr,
76475216638SSean Hefty 				   (union ib_gid *) &resp->ib_route[0].sgid);
76575216638SSean Hefty 		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
76675216638SSean Hefty 		break;
76775216638SSean Hefty 	case 2:
76875216638SSean Hefty 		ib_copy_path_rec_to_user(&resp->ib_route[1],
76975216638SSean Hefty 					 &route->path_rec[1]);
770df561f66SGustavo A. R. Silva 		fallthrough;
77175216638SSean Hefty 	case 1:
77275216638SSean Hefty 		ib_copy_path_rec_to_user(&resp->ib_route[0],
77375216638SSean Hefty 					 &route->path_rec[0]);
77475216638SSean Hefty 		break;
77575216638SSean Hefty 	default:
77675216638SSean Hefty 		break;
77775216638SSean Hefty 	}
77875216638SSean Hefty }
77975216638SSean Hefty 
7803c86aa70SEli Cohen static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
7813c86aa70SEli Cohen 				 struct rdma_route *route)
7823c86aa70SEli Cohen {
7833c86aa70SEli Cohen 
784*bf9a9928SMark Zhang 	resp->num_paths = route->num_pri_alt_paths;
785*bf9a9928SMark Zhang 	switch (route->num_pri_alt_paths) {
7863c86aa70SEli Cohen 	case 0:
7877b85627bSMoni Shoua 		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
7887b85627bSMoni Shoua 			    (union ib_gid *)&resp->ib_route[0].dgid);
7897b85627bSMoni Shoua 		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
7903c86aa70SEli Cohen 			    (union ib_gid *)&resp->ib_route[0].sgid);
7913c86aa70SEli Cohen 		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
7923c86aa70SEli Cohen 		break;
7933c86aa70SEli Cohen 	case 2:
7943c86aa70SEli Cohen 		ib_copy_path_rec_to_user(&resp->ib_route[1],
7953c86aa70SEli Cohen 					 &route->path_rec[1]);
796df561f66SGustavo A. R. Silva 		fallthrough;
7973c86aa70SEli Cohen 	case 1:
7983c86aa70SEli Cohen 		ib_copy_path_rec_to_user(&resp->ib_route[0],
7993c86aa70SEli Cohen 					 &route->path_rec[0]);
8003c86aa70SEli Cohen 		break;
8013c86aa70SEli Cohen 	default:
8023c86aa70SEli Cohen 		break;
8033c86aa70SEli Cohen 	}
8043c86aa70SEli Cohen }
8053c86aa70SEli Cohen 
806e86f8b06SSteve Wise static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
807e86f8b06SSteve Wise 			       struct rdma_route *route)
808e86f8b06SSteve Wise {
809e86f8b06SSteve Wise 	struct rdma_dev_addr *dev_addr;
810e86f8b06SSteve Wise 
811e86f8b06SSteve Wise 	dev_addr = &route->addr.dev_addr;
812e86f8b06SSteve Wise 	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
813e86f8b06SSteve Wise 	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
814e86f8b06SSteve Wise }
815e86f8b06SSteve Wise 
81675216638SSean Hefty static ssize_t ucma_query_route(struct ucma_file *file,
81775216638SSean Hefty 				const char __user *inbuf,
81875216638SSean Hefty 				int in_len, int out_len)
81975216638SSean Hefty {
820ee7aed45SSean Hefty 	struct rdma_ucm_query cmd;
82175216638SSean Hefty 	struct rdma_ucm_query_route_resp resp;
82275216638SSean Hefty 	struct ucma_context *ctx;
82375216638SSean Hefty 	struct sockaddr *addr;
82475216638SSean Hefty 	int ret = 0;
82575216638SSean Hefty 
82617793833SLeon Romanovsky 	if (out_len < offsetof(struct rdma_ucm_query_route_resp, ibdev_index))
82775216638SSean Hefty 		return -ENOSPC;
82875216638SSean Hefty 
82975216638SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
83075216638SSean Hefty 		return -EFAULT;
83175216638SSean Hefty 
83275216638SSean Hefty 	ctx = ucma_get_ctx(file, cmd.id);
83375216638SSean Hefty 	if (IS_ERR(ctx))
83475216638SSean Hefty 		return PTR_ERR(ctx);
83575216638SSean Hefty 
8367c119107SJason Gunthorpe 	mutex_lock(&ctx->mutex);
83775216638SSean Hefty 	memset(&resp, 0, sizeof resp);
8383f446754SRoland Dreier 	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
83975216638SSean Hefty 	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
84075216638SSean Hefty 				     sizeof(struct sockaddr_in) :
84175216638SSean Hefty 				     sizeof(struct sockaddr_in6));
8423f446754SRoland Dreier 	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
84375216638SSean Hefty 	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
84475216638SSean Hefty 				     sizeof(struct sockaddr_in) :
84575216638SSean Hefty 				     sizeof(struct sockaddr_in6));
84675216638SSean Hefty 	if (!ctx->cm_id->device)
84775216638SSean Hefty 		goto out;
84875216638SSean Hefty 
8499cda779cSRoland Dreier 	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
85017793833SLeon Romanovsky 	resp.ibdev_index = ctx->cm_id->device->index;
85175216638SSean Hefty 	resp.port_num = ctx->cm_id->port_num;
852c72f2189SMichael Wang 
853fe53ba2fSMichael Wang 	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
85475216638SSean Hefty 		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
8555d9fb044SIra Weiny 	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
8563c86aa70SEli Cohen 		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
857c72f2189SMichael Wang 	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
858e86f8b06SSteve Wise 		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
85975216638SSean Hefty 
86075216638SSean Hefty out:
8617c119107SJason Gunthorpe 	mutex_unlock(&ctx->mutex);
86217793833SLeon Romanovsky 	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp,
86317793833SLeon Romanovsky 			 min_t(size_t, out_len, sizeof(resp))))
86475216638SSean Hefty 		ret = -EFAULT;
86575216638SSean Hefty 
86675216638SSean Hefty 	ucma_put_ctx(ctx);
86775216638SSean Hefty 	return ret;
86875216638SSean Hefty }
86975216638SSean Hefty 
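/* Fill in the node GUID, device index, port number and pkey when the cm_id
 * is bound to a device; a no-op otherwise.
 */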
870ee7aed45SSean Hefty static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
871ee7aed45SSean Hefty 				   struct rdma_ucm_query_addr_resp *resp)
872ee7aed45SSean Hefty {
873ee7aed45SSean Hefty 	if (!cm_id->device)
874ee7aed45SSean Hefty 		return;
875ee7aed45SSean Hefty 
876ee7aed45SSean Hefty 	resp->node_guid = (__force __u64) cm_id->device->node_guid;
87717793833SLeon Romanovsky 	resp->ibdev_index = cm_id->device->index;
878ee7aed45SSean Hefty 	resp->port_num = cm_id->port_num;
879ee7aed45SSean Hefty 	resp->pkey = (__force __u16) cpu_to_be16(
880ee7aed45SSean Hefty 		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
881ee7aed45SSean Hefty }
882ee7aed45SSean Hefty 
883ee7aed45SSean Hefty static ssize_t ucma_query_addr(struct ucma_context *ctx,
884ee7aed45SSean Hefty 			       void __user *response, int out_len)
885ee7aed45SSean Hefty {
886ee7aed45SSean Hefty 	struct rdma_ucm_query_addr_resp resp;
887ee7aed45SSean Hefty 	struct sockaddr *addr;
888ee7aed45SSean Hefty 	int ret = 0;
889ee7aed45SSean Hefty 
89017793833SLeon Romanovsky 	if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
891ee7aed45SSean Hefty 		return -ENOSPC;
892ee7aed45SSean Hefty 
893ee7aed45SSean Hefty 	memset(&resp, 0, sizeof resp);
894ee7aed45SSean Hefty 
895ee7aed45SSean Hefty 	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
896ee7aed45SSean Hefty 	resp.src_size = rdma_addr_size(addr);
897ee7aed45SSean Hefty 	memcpy(&resp.src_addr, addr, resp.src_size);
898ee7aed45SSean Hefty 
899ee7aed45SSean Hefty 	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
900ee7aed45SSean Hefty 	resp.dst_size = rdma_addr_size(addr);
901ee7aed45SSean Hefty 	memcpy(&resp.dst_addr, addr, resp.dst_size);
902ee7aed45SSean Hefty 
903ee7aed45SSean Hefty 	ucma_query_device_addr(ctx->cm_id, &resp);
904ee7aed45SSean Hefty 
90517793833SLeon Romanovsky 	if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
906ee7aed45SSean Hefty 		ret = -EFAULT;
907ee7aed45SSean Hefty 
908ee7aed45SSean Hefty 	return ret;
909ee7aed45SSean Hefty }
910ee7aed45SSean Hefty 
911ac53b264SSean Hefty static ssize_t ucma_query_path(struct ucma_context *ctx,
912ac53b264SSean Hefty 			       void __user *response, int out_len)
913ac53b264SSean Hefty {
914ac53b264SSean Hefty 	struct rdma_ucm_query_path_resp *resp;
915ac53b264SSean Hefty 	int i, ret = 0;
916ac53b264SSean Hefty 
917ac53b264SSean Hefty 	if (out_len < sizeof(*resp))
918ac53b264SSean Hefty 		return -ENOSPC;
919ac53b264SSean Hefty 
920ac53b264SSean Hefty 	resp = kzalloc(out_len, GFP_KERNEL);
921ac53b264SSean Hefty 	if (!resp)
922ac53b264SSean Hefty 		return -ENOMEM;
923ac53b264SSean Hefty 
924*bf9a9928SMark Zhang 	resp->num_paths = ctx->cm_id->route.num_pri_alt_paths;
925ac53b264SSean Hefty 	for (i = 0, out_len -= sizeof(*resp);
926ac53b264SSean Hefty 	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
927ac53b264SSean Hefty 	     i++, out_len -= sizeof(struct ib_path_rec_data)) {
92857520751SDasaratharaman Chandramouli 		struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];
929ac53b264SSean Hefty 
930ac53b264SSean Hefty 		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
931ac53b264SSean Hefty 					   IB_PATH_BIDIRECTIONAL;
93289838118SParav Pandit 		if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
93357520751SDasaratharaman Chandramouli 			struct sa_path_rec ib;
93457520751SDasaratharaman Chandramouli 
93557520751SDasaratharaman Chandramouli 			sa_convert_path_opa_to_ib(&ib, rec);
93657520751SDasaratharaman Chandramouli 			ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
93789838118SParav Pandit 
93889838118SParav Pandit 		} else {
93989838118SParav Pandit 			ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
94057520751SDasaratharaman Chandramouli 		}
941ac53b264SSean Hefty 	}
942ac53b264SSean Hefty 
9439bcb8940SGustavo A. R. Silva 	if (copy_to_user(response, resp, struct_size(resp, path_data, i)))
944ac53b264SSean Hefty 		ret = -EFAULT;
945ac53b264SSean Hefty 
946ac53b264SSean Hefty 	kfree(resp);
947ac53b264SSean Hefty 	return ret;
948ac53b264SSean Hefty }
949ac53b264SSean Hefty 
950edaa7a55SSean Hefty static ssize_t ucma_query_gid(struct ucma_context *ctx,
951edaa7a55SSean Hefty 			      void __user *response, int out_len)
952edaa7a55SSean Hefty {
953edaa7a55SSean Hefty 	struct rdma_ucm_query_addr_resp resp;
954edaa7a55SSean Hefty 	struct sockaddr_ib *addr;
955edaa7a55SSean Hefty 	int ret = 0;
956edaa7a55SSean Hefty 
95717793833SLeon Romanovsky 	if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
958edaa7a55SSean Hefty 		return -ENOSPC;
959edaa7a55SSean Hefty 
960edaa7a55SSean Hefty 	memset(&resp, 0, sizeof resp);
961edaa7a55SSean Hefty 
962edaa7a55SSean Hefty 	ucma_query_device_addr(ctx->cm_id, &resp);
963edaa7a55SSean Hefty 
964edaa7a55SSean Hefty 	addr = (struct sockaddr_ib *) &resp.src_addr;
965edaa7a55SSean Hefty 	resp.src_size = sizeof(*addr);
966edaa7a55SSean Hefty 	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
967edaa7a55SSean Hefty 		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
968edaa7a55SSean Hefty 	} else {
969edaa7a55SSean Hefty 		addr->sib_family = AF_IB;
970edaa7a55SSean Hefty 		addr->sib_pkey = (__force __be16) resp.pkey;
9717a2f64eeSParav Pandit 		rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
9727a2f64eeSParav Pandit 			       NULL);
973edaa7a55SSean Hefty 		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
974edaa7a55SSean Hefty 						    &ctx->cm_id->route.addr.src_addr);
975edaa7a55SSean Hefty 	}
976edaa7a55SSean Hefty 
977edaa7a55SSean Hefty 	addr = (struct sockaddr_ib *) &resp.dst_addr;
978edaa7a55SSean Hefty 	resp.dst_size = sizeof(*addr);
979edaa7a55SSean Hefty 	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
980edaa7a55SSean Hefty 		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
981edaa7a55SSean Hefty 	} else {
982edaa7a55SSean Hefty 		addr->sib_family = AF_IB;
983edaa7a55SSean Hefty 		addr->sib_pkey = (__force __be16) resp.pkey;
9847a2f64eeSParav Pandit 		rdma_read_gids(ctx->cm_id, NULL,
985edaa7a55SSean Hefty 			       (union ib_gid *)&addr->sib_addr);
986edaa7a55SSean Hefty 		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
987edaa7a55SSean Hefty 						    &ctx->cm_id->route.addr.dst_addr);
988edaa7a55SSean Hefty 	}
989edaa7a55SSean Hefty 
99017793833SLeon Romanovsky 	if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
991edaa7a55SSean Hefty 		ret = -EFAULT;
992edaa7a55SSean Hefty 
993edaa7a55SSean Hefty 	return ret;
994edaa7a55SSean Hefty }
995edaa7a55SSean Hefty 
996ee7aed45SSean Hefty static ssize_t ucma_query(struct ucma_file *file,
997ee7aed45SSean Hefty 			  const char __user *inbuf,
998ee7aed45SSean Hefty 			  int in_len, int out_len)
999ee7aed45SSean Hefty {
1000ee7aed45SSean Hefty 	struct rdma_ucm_query cmd;
1001ee7aed45SSean Hefty 	struct ucma_context *ctx;
1002ee7aed45SSean Hefty 	void __user *response;
1003ee7aed45SSean Hefty 	int ret;
1004ee7aed45SSean Hefty 
1005ee7aed45SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1006ee7aed45SSean Hefty 		return -EFAULT;
1007ee7aed45SSean Hefty 
10086f57c933SJason Gunthorpe 	response = u64_to_user_ptr(cmd.response);
1009ee7aed45SSean Hefty 	ctx = ucma_get_ctx(file, cmd.id);
1010ee7aed45SSean Hefty 	if (IS_ERR(ctx))
1011ee7aed45SSean Hefty 		return PTR_ERR(ctx);
1012ee7aed45SSean Hefty 
10137c119107SJason Gunthorpe 	mutex_lock(&ctx->mutex);
1014ee7aed45SSean Hefty 	switch (cmd.option) {
1015ee7aed45SSean Hefty 	case RDMA_USER_CM_QUERY_ADDR:
1016ee7aed45SSean Hefty 		ret = ucma_query_addr(ctx, response, out_len);
1017ee7aed45SSean Hefty 		break;
1018ac53b264SSean Hefty 	case RDMA_USER_CM_QUERY_PATH:
1019ac53b264SSean Hefty 		ret = ucma_query_path(ctx, response, out_len);
1020ac53b264SSean Hefty 		break;
1021edaa7a55SSean Hefty 	case RDMA_USER_CM_QUERY_GID:
1022edaa7a55SSean Hefty 		ret = ucma_query_gid(ctx, response, out_len);
1023edaa7a55SSean Hefty 		break;
1024ee7aed45SSean Hefty 	default:
1025ee7aed45SSean Hefty 		ret = -ENOSYS;
1026ee7aed45SSean Hefty 		break;
1027ee7aed45SSean Hefty 	}
10287c119107SJason Gunthorpe 	mutex_unlock(&ctx->mutex);
1029ee7aed45SSean Hefty 
1030ee7aed45SSean Hefty 	ucma_put_ctx(ctx);
1031ee7aed45SSean Hefty 	return ret;
1032ee7aed45SSean Hefty }
1033ee7aed45SSean Hefty 
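/* Copy userspace connection parameters into a kernel rdma_conn_param. The
 * qkey is only honoured for AF_IB addresses.
 */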
10345c438135SSean Hefty static void ucma_copy_conn_param(struct rdma_cm_id *id,
10355c438135SSean Hefty 				 struct rdma_conn_param *dst,
103675216638SSean Hefty 				 struct rdma_ucm_conn_param *src)
103775216638SSean Hefty {
103875216638SSean Hefty 	dst->private_data = src->private_data;
103975216638SSean Hefty 	dst->private_data_len = src->private_data_len;
104075216638SSean Hefty 	dst->responder_resources = src->responder_resources;
104175216638SSean Hefty 	dst->initiator_depth = src->initiator_depth;
104275216638SSean Hefty 	dst->flow_control = src->flow_control;
104375216638SSean Hefty 	dst->retry_count = src->retry_count;
104475216638SSean Hefty 	dst->rnr_retry_count = src->rnr_retry_count;
104575216638SSean Hefty 	dst->srq = src->srq;
1046ca750d4aSLeon Romanovsky 	dst->qp_num = src->qp_num & 0xFFFFFF;
10475c438135SSean Hefty 	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
104875216638SSean Hefty }
104975216638SSean Hefty 
105075216638SSean Hefty static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
105175216638SSean Hefty 			    int in_len, int out_len)
105275216638SSean Hefty {
105375216638SSean Hefty 	struct rdma_conn_param conn_param;
105434e2ab57SLeon Romanovsky 	struct rdma_ucm_ece ece = {};
105534e2ab57SLeon Romanovsky 	struct rdma_ucm_connect cmd;
105675216638SSean Hefty 	struct ucma_context *ctx;
105734e2ab57SLeon Romanovsky 	size_t in_size;
105875216638SSean Hefty 	int ret;
105975216638SSean Hefty 
106031142a4bSJason Gunthorpe 	if (in_len < offsetofend(typeof(cmd), reserved))
106131142a4bSJason Gunthorpe 		return -EINVAL;
106234e2ab57SLeon Romanovsky 	in_size = min_t(size_t, in_len, sizeof(cmd));
106334e2ab57SLeon Romanovsky 	if (copy_from_user(&cmd, inbuf, in_size))
106475216638SSean Hefty 		return -EFAULT;
106575216638SSean Hefty 
106675216638SSean Hefty 	if (!cmd.conn_param.valid)
106775216638SSean Hefty 		return -EINVAL;
106875216638SSean Hefty 
10698b77586bSJason Gunthorpe 	ctx = ucma_get_ctx_dev(file, cmd.id);
107075216638SSean Hefty 	if (IS_ERR(ctx))
107175216638SSean Hefty 		return PTR_ERR(ctx);
107275216638SSean Hefty 
10735c438135SSean Hefty 	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
107434e2ab57SLeon Romanovsky 	if (offsetofend(typeof(cmd), ece) <= in_size) {
107534e2ab57SLeon Romanovsky 		ece.vendor_id = cmd.ece.vendor_id;
107634e2ab57SLeon Romanovsky 		ece.attr_mod = cmd.ece.attr_mod;
107734e2ab57SLeon Romanovsky 	}
107834e2ab57SLeon Romanovsky 
10797c119107SJason Gunthorpe 	mutex_lock(&ctx->mutex);
108034e2ab57SLeon Romanovsky 	ret = rdma_connect_ece(ctx->cm_id, &conn_param, &ece);
10817c119107SJason Gunthorpe 	mutex_unlock(&ctx->mutex);
108275216638SSean Hefty 	ucma_put_ctx(ctx);
108375216638SSean Hefty 	return ret;
108475216638SSean Hefty }
108575216638SSean Hefty 
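The in_len handling in ucma_connect() above is the usual pattern for growing a command struct without breaking older userspace: everything up to the reserved member is mandatory, the payload is truncated to what the kernel-side struct can hold, and the trailing ece member is honoured only when the caller actually supplied it. Below is a minimal, self-contained restatement of that idiom; the struct and function names (demo_cmd, demo_parse) are hypothetical and the usual kernel headers are assumed.

#include <linux/errno.h>
#include <linux/kernel.h>	/* min_t() */
#include <linux/stddef.h>	/* offsetofend() */
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical command, used only to illustrate the trailing-field idiom. */
struct demo_cmd {
	__u32 id;
	__u32 reserved;
	__u64 newer_field;	/* appended in a later ABI revision */
};

static int demo_parse(const char __user *inbuf, int in_len, struct demo_cmd *cmd)
{
	size_t in_size;

	/* Old userspace must still cover the original, mandatory fields. */
	if (in_len < offsetofend(typeof(*cmd), reserved))
		return -EINVAL;

	/* Never copy more than the kernel-side struct can hold. */
	in_size = min_t(size_t, in_len, sizeof(*cmd));
	memset(cmd, 0, sizeof(*cmd));
	if (copy_from_user(cmd, inbuf, in_size))
		return -EFAULT;

	/* Trust newer_field only if the caller's payload actually reached it. */
	if (offsetofend(typeof(*cmd), newer_field) > in_size)
		cmd->newer_field = 0;
	return 0;
}
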
108675216638SSean Hefty static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
108775216638SSean Hefty 			   int in_len, int out_len)
108875216638SSean Hefty {
108975216638SSean Hefty 	struct rdma_ucm_listen cmd;
109075216638SSean Hefty 	struct ucma_context *ctx;
109175216638SSean Hefty 	int ret;
109275216638SSean Hefty 
109375216638SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
109475216638SSean Hefty 		return -EFAULT;
109575216638SSean Hefty 
109675216638SSean Hefty 	ctx = ucma_get_ctx(file, cmd.id);
109775216638SSean Hefty 	if (IS_ERR(ctx))
109875216638SSean Hefty 		return PTR_ERR(ctx);
109975216638SSean Hefty 
110026c15decSJason Gunthorpe 	if (cmd.backlog <= 0 || cmd.backlog > max_backlog)
110126c15decSJason Gunthorpe 		cmd.backlog = max_backlog;
110226c15decSJason Gunthorpe 	atomic_set(&ctx->backlog, cmd.backlog);
110326c15decSJason Gunthorpe 
11047c119107SJason Gunthorpe 	mutex_lock(&ctx->mutex);
110526c15decSJason Gunthorpe 	ret = rdma_listen(ctx->cm_id, cmd.backlog);
11067c119107SJason Gunthorpe 	mutex_unlock(&ctx->mutex);
110775216638SSean Hefty 	ucma_put_ctx(ctx);
110875216638SSean Hefty 	return ret;
110975216638SSean Hefty }
111075216638SSean Hefty 
111175216638SSean Hefty static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
111275216638SSean Hefty 			   int in_len, int out_len)
111375216638SSean Hefty {
111475216638SSean Hefty 	struct rdma_ucm_accept cmd;
111575216638SSean Hefty 	struct rdma_conn_param conn_param;
11160cb15372SLeon Romanovsky 	struct rdma_ucm_ece ece = {};
111775216638SSean Hefty 	struct ucma_context *ctx;
11180cb15372SLeon Romanovsky 	size_t in_size;
111975216638SSean Hefty 	int ret;
112075216638SSean Hefty 
112131142a4bSJason Gunthorpe 	if (in_len < offsetofend(typeof(cmd), reserved))
112231142a4bSJason Gunthorpe 		return -EINVAL;
11230cb15372SLeon Romanovsky 	in_size = min_t(size_t, in_len, sizeof(cmd));
11240cb15372SLeon Romanovsky 	if (copy_from_user(&cmd, inbuf, in_size))
112575216638SSean Hefty 		return -EFAULT;
112675216638SSean Hefty 
11278b77586bSJason Gunthorpe 	ctx = ucma_get_ctx_dev(file, cmd.id);
112875216638SSean Hefty 	if (IS_ERR(ctx))
112975216638SSean Hefty 		return PTR_ERR(ctx);
113075216638SSean Hefty 
11310cb15372SLeon Romanovsky 	if (offsetofend(typeof(cmd), ece) <= in_size) {
11320cb15372SLeon Romanovsky 		ece.vendor_id = cmd.ece.vendor_id;
11330cb15372SLeon Romanovsky 		ece.attr_mod = cmd.ece.attr_mod;
11340cb15372SLeon Romanovsky 	}
11350cb15372SLeon Romanovsky 
113675216638SSean Hefty 	if (cmd.conn_param.valid) {
11375c438135SSean Hefty 		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
11387c119107SJason Gunthorpe 		mutex_lock(&ctx->mutex);
1139d114c6feSJason Gunthorpe 		rdma_lock_handler(ctx->cm_id);
1140b09c4d70SLeon Romanovsky 		ret = rdma_accept_ece(ctx->cm_id, &conn_param, &ece);
1141d114c6feSJason Gunthorpe 		if (!ret) {
1142d114c6feSJason Gunthorpe 			/* The uid must be set atomically with the handler */
11439ced69caSSean Hefty 			ctx->uid = cmd.uid;
1144d114c6feSJason Gunthorpe 		}
1145d114c6feSJason Gunthorpe 		rdma_unlock_handler(ctx->cm_id);
1146d114c6feSJason Gunthorpe 		mutex_unlock(&ctx->mutex);
11477c119107SJason Gunthorpe 	} else {
11487c119107SJason Gunthorpe 		mutex_lock(&ctx->mutex);
1149d114c6feSJason Gunthorpe 		rdma_lock_handler(ctx->cm_id);
1150b09c4d70SLeon Romanovsky 		ret = rdma_accept_ece(ctx->cm_id, NULL, &ece);
1151d114c6feSJason Gunthorpe 		rdma_unlock_handler(ctx->cm_id);
11527c119107SJason Gunthorpe 		mutex_unlock(&ctx->mutex);
11537c119107SJason Gunthorpe 	}
115475216638SSean Hefty 	ucma_put_ctx(ctx);
115575216638SSean Hefty 	return ret;
115675216638SSean Hefty }
115775216638SSean Hefty 
115875216638SSean Hefty static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
115975216638SSean Hefty 			   int in_len, int out_len)
116075216638SSean Hefty {
116175216638SSean Hefty 	struct rdma_ucm_reject cmd;
116275216638SSean Hefty 	struct ucma_context *ctx;
116375216638SSean Hefty 	int ret;
116475216638SSean Hefty 
116575216638SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
116675216638SSean Hefty 		return -EFAULT;
116775216638SSean Hefty 
11688094ba0aSLeon Romanovsky 	if (!cmd.reason)
11698094ba0aSLeon Romanovsky 		cmd.reason = IB_CM_REJ_CONSUMER_DEFINED;
11708094ba0aSLeon Romanovsky 
11718094ba0aSLeon Romanovsky 	switch (cmd.reason) {
11728094ba0aSLeon Romanovsky 	case IB_CM_REJ_CONSUMER_DEFINED:
11738094ba0aSLeon Romanovsky 	case IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED:
11748094ba0aSLeon Romanovsky 		break;
11758094ba0aSLeon Romanovsky 	default:
11768094ba0aSLeon Romanovsky 		return -EINVAL;
11778094ba0aSLeon Romanovsky 	}
11788094ba0aSLeon Romanovsky 
11798b77586bSJason Gunthorpe 	ctx = ucma_get_ctx_dev(file, cmd.id);
118075216638SSean Hefty 	if (IS_ERR(ctx))
118175216638SSean Hefty 		return PTR_ERR(ctx);
118275216638SSean Hefty 
11837c119107SJason Gunthorpe 	mutex_lock(&ctx->mutex);
11848094ba0aSLeon Romanovsky 	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len,
11858094ba0aSLeon Romanovsky 			  cmd.reason);
11867c119107SJason Gunthorpe 	mutex_unlock(&ctx->mutex);
118775216638SSean Hefty 	ucma_put_ctx(ctx);
118875216638SSean Hefty 	return ret;
118975216638SSean Hefty }
119075216638SSean Hefty 
119175216638SSean Hefty static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
119275216638SSean Hefty 			       int in_len, int out_len)
119375216638SSean Hefty {
119475216638SSean Hefty 	struct rdma_ucm_disconnect cmd;
119575216638SSean Hefty 	struct ucma_context *ctx;
119675216638SSean Hefty 	int ret;
119775216638SSean Hefty 
119875216638SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
119975216638SSean Hefty 		return -EFAULT;
120075216638SSean Hefty 
12018b77586bSJason Gunthorpe 	ctx = ucma_get_ctx_dev(file, cmd.id);
120275216638SSean Hefty 	if (IS_ERR(ctx))
120375216638SSean Hefty 		return PTR_ERR(ctx);
120475216638SSean Hefty 
12057c119107SJason Gunthorpe 	mutex_lock(&ctx->mutex);
120675216638SSean Hefty 	ret = rdma_disconnect(ctx->cm_id);
12077c119107SJason Gunthorpe 	mutex_unlock(&ctx->mutex);
120875216638SSean Hefty 	ucma_put_ctx(ctx);
120975216638SSean Hefty 	return ret;
121075216638SSean Hefty }
121175216638SSean Hefty 
121275216638SSean Hefty static ssize_t ucma_init_qp_attr(struct ucma_file *file,
121375216638SSean Hefty 				 const char __user *inbuf,
121475216638SSean Hefty 				 int in_len, int out_len)
121575216638SSean Hefty {
121675216638SSean Hefty 	struct rdma_ucm_init_qp_attr cmd;
121775216638SSean Hefty 	struct ib_uverbs_qp_attr resp;
121875216638SSean Hefty 	struct ucma_context *ctx;
121975216638SSean Hefty 	struct ib_qp_attr qp_attr;
122075216638SSean Hefty 	int ret;
122175216638SSean Hefty 
122275216638SSean Hefty 	if (out_len < sizeof(resp))
122375216638SSean Hefty 		return -ENOSPC;
122475216638SSean Hefty 
122575216638SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
122675216638SSean Hefty 		return -EFAULT;
122775216638SSean Hefty 
1228a5880b84SLeon Romanovsky 	if (cmd.qp_state > IB_QPS_ERR)
1229a5880b84SLeon Romanovsky 		return -EINVAL;
1230a5880b84SLeon Romanovsky 
12318b77586bSJason Gunthorpe 	ctx = ucma_get_ctx_dev(file, cmd.id);
123275216638SSean Hefty 	if (IS_ERR(ctx))
123375216638SSean Hefty 		return PTR_ERR(ctx);
123475216638SSean Hefty 
123575216638SSean Hefty 	resp.qp_attr_mask = 0;
123675216638SSean Hefty 	memset(&qp_attr, 0, sizeof qp_attr);
123775216638SSean Hefty 	qp_attr.qp_state = cmd.qp_state;
12387c119107SJason Gunthorpe 	mutex_lock(&ctx->mutex);
123975216638SSean Hefty 	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
12407c119107SJason Gunthorpe 	mutex_unlock(&ctx->mutex);
124175216638SSean Hefty 	if (ret)
124275216638SSean Hefty 		goto out;
124375216638SSean Hefty 
1244d541e455SDasaratharaman Chandramouli 	ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
12456f57c933SJason Gunthorpe 	if (copy_to_user(u64_to_user_ptr(cmd.response),
124675216638SSean Hefty 			 &resp, sizeof(resp)))
124775216638SSean Hefty 		ret = -EFAULT;
124875216638SSean Hefty 
124975216638SSean Hefty out:
125075216638SSean Hefty 	ucma_put_ctx(ctx);
125175216638SSean Hefty 	return ret;
125275216638SSean Hefty }
125375216638SSean Hefty 
12547ce86409SSean Hefty static int ucma_set_option_id(struct ucma_context *ctx, int optname,
12557ce86409SSean Hefty 			      void *optval, size_t optlen)
12567ce86409SSean Hefty {
12577ce86409SSean Hefty 	int ret = 0;
12587ce86409SSean Hefty 
12597ce86409SSean Hefty 	switch (optname) {
12607ce86409SSean Hefty 	case RDMA_OPTION_ID_TOS:
12617ce86409SSean Hefty 		if (optlen != sizeof(u8)) {
12627ce86409SSean Hefty 			ret = -EINVAL;
12637ce86409SSean Hefty 			break;
12647ce86409SSean Hefty 		}
12657ce86409SSean Hefty 		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
12667ce86409SSean Hefty 		break;
1267a9bb7912SHefty, Sean 	case RDMA_OPTION_ID_REUSEADDR:
1268a9bb7912SHefty, Sean 		if (optlen != sizeof(int)) {
1269a9bb7912SHefty, Sean 			ret = -EINVAL;
1270a9bb7912SHefty, Sean 			break;
1271a9bb7912SHefty, Sean 		}
1272a9bb7912SHefty, Sean 		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
1273a9bb7912SHefty, Sean 		break;
127468602120SSean Hefty 	case RDMA_OPTION_ID_AFONLY:
127568602120SSean Hefty 		if (optlen != sizeof(int)) {
127668602120SSean Hefty 			ret = -EINVAL;
127768602120SSean Hefty 			break;
127868602120SSean Hefty 		}
127968602120SSean Hefty 		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
128068602120SSean Hefty 		break;
12812c1619edSDanit Goldberg 	case RDMA_OPTION_ID_ACK_TIMEOUT:
12822c1619edSDanit Goldberg 		if (optlen != sizeof(u8)) {
12832c1619edSDanit Goldberg 			ret = -EINVAL;
12842c1619edSDanit Goldberg 			break;
12852c1619edSDanit Goldberg 		}
12862c1619edSDanit Goldberg 		ret = rdma_set_ack_timeout(ctx->cm_id, *((u8 *)optval));
12872c1619edSDanit Goldberg 		break;
12887ce86409SSean Hefty 	default:
12897ce86409SSean Hefty 		ret = -ENOSYS;
12907ce86409SSean Hefty 	}
12917ce86409SSean Hefty 
12927ce86409SSean Hefty 	return ret;
12937ce86409SSean Hefty }
12947ce86409SSean Hefty 
1295a7ca1f00SSean Hefty static int ucma_set_ib_path(struct ucma_context *ctx,
1296a7ca1f00SSean Hefty 			    struct ib_path_rec_data *path_data, size_t optlen)
1297a7ca1f00SSean Hefty {
1298c2f8fc4eSDasaratharaman Chandramouli 	struct sa_path_rec sa_path;
1299a7ca1f00SSean Hefty 	struct rdma_cm_event event;
1300a7ca1f00SSean Hefty 	int ret;
1301a7ca1f00SSean Hefty 
1302a7ca1f00SSean Hefty 	if (optlen % sizeof(*path_data))
1303a7ca1f00SSean Hefty 		return -EINVAL;
1304a7ca1f00SSean Hefty 
1305a7ca1f00SSean Hefty 	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
1306a7ca1f00SSean Hefty 		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
1307a7ca1f00SSean Hefty 					 IB_PATH_BIDIRECTIONAL))
1308a7ca1f00SSean Hefty 			break;
1309a7ca1f00SSean Hefty 	}
1310a7ca1f00SSean Hefty 
1311a7ca1f00SSean Hefty 	if (!optlen)
1312a7ca1f00SSean Hefty 		return -EINVAL;
1313a7ca1f00SSean Hefty 
13148435168dSRoland Dreier 	if (!ctx->cm_id->device)
13158435168dSRoland Dreier 		return -EINVAL;
13168435168dSRoland Dreier 
1317c2be9dc0SIlya Nelkenbaum 	memset(&sa_path, 0, sizeof(sa_path));
1318c2be9dc0SIlya Nelkenbaum 
131957520751SDasaratharaman Chandramouli 	sa_path.rec_type = SA_PATH_REC_TYPE_IB;
1320a7ca1f00SSean Hefty 	ib_sa_unpack_path(path_data->path_rec, &sa_path);
132157520751SDasaratharaman Chandramouli 
132257520751SDasaratharaman Chandramouli 	if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
132357520751SDasaratharaman Chandramouli 		struct sa_path_rec opa;
132457520751SDasaratharaman Chandramouli 
132557520751SDasaratharaman Chandramouli 		sa_convert_path_ib_to_opa(&opa, &sa_path);
13267c119107SJason Gunthorpe 		mutex_lock(&ctx->mutex);
1327fe75889fSParav Pandit 		ret = rdma_set_ib_path(ctx->cm_id, &opa);
13287c119107SJason Gunthorpe 		mutex_unlock(&ctx->mutex);
132957520751SDasaratharaman Chandramouli 	} else {
13307c119107SJason Gunthorpe 		mutex_lock(&ctx->mutex);
1331fe75889fSParav Pandit 		ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
13327c119107SJason Gunthorpe 		mutex_unlock(&ctx->mutex);
133357520751SDasaratharaman Chandramouli 	}
1334a7ca1f00SSean Hefty 	if (ret)
1335a7ca1f00SSean Hefty 		return ret;
1336a7ca1f00SSean Hefty 
1337a7ca1f00SSean Hefty 	memset(&event, 0, sizeof event);
1338a7ca1f00SSean Hefty 	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1339a7ca1f00SSean Hefty 	return ucma_event_handler(ctx->cm_id, &event);
1340a7ca1f00SSean Hefty }
1341a7ca1f00SSean Hefty 
1342a7ca1f00SSean Hefty static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
1343a7ca1f00SSean Hefty 			      void *optval, size_t optlen)
1344a7ca1f00SSean Hefty {
1345a7ca1f00SSean Hefty 	int ret;
1346a7ca1f00SSean Hefty 
1347a7ca1f00SSean Hefty 	switch (optname) {
1348a7ca1f00SSean Hefty 	case RDMA_OPTION_IB_PATH:
1349a7ca1f00SSean Hefty 		ret = ucma_set_ib_path(ctx, optval, optlen);
1350a7ca1f00SSean Hefty 		break;
1351a7ca1f00SSean Hefty 	default:
1352a7ca1f00SSean Hefty 		ret = -ENOSYS;
1353a7ca1f00SSean Hefty 	}
1354a7ca1f00SSean Hefty 
1355a7ca1f00SSean Hefty 	return ret;
1356a7ca1f00SSean Hefty }
1357a7ca1f00SSean Hefty 
13587ce86409SSean Hefty static int ucma_set_option_level(struct ucma_context *ctx, int level,
13597ce86409SSean Hefty 				 int optname, void *optval, size_t optlen)
13607ce86409SSean Hefty {
13617ce86409SSean Hefty 	int ret;
13627ce86409SSean Hefty 
13637ce86409SSean Hefty 	switch (level) {
13647ce86409SSean Hefty 	case RDMA_OPTION_ID:
13657c119107SJason Gunthorpe 		mutex_lock(&ctx->mutex);
13667ce86409SSean Hefty 		ret = ucma_set_option_id(ctx, optname, optval, optlen);
13677c119107SJason Gunthorpe 		mutex_unlock(&ctx->mutex);
13687ce86409SSean Hefty 		break;
1369a7ca1f00SSean Hefty 	case RDMA_OPTION_IB:
1370a7ca1f00SSean Hefty 		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
1371a7ca1f00SSean Hefty 		break;
13727ce86409SSean Hefty 	default:
13737ce86409SSean Hefty 		ret = -ENOSYS;
13747ce86409SSean Hefty 	}
13757ce86409SSean Hefty 
13767ce86409SSean Hefty 	return ret;
13777ce86409SSean Hefty }
13787ce86409SSean Hefty 
13797ce86409SSean Hefty static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
13807ce86409SSean Hefty 			       int in_len, int out_len)
13817ce86409SSean Hefty {
13827ce86409SSean Hefty 	struct rdma_ucm_set_option cmd;
13837ce86409SSean Hefty 	struct ucma_context *ctx;
13847ce86409SSean Hefty 	void *optval;
13857ce86409SSean Hefty 	int ret;
13867ce86409SSean Hefty 
13877ce86409SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
13887ce86409SSean Hefty 		return -EFAULT;
13897ce86409SSean Hefty 
1390ef95a90aSShamir Rabinovitch 	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
1391ef95a90aSShamir Rabinovitch 		return -EINVAL;
1392ef95a90aSShamir Rabinovitch 
13937ce86409SSean Hefty 	ctx = ucma_get_ctx(file, cmd.id);
13947ce86409SSean Hefty 	if (IS_ERR(ctx))
13957ce86409SSean Hefty 		return PTR_ERR(ctx);
13967ce86409SSean Hefty 
13976f57c933SJason Gunthorpe 	optval = memdup_user(u64_to_user_ptr(cmd.optval),
13980764c76eSRoland Dreier 			     cmd.optlen);
13990764c76eSRoland Dreier 	if (IS_ERR(optval)) {
14000764c76eSRoland Dreier 		ret = PTR_ERR(optval);
14010764c76eSRoland Dreier 		goto out;
14027ce86409SSean Hefty 	}
14037ce86409SSean Hefty 
14047ce86409SSean Hefty 	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
14057ce86409SSean Hefty 				    cmd.optlen);
14067ce86409SSean Hefty 	kfree(optval);
14070764c76eSRoland Dreier 
14080764c76eSRoland Dreier out:
14097ce86409SSean Hefty 	ucma_put_ctx(ctx);
14107ce86409SSean Hefty 	return ret;
14117ce86409SSean Hefty }
14127ce86409SSean Hefty 
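End to end, the option path above is reached through an RDMA_USER_CM_CMD_SET_OPTION write. A hedged userspace sketch follows: the field names of struct rdma_ucm_set_option mirror the ones ucma_set_option() dereferences above, but the helper name set_tos is hypothetical and real applications would normally call rdma_set_option() from librdmacm instead of writing to the character device themselves.

/* Userspace sketch (illustrative only, not part of this driver). */
#include <string.h>
#include <unistd.h>
#include <rdma/rdma_user_cm.h>	/* command numbers, rdma_ucm_* structs */

static int set_tos(int fd, __u32 id, __u8 tos)
{
	struct {
		struct rdma_ucm_cmd_hdr hdr;
		struct rdma_ucm_set_option cmd;
	} msg;

	memset(&msg, 0, sizeof(msg));
	msg.hdr.cmd = RDMA_USER_CM_CMD_SET_OPTION;
	msg.hdr.in = sizeof(msg.cmd);
	msg.hdr.out = 0;

	msg.cmd.id = id;				/* context id from CREATE_ID */
	msg.cmd.level = RDMA_OPTION_ID;
	msg.cmd.optname = RDMA_OPTION_ID_TOS;
	msg.cmd.optval = (__u64)(unsigned long)&tos;	/* memdup_user()'d by the kernel */
	msg.cmd.optlen = sizeof(tos);			/* must be sizeof(u8), see ucma_set_option_id() */

	return write(fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}

The fd here is an open handle on /dev/infiniband/rdma_cm, the .nodename registered at the bottom of this file.
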
141375216638SSean Hefty static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
141475216638SSean Hefty 			   int in_len, int out_len)
141575216638SSean Hefty {
141675216638SSean Hefty 	struct rdma_ucm_notify cmd;
141775216638SSean Hefty 	struct ucma_context *ctx;
1418c8d3bcbfSLeon Romanovsky 	int ret = -EINVAL;
141975216638SSean Hefty 
142075216638SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
142175216638SSean Hefty 		return -EFAULT;
142275216638SSean Hefty 
142375216638SSean Hefty 	ctx = ucma_get_ctx(file, cmd.id);
142475216638SSean Hefty 	if (IS_ERR(ctx))
142575216638SSean Hefty 		return PTR_ERR(ctx);
142675216638SSean Hefty 
14277c119107SJason Gunthorpe 	mutex_lock(&ctx->mutex);
1428c8d3bcbfSLeon Romanovsky 	if (ctx->cm_id->device)
142975216638SSean Hefty 		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
14307c119107SJason Gunthorpe 	mutex_unlock(&ctx->mutex);
1431c8d3bcbfSLeon Romanovsky 
143275216638SSean Hefty 	ucma_put_ctx(ctx);
143375216638SSean Hefty 	return ret;
143475216638SSean Hefty }
143575216638SSean Hefty 
14365bc2b7b3SSean Hefty static ssize_t ucma_process_join(struct ucma_file *file,
14375bc2b7b3SSean Hefty 				 struct rdma_ucm_join_mcast *cmd,  int out_len)
1438c8f6a362SSean Hefty {
1439c8f6a362SSean Hefty 	struct rdma_ucm_create_id_resp resp;
1440c8f6a362SSean Hefty 	struct ucma_context *ctx;
1441c8f6a362SSean Hefty 	struct ucma_multicast *mc;
14425bc2b7b3SSean Hefty 	struct sockaddr *addr;
1443c8f6a362SSean Hefty 	int ret;
1444ab15c95aSAlex Vesker 	u8 join_state;
1445c8f6a362SSean Hefty 
1446c8f6a362SSean Hefty 	if (out_len < sizeof(resp))
1447c8f6a362SSean Hefty 		return -ENOSPC;
1448c8f6a362SSean Hefty 
14495bc2b7b3SSean Hefty 	addr = (struct sockaddr *) &cmd->addr;
14500c81ffc6SLeon Romanovsky 	if (cmd->addr_size != rdma_addr_size(addr))
1451ab15c95aSAlex Vesker 		return -EINVAL;
1452ab15c95aSAlex Vesker 
1453ab15c95aSAlex Vesker 	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
1454ab15c95aSAlex Vesker 		join_state = BIT(FULLMEMBER_JOIN);
1455ab15c95aSAlex Vesker 	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
1456ab15c95aSAlex Vesker 		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
1457ab15c95aSAlex Vesker 	else
14585bc2b7b3SSean Hefty 		return -EINVAL;
1459c8f6a362SSean Hefty 
14608b77586bSJason Gunthorpe 	ctx = ucma_get_ctx_dev(file, cmd->id);
1461c8f6a362SSean Hefty 	if (IS_ERR(ctx))
1462c8f6a362SSean Hefty 		return PTR_ERR(ctx);
1463c8f6a362SSean Hefty 
146495fe5109SJason Gunthorpe 	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
14656aea938fSJulien Brunel 	if (!mc) {
14666aea938fSJulien Brunel 		ret = -ENOMEM;
14674f680cb9SAlex Dewar 		goto err_put_ctx;
1468c8f6a362SSean Hefty 	}
146995fe5109SJason Gunthorpe 
147095fe5109SJason Gunthorpe 	mc->ctx = ctx;
1471ab15c95aSAlex Vesker 	mc->join_state = join_state;
14725bc2b7b3SSean Hefty 	mc->uid = cmd->uid;
14735bc2b7b3SSean Hefty 	memcpy(&mc->addr, addr, cmd->addr_size);
147495fe5109SJason Gunthorpe 
147536e8169eSLeon Romanovsky 	xa_lock(&multicast_table);
147636e8169eSLeon Romanovsky 	if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
147795fe5109SJason Gunthorpe 		     GFP_KERNEL)) {
147895fe5109SJason Gunthorpe 		ret = -ENOMEM;
14794f680cb9SAlex Dewar 		goto err_free_mc;
148095fe5109SJason Gunthorpe 	}
148195fe5109SJason Gunthorpe 
148236e8169eSLeon Romanovsky 	list_add_tail(&mc->list, &ctx->mc_list);
148336e8169eSLeon Romanovsky 	xa_unlock(&multicast_table);
148436e8169eSLeon Romanovsky 
14857c119107SJason Gunthorpe 	mutex_lock(&ctx->mutex);
1486ab15c95aSAlex Vesker 	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
1487ab15c95aSAlex Vesker 				  join_state, mc);
14887c119107SJason Gunthorpe 	mutex_unlock(&ctx->mutex);
1489c8f6a362SSean Hefty 	if (ret)
14904f680cb9SAlex Dewar 		goto err_xa_erase;
1491c8f6a362SSean Hefty 
1492c8f6a362SSean Hefty 	resp.id = mc->id;
14936f57c933SJason Gunthorpe 	if (copy_to_user(u64_to_user_ptr(cmd->response),
1494c8f6a362SSean Hefty 			 &resp, sizeof(resp))) {
1495c8f6a362SSean Hefty 		ret = -EFAULT;
14964f680cb9SAlex Dewar 		goto err_leave_multicast;
1497c8f6a362SSean Hefty 	}
1498c8f6a362SSean Hefty 
14994dfd5321SMatthew Wilcox 	xa_store(&multicast_table, mc->id, mc, 0);
1500cb2595c1SCong Wang 
1501c8f6a362SSean Hefty 	ucma_put_ctx(ctx);
1502c8f6a362SSean Hefty 	return 0;
1503c8f6a362SSean Hefty 
15044f680cb9SAlex Dewar err_leave_multicast:
150538e03d09SJason Gunthorpe 	mutex_lock(&ctx->mutex);
15063f446754SRoland Dreier 	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
150738e03d09SJason Gunthorpe 	mutex_unlock(&ctx->mutex);
1508c8f6a362SSean Hefty 	ucma_cleanup_mc_events(mc);
15094f680cb9SAlex Dewar err_xa_erase:
151036e8169eSLeon Romanovsky 	xa_lock(&multicast_table);
151136e8169eSLeon Romanovsky 	list_del(&mc->list);
151236e8169eSLeon Romanovsky 	__xa_erase(&multicast_table, mc->id);
15134f680cb9SAlex Dewar err_free_mc:
151436e8169eSLeon Romanovsky 	xa_unlock(&multicast_table);
1515c8f6a362SSean Hefty 	kfree(mc);
15164f680cb9SAlex Dewar err_put_ctx:
1517c8f6a362SSean Hefty 	ucma_put_ctx(ctx);
1518c8f6a362SSean Hefty 	return ret;
1519c8f6a362SSean Hefty }
1520c8f6a362SSean Hefty 
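Worth noting about ucma_process_join() above: __xa_alloc() reserves the multicast ID with a NULL entry, so a concurrent lookup of that ID (for example in ucma_leave_multicast()) sees NULL and fails with -ENOENT until xa_store() publishes the mc after the copy_to_user() has succeeded. The following is a condensed sketch of that reserve-then-publish idiom with hypothetical names (demo_table, demo_create), not a copy of the driver's own flow.

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(demo_table);

/* Sketch only: allocate an ID immediately, publish the object later. */
static int demo_create(void *obj, u32 *id_out)
{
	u32 id;
	int ret;

	/* Reserve an ID; xa_load(&demo_table, id) still returns NULL. */
	ret = xa_alloc(&demo_table, &id, NULL, xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... hand 'id' out, perform setup that may still fail ... */

	/* Publish: lookups of 'id' now return 'obj'. */
	xa_store(&demo_table, id, obj, GFP_KERNEL);
	*id_out = id;
	return 0;
}
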
15215bc2b7b3SSean Hefty static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
15225bc2b7b3SSean Hefty 				      const char __user *inbuf,
15235bc2b7b3SSean Hefty 				      int in_len, int out_len)
15245bc2b7b3SSean Hefty {
15255bc2b7b3SSean Hefty 	struct rdma_ucm_join_ip_mcast cmd;
15265bc2b7b3SSean Hefty 	struct rdma_ucm_join_mcast join_cmd;
15275bc2b7b3SSean Hefty 
15285bc2b7b3SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
15295bc2b7b3SSean Hefty 		return -EFAULT;
15305bc2b7b3SSean Hefty 
15315bc2b7b3SSean Hefty 	join_cmd.response = cmd.response;
15325bc2b7b3SSean Hefty 	join_cmd.uid = cmd.uid;
15335bc2b7b3SSean Hefty 	join_cmd.id = cmd.id;
153484652aefSRoland Dreier 	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
15350c81ffc6SLeon Romanovsky 	if (!join_cmd.addr_size)
15360c81ffc6SLeon Romanovsky 		return -EINVAL;
15370c81ffc6SLeon Romanovsky 
1538ab15c95aSAlex Vesker 	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
15395bc2b7b3SSean Hefty 	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
15405bc2b7b3SSean Hefty 
15415bc2b7b3SSean Hefty 	return ucma_process_join(file, &join_cmd, out_len);
15425bc2b7b3SSean Hefty }
15435bc2b7b3SSean Hefty 
15445bc2b7b3SSean Hefty static ssize_t ucma_join_multicast(struct ucma_file *file,
15455bc2b7b3SSean Hefty 				   const char __user *inbuf,
15465bc2b7b3SSean Hefty 				   int in_len, int out_len)
15475bc2b7b3SSean Hefty {
15485bc2b7b3SSean Hefty 	struct rdma_ucm_join_mcast cmd;
15495bc2b7b3SSean Hefty 
15505bc2b7b3SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
15515bc2b7b3SSean Hefty 		return -EFAULT;
15525bc2b7b3SSean Hefty 
155384652aefSRoland Dreier 	if (!rdma_addr_size_kss(&cmd.addr))
15540c81ffc6SLeon Romanovsky 		return -EINVAL;
15550c81ffc6SLeon Romanovsky 
15565bc2b7b3SSean Hefty 	return ucma_process_join(file, &cmd, out_len);
15575bc2b7b3SSean Hefty }
15585bc2b7b3SSean Hefty 
1559c8f6a362SSean Hefty static ssize_t ucma_leave_multicast(struct ucma_file *file,
1560c8f6a362SSean Hefty 				    const char __user *inbuf,
1561c8f6a362SSean Hefty 				    int in_len, int out_len)
1562c8f6a362SSean Hefty {
1563c8f6a362SSean Hefty 	struct rdma_ucm_destroy_id cmd;
1564c8f6a362SSean Hefty 	struct rdma_ucm_destroy_id_resp resp;
1565c8f6a362SSean Hefty 	struct ucma_multicast *mc;
1566c8f6a362SSean Hefty 	int ret = 0;
1567c8f6a362SSean Hefty 
1568c8f6a362SSean Hefty 	if (out_len < sizeof(resp))
1569c8f6a362SSean Hefty 		return -ENOSPC;
1570c8f6a362SSean Hefty 
1571c8f6a362SSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1572c8f6a362SSean Hefty 		return -EFAULT;
1573c8f6a362SSean Hefty 
15744dfd5321SMatthew Wilcox 	xa_lock(&multicast_table);
15754dfd5321SMatthew Wilcox 	mc = xa_load(&multicast_table, cmd.id);
1576c8f6a362SSean Hefty 	if (!mc)
1577c8f6a362SSean Hefty 		mc = ERR_PTR(-ENOENT);
157809e328e4SJason Gunthorpe 	else if (READ_ONCE(mc->ctx->file) != file)
1579c8f6a362SSean Hefty 		mc = ERR_PTR(-EINVAL);
1580167b95ecSJason Gunthorpe 	else if (!refcount_inc_not_zero(&mc->ctx->ref))
15817e967fd0SJason Gunthorpe 		mc = ERR_PTR(-ENXIO);
1582c8f6a362SSean Hefty 
1583c8f6a362SSean Hefty 	if (IS_ERR(mc)) {
158436e8169eSLeon Romanovsky 		xa_unlock(&multicast_table);
1585c8f6a362SSean Hefty 		ret = PTR_ERR(mc);
1586c8f6a362SSean Hefty 		goto out;
1587c8f6a362SSean Hefty 	}
1588c8f6a362SSean Hefty 
158936e8169eSLeon Romanovsky 	list_del(&mc->list);
159036e8169eSLeon Romanovsky 	__xa_erase(&multicast_table, mc->id);
159136e8169eSLeon Romanovsky 	xa_unlock(&multicast_table);
159236e8169eSLeon Romanovsky 
15937c119107SJason Gunthorpe 	mutex_lock(&mc->ctx->mutex);
15943f446754SRoland Dreier 	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
15957c119107SJason Gunthorpe 	mutex_unlock(&mc->ctx->mutex);
15967c119107SJason Gunthorpe 
1597c8f6a362SSean Hefty 	ucma_cleanup_mc_events(mc);
1598c8f6a362SSean Hefty 
1599c8f6a362SSean Hefty 	ucma_put_ctx(mc->ctx);
1600c8f6a362SSean Hefty 	resp.events_reported = mc->events_reported;
1601c8f6a362SSean Hefty 	kfree(mc);
1602c8f6a362SSean Hefty 
16036f57c933SJason Gunthorpe 	if (copy_to_user(u64_to_user_ptr(cmd.response),
1604c8f6a362SSean Hefty 			 &resp, sizeof(resp)))
1605c8f6a362SSean Hefty 		ret = -EFAULT;
1606c8f6a362SSean Hefty out:
1607c8f6a362SSean Hefty 	return ret;
1608c8f6a362SSean Hefty }
1609c8f6a362SSean Hefty 
161088314e4dSSean Hefty static ssize_t ucma_migrate_id(struct ucma_file *new_file,
161188314e4dSSean Hefty 			       const char __user *inbuf,
161288314e4dSSean Hefty 			       int in_len, int out_len)
161388314e4dSSean Hefty {
161488314e4dSSean Hefty 	struct rdma_ucm_migrate_id cmd;
161588314e4dSSean Hefty 	struct rdma_ucm_migrate_resp resp;
1616f5449e74SJason Gunthorpe 	struct ucma_event *uevent, *tmp;
161788314e4dSSean Hefty 	struct ucma_context *ctx;
1618f5449e74SJason Gunthorpe 	LIST_HEAD(event_list);
16192903ff01SAl Viro 	struct fd f;
162088314e4dSSean Hefty 	struct ucma_file *cur_file;
162188314e4dSSean Hefty 	int ret = 0;
162288314e4dSSean Hefty 
162388314e4dSSean Hefty 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
162488314e4dSSean Hefty 		return -EFAULT;
162588314e4dSSean Hefty 
162688314e4dSSean Hefty 	/* Get current fd to protect against it being closed */
16272903ff01SAl Viro 	f = fdget(cmd.fd);
16282903ff01SAl Viro 	if (!f.file)
162988314e4dSSean Hefty 		return -ENOENT;
16300d23ba60SJann Horn 	if (f.file->f_op != &ucma_fops) {
16310d23ba60SJann Horn 		ret = -EINVAL;
16320d23ba60SJann Horn 		goto file_put;
16330d23ba60SJann Horn 	}
1634f5449e74SJason Gunthorpe 	cur_file = f.file->private_data;
163588314e4dSSean Hefty 
163688314e4dSSean Hefty 	/* Validate current fd and prevent destruction of id. */
1637f5449e74SJason Gunthorpe 	ctx = ucma_get_ctx(cur_file, cmd.id);
163888314e4dSSean Hefty 	if (IS_ERR(ctx)) {
163988314e4dSSean Hefty 		ret = PTR_ERR(ctx);
164088314e4dSSean Hefty 		goto file_put;
164188314e4dSSean Hefty 	}
164288314e4dSSean Hefty 
164309e328e4SJason Gunthorpe 	rdma_lock_handler(ctx->cm_id);
1644f5449e74SJason Gunthorpe 	/*
1645f5449e74SJason Gunthorpe 	 * ctx->file can only be changed under the handler & xa_lock. xa_load()
1646f5449e74SJason Gunthorpe 	 * must be checked again to ensure the ctx hasn't begun destruction
1647f5449e74SJason Gunthorpe 	 * since the ucma_get_ctx().
1648f5449e74SJason Gunthorpe 	 */
1649f5449e74SJason Gunthorpe 	xa_lock(&ctx_table);
1650f5449e74SJason Gunthorpe 	if (_ucma_find_context(cmd.id, cur_file) != ctx) {
1651f5449e74SJason Gunthorpe 		xa_unlock(&ctx_table);
1652f5449e74SJason Gunthorpe 		ret = -ENOENT;
1653f5449e74SJason Gunthorpe 		goto err_unlock;
1654f5449e74SJason Gunthorpe 	}
1655f5449e74SJason Gunthorpe 	ctx->file = new_file;
1656f5449e74SJason Gunthorpe 	xa_unlock(&ctx_table);
1657f5449e74SJason Gunthorpe 
165898837c6cSJason Gunthorpe 	mutex_lock(&cur_file->mut);
1659f5449e74SJason Gunthorpe 	list_del(&ctx->list);
1660f5449e74SJason Gunthorpe 	/*
1661f5449e74SJason Gunthorpe 	 * At this point lock_handler() prevents addition of new uevents for
1662f5449e74SJason Gunthorpe 	 * this ctx.
1663f5449e74SJason Gunthorpe 	 */
1664f5449e74SJason Gunthorpe 	list_for_each_entry_safe(uevent, tmp, &cur_file->event_list, list)
1665f5449e74SJason Gunthorpe 		if (uevent->ctx == ctx)
1666f5449e74SJason Gunthorpe 			list_move_tail(&uevent->list, &event_list);
166788314e4dSSean Hefty 	resp.events_reported = ctx->events_reported;
166898837c6cSJason Gunthorpe 	mutex_unlock(&cur_file->mut);
166988314e4dSSean Hefty 
1670f5449e74SJason Gunthorpe 	mutex_lock(&new_file->mut);
1671f5449e74SJason Gunthorpe 	list_add_tail(&ctx->list, &new_file->ctx_list);
1672f5449e74SJason Gunthorpe 	list_splice_tail(&event_list, &new_file->event_list);
1673f5449e74SJason Gunthorpe 	mutex_unlock(&new_file->mut);
167488314e4dSSean Hefty 
16756f57c933SJason Gunthorpe 	if (copy_to_user(u64_to_user_ptr(cmd.response),
167688314e4dSSean Hefty 			 &resp, sizeof(resp)))
167788314e4dSSean Hefty 		ret = -EFAULT;
167888314e4dSSean Hefty 
1679f5449e74SJason Gunthorpe err_unlock:
168009e328e4SJason Gunthorpe 	rdma_unlock_handler(ctx->cm_id);
168188314e4dSSean Hefty 	ucma_put_ctx(ctx);
168288314e4dSSean Hefty file_put:
16832903ff01SAl Viro 	fdput(f);
168488314e4dSSean Hefty 	return ret;
168588314e4dSSean Hefty }
168688314e4dSSean Hefty 
168775216638SSean Hefty static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
168875216638SSean Hefty 				   const char __user *inbuf,
168975216638SSean Hefty 				   int in_len, int out_len) = {
169075216638SSean Hefty 	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
169175216638SSean Hefty 	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
169205ad9457SSean Hefty 	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
169305ad9457SSean Hefty 	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
169475216638SSean Hefty 	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
169575216638SSean Hefty 	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
169675216638SSean Hefty 	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
169775216638SSean Hefty 	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
169875216638SSean Hefty 	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
169975216638SSean Hefty 	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
170075216638SSean Hefty 	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
170175216638SSean Hefty 	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
170275216638SSean Hefty 	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
170375216638SSean Hefty 	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
17047ce86409SSean Hefty 	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
170575216638SSean Hefty 	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
170605ad9457SSean Hefty 	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
1707c8f6a362SSean Hefty 	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
1708ee7aed45SSean Hefty 	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
1709eebe4c3aSSean Hefty 	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
1710209cf2a7SSean Hefty 	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
17115bc2b7b3SSean Hefty 	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
17125bc2b7b3SSean Hefty 	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
171375216638SSean Hefty };
171475216638SSean Hefty 
171575216638SSean Hefty static ssize_t ucma_write(struct file *filp, const char __user *buf,
171675216638SSean Hefty 			  size_t len, loff_t *pos)
171775216638SSean Hefty {
171875216638SSean Hefty 	struct ucma_file *file = filp->private_data;
171975216638SSean Hefty 	struct rdma_ucm_cmd_hdr hdr;
172075216638SSean Hefty 	ssize_t ret;
172175216638SSean Hefty 
1722f73a1dbcSLeon Romanovsky 	if (!ib_safe_file_access(filp)) {
1723ab27f45fSWenpeng Liang 		pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
1724ab27f45fSWenpeng Liang 			    __func__, task_tgid_vnr(current), current->comm);
1725e6bd18f5SJason Gunthorpe 		return -EACCES;
1726f73a1dbcSLeon Romanovsky 	}
1727e6bd18f5SJason Gunthorpe 
172875216638SSean Hefty 	if (len < sizeof(hdr))
172975216638SSean Hefty 		return -EINVAL;
173075216638SSean Hefty 
173175216638SSean Hefty 	if (copy_from_user(&hdr, buf, sizeof(hdr)))
173275216638SSean Hefty 		return -EFAULT;
173375216638SSean Hefty 
1734caf6e3f2SHefty, Sean 	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
173575216638SSean Hefty 		return -EINVAL;
1736a3671a4fSGustavo A. R. Silva 	hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table));
173775216638SSean Hefty 
173875216638SSean Hefty 	if (hdr.in + sizeof(hdr) > len)
173975216638SSean Hefty 		return -EINVAL;
174075216638SSean Hefty 
174175216638SSean Hefty 	if (!ucma_cmd_table[hdr.cmd])
174275216638SSean Hefty 		return -ENOSYS;
174375216638SSean Hefty 
174475216638SSean Hefty 	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
174575216638SSean Hefty 	if (!ret)
174675216638SSean Hefty 		ret = len;
174775216638SSean Hefty 
174875216638SSean Hefty 	return ret;
174975216638SSean Hefty }
175075216638SSean Hefty 
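For orientation, here is a minimal userspace sketch of the contract ucma_write() enforces: each write() carries a struct rdma_ucm_cmd_hdr followed by hdr.in bytes of command payload, and the whole message must arrive in a single write. The helper name ucma_send_cmd and the example payload are illustrative assumptions; real applications normally go through librdmacm rather than the character device.

/* Userspace sketch (illustrative only, not part of this driver). */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <rdma/rdma_user_cm.h>	/* struct rdma_ucm_cmd_hdr, command numbers */

static ssize_t ucma_send_cmd(int fd, __u32 cmd, const void *in, __u16 in_len,
			     __u16 out_len)
{
	char buf[sizeof(struct rdma_ucm_cmd_hdr) + 256];
	struct rdma_ucm_cmd_hdr *hdr = (struct rdma_ucm_cmd_hdr *)buf;

	if (in_len > 256)
		return -1;
	hdr->cmd = cmd;		/* index into ucma_cmd_table above */
	hdr->in = in_len;	/* payload bytes that follow the header */
	hdr->out = out_len;	/* size of the response buffer, if any */
	memcpy(buf + sizeof(*hdr), in, in_len);

	/* ucma_write() demands len >= sizeof(hdr) and hdr.in + sizeof(hdr) <= len. */
	return write(fd, buf, sizeof(*hdr) + in_len);
}

int main(void)
{
	/* id would come from an earlier RDMA_USER_CM_CMD_CREATE_ID exchange. */
	struct rdma_ucm_disconnect cmd = { .id = 0 };
	int fd = open("/dev/infiniband/rdma_cm", O_RDWR);

	if (fd < 0)
		return 1;
	ucma_send_cmd(fd, RDMA_USER_CM_CMD_DISCONNECT, &cmd, sizeof(cmd), 0);
	close(fd);
	return 0;
}
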
1751afc9a42bSAl Viro static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
175275216638SSean Hefty {
175375216638SSean Hefty 	struct ucma_file *file = filp->private_data;
1754afc9a42bSAl Viro 	__poll_t mask = 0;
175575216638SSean Hefty 
175675216638SSean Hefty 	poll_wait(filp, &file->poll_wait, wait);
175775216638SSean Hefty 
175875216638SSean Hefty 	if (!list_empty(&file->event_list))
1759a9a08845SLinus Torvalds 		mask = EPOLLIN | EPOLLRDNORM;
176075216638SSean Hefty 
176175216638SSean Hefty 	return mask;
176275216638SSean Hefty }
176375216638SSean Hefty 
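Correspondingly, an event loop on the userspace side would block in poll() until ucma_poll() reports the file readable, then fetch the event with the GET_EVENT command. A brief, hedged sketch; the helper name ucma_wait_for_event is hypothetical.

#include <poll.h>

/* Userspace sketch: wait until at least one event is queued on 'fd'. */
static int ucma_wait_for_event(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	/* ucma_poll() sets EPOLLIN | EPOLLRDNORM once file->event_list is non-empty. */
	if (poll(&pfd, 1, -1) <= 0)
		return -1;

	/* An RDMA_USER_CM_CMD_GET_EVENT write would now complete without blocking. */
	return 0;
}
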
1764f7a6117eSRoland Dreier /*
1765f7a6117eSRoland Dreier  * ucma_open() does not need the BKL:
1766f7a6117eSRoland Dreier  *
1767f7a6117eSRoland Dreier  *  - no global state is referred to;
1768f7a6117eSRoland Dreier  *  - there is no ioctl method to race against;
1769f7a6117eSRoland Dreier  *  - no further module initialization is required for open to work
1770f7a6117eSRoland Dreier  *    after the device is registered.
1771f7a6117eSRoland Dreier  */
177275216638SSean Hefty static int ucma_open(struct inode *inode, struct file *filp)
177375216638SSean Hefty {
177475216638SSean Hefty 	struct ucma_file *file;
177575216638SSean Hefty 
177675216638SSean Hefty 	file = kmalloc(sizeof *file, GFP_KERNEL);
177775216638SSean Hefty 	if (!file)
177875216638SSean Hefty 		return -ENOMEM;
177975216638SSean Hefty 
178075216638SSean Hefty 	INIT_LIST_HEAD(&file->event_list);
178175216638SSean Hefty 	INIT_LIST_HEAD(&file->ctx_list);
178275216638SSean Hefty 	init_waitqueue_head(&file->poll_wait);
178375216638SSean Hefty 	mutex_init(&file->mut);
178475216638SSean Hefty 
178575216638SSean Hefty 	filp->private_data = file;
178675216638SSean Hefty 	file->filp = filp;
1787bc1db9afSRoland Dreier 
1788c5bf68feSKirill Smelkov 	return stream_open(inode, filp);
178975216638SSean Hefty }
179075216638SSean Hefty 
179175216638SSean Hefty static int ucma_close(struct inode *inode, struct file *filp)
179275216638SSean Hefty {
179375216638SSean Hefty 	struct ucma_file *file = filp->private_data;
179475216638SSean Hefty 
179507e266a7SJason Gunthorpe 	/*
1796a1d33b70SJason Gunthorpe 	 * All paths that touch ctx_list or a ctx starting from write() are
1797a1d33b70SJason Gunthorpe 	 * prevented by this being a FD release function. The list_add_tail() in
1798a1d33b70SJason Gunthorpe 	 * ucma_connect_event_handler() can run concurrently, however it only
1799a1d33b70SJason Gunthorpe 	 * adds to the list *after* a listening ID. By only reading the first of
18008ae291ccSJason Gunthorpe 	 * the list, and relying on ucma_destroy_private_ctx() to block
1801a1d33b70SJason Gunthorpe 	 * ucma_connect_event_handler(), no additional locking is needed.
180207e266a7SJason Gunthorpe 	 */
1803a1d33b70SJason Gunthorpe 	while (!list_empty(&file->ctx_list)) {
1804a1d33b70SJason Gunthorpe 		struct ucma_context *ctx = list_first_entry(
1805a1d33b70SJason Gunthorpe 			&file->ctx_list, struct ucma_context, list);
1806a1d33b70SJason Gunthorpe 
18078ae291ccSJason Gunthorpe 		WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
18088ae291ccSJason Gunthorpe 				   GFP_KERNEL) != ctx);
18098ae291ccSJason Gunthorpe 		ucma_destroy_private_ctx(ctx);
181075216638SSean Hefty 	}
181175216638SSean Hefty 	kfree(file);
181275216638SSean Hefty 	return 0;
181375216638SSean Hefty }
181475216638SSean Hefty 
18152b8693c0SArjan van de Ven static const struct file_operations ucma_fops = {
181675216638SSean Hefty 	.owner 	 = THIS_MODULE,
181775216638SSean Hefty 	.open 	 = ucma_open,
181875216638SSean Hefty 	.release = ucma_close,
181975216638SSean Hefty 	.write	 = ucma_write,
182075216638SSean Hefty 	.poll    = ucma_poll,
1821bc1db9afSRoland Dreier 	.llseek	 = no_llseek,
182275216638SSean Hefty };
182375216638SSean Hefty 
182475216638SSean Hefty static struct miscdevice ucma_misc = {
182575216638SSean Hefty 	.minor		= MISC_DYNAMIC_MINOR,
182675216638SSean Hefty 	.name		= "rdma_cm",
182704ea2f81SRoland Dreier 	.nodename	= "infiniband/rdma_cm",
182804ea2f81SRoland Dreier 	.mode		= 0666,
182975216638SSean Hefty 	.fops		= &ucma_fops,
183075216638SSean Hefty };
183175216638SSean Hefty 
18328f71bb00SJason Gunthorpe static int ucma_get_global_nl_info(struct ib_client_nl_info *res)
18338f71bb00SJason Gunthorpe {
18348f71bb00SJason Gunthorpe 	res->abi = RDMA_USER_CM_ABI_VERSION;
18358f71bb00SJason Gunthorpe 	res->cdev = ucma_misc.this_device;
18368f71bb00SJason Gunthorpe 	return 0;
18378f71bb00SJason Gunthorpe }
18388f71bb00SJason Gunthorpe 
18398f71bb00SJason Gunthorpe static struct ib_client rdma_cma_client = {
18408f71bb00SJason Gunthorpe 	.name = "rdma_cm",
18418f71bb00SJason Gunthorpe 	.get_global_nl_info = ucma_get_global_nl_info,
18428f71bb00SJason Gunthorpe };
18438f71bb00SJason Gunthorpe MODULE_ALIAS_RDMA_CLIENT("rdma_cm");
18448f71bb00SJason Gunthorpe 
1845c5b8eaf8SYueHaibing static ssize_t abi_version_show(struct device *dev,
1846c5b8eaf8SYueHaibing 				struct device_attribute *attr, char *buf)
184775216638SSean Hefty {
18481c7fd726SJoe Perches 	return sysfs_emit(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
184975216638SSean Hefty }
1850c5b8eaf8SYueHaibing static DEVICE_ATTR_RO(abi_version);
185175216638SSean Hefty 
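The attribute registered above is readable from userspace. Assuming the conventional sysfs location for a misc device named "rdma_cm" (a path this file does not spell out), a trivial check might look like:

#include <stdio.h>

int main(void)
{
	char buf[16] = "";
	FILE *f = fopen("/sys/class/misc/rdma_cm/abi_version", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("rdma_ucm ABI version: %s", buf);	/* prints RDMA_USER_CM_ABI_VERSION */
	fclose(f);
	return 0;
}
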
185275216638SSean Hefty static int __init ucma_init(void)
185375216638SSean Hefty {
185475216638SSean Hefty 	int ret;
185575216638SSean Hefty 
185675216638SSean Hefty 	ret = misc_register(&ucma_misc);
185775216638SSean Hefty 	if (ret)
185875216638SSean Hefty 		return ret;
185975216638SSean Hefty 
186075216638SSean Hefty 	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
186175216638SSean Hefty 	if (ret) {
1862aba25a3eSParav Pandit 		pr_err("rdma_ucm: couldn't create abi_version attr\n");
186397cb7e40SSteve Wise 		goto err1;
186497cb7e40SSteve Wise 	}
186597cb7e40SSteve Wise 
1866ec8f23ceSEric W. Biederman 	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
186797cb7e40SSteve Wise 	if (!ucma_ctl_table_hdr) {
1868aba25a3eSParav Pandit 		pr_err("rdma_ucm: couldn't register sysctl paths\n");
186997cb7e40SSteve Wise 		ret = -ENOMEM;
187097cb7e40SSteve Wise 		goto err2;
187175216638SSean Hefty 	}
18728f71bb00SJason Gunthorpe 
18738f71bb00SJason Gunthorpe 	ret = ib_register_client(&rdma_cma_client);
18748f71bb00SJason Gunthorpe 	if (ret)
18758f71bb00SJason Gunthorpe 		goto err3;
18768f71bb00SJason Gunthorpe 
187775216638SSean Hefty 	return 0;
18788f71bb00SJason Gunthorpe err3:
18798f71bb00SJason Gunthorpe 	unregister_net_sysctl_table(ucma_ctl_table_hdr);
188097cb7e40SSteve Wise err2:
188197cb7e40SSteve Wise 	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
188297cb7e40SSteve Wise err1:
188375216638SSean Hefty 	misc_deregister(&ucma_misc);
188475216638SSean Hefty 	return ret;
188575216638SSean Hefty }
188675216638SSean Hefty 
188775216638SSean Hefty static void __exit ucma_cleanup(void)
188875216638SSean Hefty {
18898f71bb00SJason Gunthorpe 	ib_unregister_client(&rdma_cma_client);
18905dd3df10SEric W. Biederman 	unregister_net_sysctl_table(ucma_ctl_table_hdr);
189175216638SSean Hefty 	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
189275216638SSean Hefty 	misc_deregister(&ucma_misc);
189375216638SSean Hefty }
189475216638SSean Hefty 
189575216638SSean Hefty module_init(ucma_init);
189675216638SSean Hefty module_exit(ucma_cleanup);
1897