/*
 * Copyright (c) 2016, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <linux/rcupdate.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/rdma_user_ioctl.h>
#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

int uverbs_ns_idx(u16 *id, unsigned int ns_count)
{
	int ret = (*id & UVERBS_ID_NS_MASK) >> UVERBS_ID_NS_SHIFT;

	if (ret >= ns_count)
		return -EINVAL;

	*id &= ~UVERBS_ID_NS_MASK;
	return ret;
}
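
/*
 * Decomposition sketch (illustrative only; the exact layout is given by
 * UVERBS_ID_NS_MASK/UVERBS_ID_NS_SHIFT in rdma/uverbs_ioctl.h): the high
 * bits of a 16-bit id select a namespace bucket and the low bits index into
 * that bucket. Assuming a shift of 12, looking up id 0x1005 in a two-bucket
 * table would go:
 *
 *	u16 id = 0x1005;
 *	int ns = uverbs_ns_idx(&id, 2);
 *	// ns == 1, id == 0x005: bucket 1, index 5
 */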

const struct uverbs_object_spec *uverbs_get_object(const struct ib_device *ibdev,
						   uint16_t object)
{
	const struct uverbs_root_spec *object_hash = ibdev->specs_root;
	const struct uverbs_object_spec_hash *objects;
	int ret = uverbs_ns_idx(&object, object_hash->num_buckets);

	if (ret < 0)
		return NULL;

	objects = object_hash->object_buckets[ret];

	if (object >= objects->num_objects)
		return NULL;

	return objects->objects[object];
}

const struct uverbs_method_spec *uverbs_get_method(const struct uverbs_object_spec *object,
						   uint16_t method)
{
	const struct uverbs_method_spec_hash *methods;
	int ret = uverbs_ns_idx(&method, object->num_buckets);

	if (ret < 0)
		return NULL;

	methods = object->method_buckets[ret];
	if (method >= methods->num_methods)
		return NULL;

	return methods->methods[method];
}

void uverbs_uobject_get(struct ib_uobject *uobject)
{
	kref_get(&uobject->ref);
}

static void uverbs_uobject_free(struct kref *ref)
{
	struct ib_uobject *uobj =
		container_of(ref, struct ib_uobject, ref);

	if (uobj->type->type_class->needs_kfree_rcu)
		kfree_rcu(uobj, rcu);
	else
		kfree(uobj);
}

void uverbs_uobject_put(struct ib_uobject *uobject)
{
	kref_put(&uobject->ref, uverbs_uobject_free);
}

static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
{
	/*
	 * When shared access is required, we use a positive counter. Each
	 * shared access request checks that the value != -1 and increments it.
	 * Exclusive access is required for operations like write or destroy.
	 * In exclusive access mode, we check that the counter is zero (nobody
	 * claimed this object) and set it to -1. Releasing a shared access
	 * lock is done simply by decrementing the counter. As for exclusive
	 * access locks, since only a single one of them is allowed
	 * concurrently, setting the counter back to zero is enough for
	 * releasing this lock.
	 */
	if (!exclusive)
		return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
			-EBUSY : 0;

	/* lock is either WRITE or DESTROY - should be exclusive */
	return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
}
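
/*
 * State-transition sketch for the counter scheme above (usecnt values,
 * illustrative only):
 *
 *	shared:     0 -> 1 -> 2 -> ... -> 1 -> 0  (inc on get, dec on put)
 *	exclusive:  0 -> -1 -> 0                  (cmpxchg on get, set on put)
 *
 * A shared locker that finds -1, or an exclusive locker that finds anything
 * other than 0, gets -EBUSY and must bail out.
 */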

static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
				     const struct uverbs_obj_type *type)
{
	struct ib_uobject *uobj = kzalloc(type->obj_size, GFP_KERNEL);

	if (!uobj)
		return ERR_PTR(-ENOMEM);
	/*
	 * The user_handle is filled by the handler; the object is added
	 * to the uobjects list in the commit stage.
	 */
	uobj->context = context;
	uobj->type = type;
	atomic_set(&uobj->usecnt, 0);
	kref_init(&uobj->ref);

	return uobj;
}

static int idr_add_uobj(struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&uobj->context->ufile->idr_lock);

	/*
	 * We start by allocating an idr entry that points to NULL. This
	 * represents an object which isn't initialized yet. We'll replace
	 * it with the real object once we commit.
	 */
	ret = idr_alloc(&uobj->context->ufile->idr, NULL, 0,
			min_t(unsigned long, U32_MAX - 1, INT_MAX), GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&uobj->context->ufile->idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

/*
 * This only removes the uobject from the idr; uverbs_uobject_put() is
 * still required to release the memory.
 */
static void uverbs_idr_remove_uobj(struct ib_uobject *uobj)
{
	spin_lock(&uobj->context->ufile->idr_lock);
	idr_remove(&uobj->context->ufile->idr, uobj->id);
	spin_unlock(&uobj->context->ufile->idr_lock);
}

/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *type,
						 struct ib_ucontext *ucontext,
						 int id, bool exclusive)
{
	struct ib_uobject *uobj;

	rcu_read_lock();
	/*
	 * The idr_find() is protected by RCU, but the object it returns may
	 * already have had its last reference dropped by a racing destroy.
	 * Only take a reference if the refcount hasn't reached zero yet; the
	 * memory itself stays valid until the RCU grace period ends.
	 */
	uobj = idr_find(&ucontext->ufile->idr, id);
	if (!uobj) {
		uobj = ERR_PTR(-ENOENT);
		goto free;
	}

	if (!kref_get_unless_zero(&uobj->ref))
		uobj = ERR_PTR(-ENOENT);
free:
	rcu_read_unlock();
	return uobj;
}

static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *type,
						struct ib_ucontext *ucontext,
						int id, bool exclusive)
{
	struct file *f;
	struct ib_uobject *uobject;
	const struct uverbs_obj_fd_type *fd_type =
		container_of(type, struct uverbs_obj_fd_type, type);

	if (exclusive)
		return ERR_PTR(-EOPNOTSUPP);

	f = fget(id);
	if (!f)
		return ERR_PTR(-EBADF);

	uobject = f->private_data;
	/*
	 * fget(id) ensures we are not currently running uverbs_close_fd,
	 * and the caller is expected to ensure that uverbs_close_fd is never
	 * done while a call to lookup is possible.
	 */
	if (f->f_op != fd_type->fops) {
		fput(f);
		return ERR_PTR(-EBADF);
	}

	uverbs_uobject_get(uobject);
	return uobject;
}

struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
					   struct ib_ucontext *ucontext,
					   int id, bool exclusive)
{
	struct ib_uobject *uobj;
	int ret;

	uobj = type->type_class->lookup_get(type, ucontext, id, exclusive);
	if (IS_ERR(uobj))
		return uobj;

	if (uobj->type != type) {
		ret = -EINVAL;
		goto free;
	}

	ret = uverbs_try_lock_object(uobj, exclusive);
	if (ret) {
		WARN(ucontext->cleanup_reason,
		     "ib_uverbs: Trying to lookup_get while cleanup of the context is running\n");
		goto free;
	}

	return uobj;
free:
	uobj->type->type_class->lookup_put(uobj, exclusive);
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}
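
/*
 * Lookup usage sketch (illustrative only; "my_type" and the surrounding
 * handler are hypothetical): a successful lookup must always be paired with
 * rdma_lookup_put_uobject() using the same value of "exclusive":
 *
 *	struct ib_uobject *uobj;
 *
 *	uobj = rdma_lookup_get_uobject(my_type, ucontext, id, false);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	// ... shared (read-only) use of uobj->object ...
 *	rdma_lookup_put_uobject(uobj, false);
 */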

static struct ib_uobject *alloc_begin_idr_uobject(const struct uverbs_obj_type *type,
						  struct ib_ucontext *ucontext)
{
	int ret;
	struct ib_uobject *uobj;

	uobj = alloc_uobj(ucontext, type);
	if (IS_ERR(uobj))
		return uobj;

	ret = idr_add_uobj(uobj);
	if (ret)
		goto uobj_put;

	ret = ib_rdmacg_try_charge(&uobj->cg_obj, ucontext->device,
				   RDMACG_RESOURCE_HCA_OBJECT);
	if (ret)
		goto idr_remove;

	return uobj;

idr_remove:
	uverbs_idr_remove_uobj(uobj);
uobj_put:
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}

static struct ib_uobject *alloc_begin_fd_uobject(const struct uverbs_obj_type *type,
						 struct ib_ucontext *ucontext)
{
	const struct uverbs_obj_fd_type *fd_type =
		container_of(type, struct uverbs_obj_fd_type, type);
	int new_fd;
	struct ib_uobject *uobj;
	struct ib_uobject_file *uobj_file;
	struct file *filp;

	new_fd = get_unused_fd_flags(O_CLOEXEC);
	if (new_fd < 0)
		return ERR_PTR(new_fd);

	uobj = alloc_uobj(ucontext, type);
	if (IS_ERR(uobj)) {
		put_unused_fd(new_fd);
		return uobj;
	}

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	filp = anon_inode_getfile(fd_type->name,
				  fd_type->fops,
				  uobj_file,
				  fd_type->flags);
	if (IS_ERR(filp)) {
		put_unused_fd(new_fd);
		uverbs_uobject_put(uobj);
		return ERR_CAST(filp);
	}

	uobj_file->uobj.id = new_fd;
	uobj_file->uobj.object = filp;
	uobj_file->ufile = ucontext->ufile;
	INIT_LIST_HEAD(&uobj->list);
	kref_get(&uobj_file->ufile->ref);

	return uobj;
}

struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type,
					    struct ib_ucontext *ucontext)
{
	return type->type_class->alloc_begin(type, ucontext);
}
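
/*
 * Allocation lifecycle sketch (illustrative only; "my_type" and
 * create_hw_object() are hypothetical, and the sketch assumes an idr-based
 * type): a new uobject becomes visible to user space only at the commit
 * step, and must be aborted on failure:
 *
 *	uobj = rdma_alloc_begin_uobject(my_type, ucontext);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	uobj->object = create_hw_object(...);
 *	if (IS_ERR(uobj->object)) {
 *		rdma_alloc_abort_uobject(uobj);
 *		return PTR_ERR(uobj->object);
 *	}
 *	return rdma_alloc_commit_uobject(uobj);
 */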

static void uverbs_uobject_add(struct ib_uobject *uobject)
{
	mutex_lock(&uobject->context->uobjects_lock);
	list_add(&uobject->list, &uobject->context->uobjects);
	mutex_unlock(&uobject->context->uobjects_lock);
}

static int __must_check remove_commit_idr_uobject(struct ib_uobject *uobj,
						  enum rdma_remove_reason why)
{
	const struct uverbs_obj_idr_type *idr_type =
		container_of(uobj->type, struct uverbs_obj_idr_type,
			     type);
	int ret = idr_type->destroy_object(uobj, why);

	/*
	 * We can only fail gracefully if the user requested to destroy the
	 * object. In all other cases, just remove whatever we can.
	 */
	if (why == RDMA_REMOVE_DESTROY && ret)
		return ret;

	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);
	uverbs_idr_remove_uobj(uobj);

	return ret;
}

static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
{
	struct ib_uobject_file *uobj_file =
		container_of(uobj, struct ib_uobject_file, uobj);
	struct file *filp = uobj->object;
	int id = uobj_file->uobj.id;

	/* Unsuccessful NEW */
	fput(filp);
	put_unused_fd(id);
}

static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
						 enum rdma_remove_reason why)
{
	const struct uverbs_obj_fd_type *fd_type =
		container_of(uobj->type, struct uverbs_obj_fd_type, type);
	struct ib_uobject_file *uobj_file =
		container_of(uobj, struct ib_uobject_file, uobj);
	int ret = fd_type->context_closed(uobj_file, why);

	if (why == RDMA_REMOVE_DESTROY && ret)
		return ret;

	if (why == RDMA_REMOVE_DURING_CLEANUP) {
		alloc_abort_fd_uobject(uobj);
		return ret;
	}

	uobj_file->uobj.context = NULL;
	return ret;
}

static void lockdep_check(struct ib_uobject *uobj, bool exclusive)
{
#ifdef CONFIG_LOCKDEP
	if (exclusive)
		WARN_ON(atomic_read(&uobj->usecnt) > 0);
	else
		WARN_ON(atomic_read(&uobj->usecnt) == -1);
#endif
}

static int __must_check _rdma_remove_commit_uobject(struct ib_uobject *uobj,
						    enum rdma_remove_reason why)
{
	int ret;
	struct ib_ucontext *ucontext = uobj->context;

	ret = uobj->type->type_class->remove_commit(uobj, why);
	if (ret && why == RDMA_REMOVE_DESTROY) {
		/* We couldn't remove the object, so just unlock the uobject */
		atomic_set(&uobj->usecnt, 0);
		uobj->type->type_class->lookup_put(uobj, true);
	} else {
		mutex_lock(&ucontext->uobjects_lock);
		list_del(&uobj->list);
		mutex_unlock(&ucontext->uobjects_lock);
		/* put the ref we took when we created the object */
		uverbs_uobject_put(uobj);
	}

	return ret;
}

/* This is called only for user-requested DESTROY reasons */
int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
{
	int ret;
	struct ib_ucontext *ucontext = uobj->context;

	/* put the ref count we took at lookup_get */
	uverbs_uobject_put(uobj);
	/* Cleanup is running. Calling this should have been impossible */
	if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
		return 0;
	}
	lockdep_check(uobj, true);
	ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);

	up_read(&ucontext->cleanup_rwsem);
	return ret;
}

static int null_obj_type_class_remove_commit(struct ib_uobject *uobj,
					     enum rdma_remove_reason why)
{
	return 0;
}

static const struct uverbs_obj_type null_obj_type = {
	.type_class = &((const struct uverbs_obj_type_class){
			.remove_commit = null_obj_type_class_remove_commit,
			/* be cautious */
			.needs_kfree_rcu = true}),
};

int rdma_explicit_destroy(struct ib_uobject *uobject)
{
	int ret;
	struct ib_ucontext *ucontext = uobject->context;

	/* Cleanup is running. Calling this should have been impossible */
	if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
		return 0;
	}
	lockdep_check(uobject, true);
	ret = uobject->type->type_class->remove_commit(uobject,
						       RDMA_REMOVE_DESTROY);
	if (ret)
		goto out;

	uobject->type = &null_obj_type;

out:
	/* Don't leak cleanup_rwsem when remove_commit fails */
	up_read(&ucontext->cleanup_rwsem);
	return ret;
}

static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
{
	uverbs_uobject_add(uobj);
	spin_lock(&uobj->context->ufile->idr_lock);
	/*
	 * We already allocated this idr entry with a NULL object, so
	 * idr_replace() shouldn't fail.
	 */
	WARN_ON(idr_replace(&uobj->context->ufile->idr,
			    uobj, uobj->id));
	spin_unlock(&uobj->context->ufile->idr_lock);
}

static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
{
	struct ib_uobject_file *uobj_file =
		container_of(uobj, struct ib_uobject_file, uobj);

	uverbs_uobject_add(&uobj_file->uobj);
	fd_install(uobj_file->uobj.id, uobj->object);
	/* The fd number shouldn't be used anymore. Use the file object instead */
	uobj_file->uobj.id = 0;
	/* Get another reference as we export this to the fops */
	uverbs_uobject_get(&uobj_file->uobj);
}

int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
{
	/* Cleanup is running. Calling this should have been impossible */
	if (!down_read_trylock(&uobj->context->cleanup_rwsem)) {
		int ret;

		WARN(true, "ib_uverbs: Cleanup is running while allocating an uobject\n");
		ret = uobj->type->type_class->remove_commit(uobj,
							    RDMA_REMOVE_DURING_CLEANUP);
		if (ret)
			pr_warn("ib_uverbs: cleanup of object %d failed\n",
				uobj->id);
		return ret;
	}

	uobj->type->type_class->alloc_commit(uobj);
	up_read(&uobj->context->cleanup_rwsem);

	return 0;
}

static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
{
	uverbs_idr_remove_uobj(uobj);
	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);
	uverbs_uobject_put(uobj);
}

void rdma_alloc_abort_uobject(struct ib_uobject *uobj)
{
	uobj->type->type_class->alloc_abort(uobj);
}

static void lookup_put_idr_uobject(struct ib_uobject *uobj, bool exclusive)
{
}

static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
{
	struct file *filp = uobj->object;

	WARN_ON(exclusive);
	/* This indirectly calls uverbs_close_fd and frees the object */
	fput(filp);
}

void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
{
	lockdep_check(uobj, exclusive);
	uobj->type->type_class->lookup_put(uobj, exclusive);
	/*
	 * In order to unlock an object, either decrement its usecnt for
	 * shared (read) access or set it to zero for exclusive access. See
	 * uverbs_try_lock_object for details on the locking scheme.
	 */
	if (!exclusive)
		atomic_dec(&uobj->usecnt);
	else
		atomic_set(&uobj->usecnt, 0);

	uverbs_uobject_put(uobj);
}

const struct uverbs_obj_type_class uverbs_idr_class = {
	.alloc_begin = alloc_begin_idr_uobject,
	.lookup_get = lookup_get_idr_uobject,
	.alloc_commit = alloc_commit_idr_uobject,
	.alloc_abort = alloc_abort_idr_uobject,
	.lookup_put = lookup_put_idr_uobject,
	.remove_commit = remove_commit_idr_uobject,
	/*
	 * When we destroy an object, we first just lock it for WRITE and
	 * actually DESTROY it in the finalize stage. So, the problematic
	 * scenario is when we just started the finalize stage of the
	 * destruction (nothing was executed yet) and another thread fetched
	 * the object for READ access, but didn't lock it yet. The DESTROY
	 * thread continues and starts destroying the object. When the other
	 * thread continues, without RCU it would access freed memory.
	 * However, rcu_read_lock delays the free until the RCU read-side
	 * critical section of the READ operation ends. Since the exclusive
	 * lock on the object is still held by the DESTROY flow, the READ
	 * operation will get -EBUSY and just bail out.
	 */
	.needs_kfree_rcu = true,
};
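
/*
 * Timeline sketch of the race above (two threads, one object; illustrative
 * only):
 *
 *	READ thread                         DESTROY thread
 *	-----------                         --------------
 *	rcu_read_lock()
 *	uobj = idr_find(...)
 *	                                    uverbs_try_lock_object(uobj, true)
 *	                                    idr_remove(...); final uobject put
 *	                                      -> kfree_rcu(uobj)
 *	kref_get_unless_zero(&uobj->ref)    (uobj memory still valid: the RCU
 *	rcu_read_unlock()                    grace period hasn't ended)
 *
 * The READ thread either fails to take a reference (refcount already zero)
 * or takes one and then gets -EBUSY from uverbs_try_lock_object(), since
 * the DESTROY flow still holds the exclusive lock. Either way it never
 * touches freed memory.
 */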

static void _uverbs_close_fd(struct ib_uobject_file *uobj_file)
{
	struct ib_ucontext *ucontext;
	struct ib_uverbs_file *ufile = uobj_file->ufile;
	int ret;

	mutex_lock(&uobj_file->ufile->cleanup_mutex);

	/* The uobject was either already cleaned up or is being cleaned up now */
	if (!uobj_file->uobj.context ||
	    !down_read_trylock(&uobj_file->uobj.context->cleanup_rwsem))
		goto unlock;

	ucontext = uobj_file->uobj.context;
	ret = _rdma_remove_commit_uobject(&uobj_file->uobj, RDMA_REMOVE_CLOSE);
	up_read(&ucontext->cleanup_rwsem);
	if (ret)
		pr_warn("uverbs: unable to clean up uobject file in uverbs_close_fd.\n");
unlock:
	mutex_unlock(&ufile->cleanup_mutex);
}

void uverbs_close_fd(struct file *f)
{
	struct ib_uobject_file *uobj_file = f->private_data;
	struct kref *uverbs_file_ref = &uobj_file->ufile->ref;

	_uverbs_close_fd(uobj_file);
	uverbs_uobject_put(&uobj_file->uobj);
	kref_put(uverbs_file_ref, ib_uverbs_release_file);
}

void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed)
{
	enum rdma_remove_reason reason = device_removed ?
		RDMA_REMOVE_DRIVER_REMOVE : RDMA_REMOVE_CLOSE;
	unsigned int cur_order = 0;

	ucontext->cleanup_reason = reason;
	/*
	 * Waits for all remove_commit and alloc_commit to finish. Logically,
	 * we want to hold this forever as the context is going to be
	 * destroyed, but we'll release it since holding it triggers a
	 * "held lock freed" BUG message.
	 */
	down_write(&ucontext->cleanup_rwsem);

	while (!list_empty(&ucontext->uobjects)) {
		struct ib_uobject *obj, *next_obj;
		unsigned int next_order = UINT_MAX;

		/*
		 * This shouldn't run while other commands are executing on
		 * this context. Thus, the only thing we need to guard against
		 * is an FD being released while we traverse this list; the FD
		 * could be closed, and its uobject released, from the
		 * _release fop of that FD. To mitigate this, we add a lock.
		 * We take and release the lock once per order traversal to
		 * give other threads (which might still be using the FDs) a
		 * chance to run.
		 */
		mutex_lock(&ucontext->uobjects_lock);
		list_for_each_entry_safe(obj, next_obj, &ucontext->uobjects,
					 list) {
			if (obj->type->destroy_order == cur_order) {
				int ret;

				/*
				 * If we hit this WARN_ON, it means we are
				 * racing with a lookup_get.
				 */
				WARN_ON(uverbs_try_lock_object(obj, true));
				ret = obj->type->type_class->remove_commit(obj,
									   reason);
				list_del(&obj->list);
				if (ret)
					pr_warn("ib_uverbs: failed to remove uobject id %d order %u\n",
						obj->id, cur_order);
				/* put the ref we took when we created the object */
				uverbs_uobject_put(obj);
			} else {
				next_order = min(next_order,
						 obj->type->destroy_order);
			}
		}
		mutex_unlock(&ucontext->uobjects_lock);
		cur_order = next_order;
	}
	up_write(&ucontext->cleanup_rwsem);
}
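
/*
 * Ordering sketch for the cleanup loop above (illustrative only): with
 * three live uobjects whose types have destroy_order 0, 0 and 5, the outer
 * loop makes two passes. The first pass destroys both order-0 objects and
 * records next_order = 5; the second destroys the remaining object. Types
 * that depend on other types are therefore given a lower destroy_order, so
 * they are always removed before the objects they reference.
 */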

void uverbs_initialize_ucontext(struct ib_ucontext *ucontext)
{
	ucontext->cleanup_reason = 0;
	mutex_init(&ucontext->uobjects_lock);
	INIT_LIST_HEAD(&ucontext->uobjects);
	init_rwsem(&ucontext->cleanup_rwsem);
}

const struct uverbs_obj_type_class uverbs_fd_class = {
	.alloc_begin = alloc_begin_fd_uobject,
	.lookup_get = lookup_get_fd_uobject,
	.alloc_commit = alloc_commit_fd_uobject,
	.alloc_abort = alloc_abort_fd_uobject,
	.lookup_put = lookup_put_fd_uobject,
	.remove_commit = remove_commit_fd_uobject,
	.needs_kfree_rcu = false,
};

struct ib_uobject *uverbs_get_uobject_from_context(const struct uverbs_obj_type *type_attrs,
						   struct ib_ucontext *ucontext,
						   enum uverbs_obj_access access,
						   int id)
{
	switch (access) {
	case UVERBS_ACCESS_READ:
		return rdma_lookup_get_uobject(type_attrs, ucontext, id, false);
	case UVERBS_ACCESS_DESTROY:
	case UVERBS_ACCESS_WRITE:
		return rdma_lookup_get_uobject(type_attrs, ucontext, id, true);
	case UVERBS_ACCESS_NEW:
		return rdma_alloc_begin_uobject(type_attrs, ucontext);
	default:
		WARN_ON(true);
		return ERR_PTR(-EOPNOTSUPP);
	}
}

int uverbs_finalize_object(struct ib_uobject *uobj,
			   enum uverbs_obj_access access,
			   bool commit)
{
	int ret = 0;

	/*
	 * Refcounts should be handled at the object level and not at the
	 * uobject level. Refcounts of the objects themselves are done in
	 * the handlers.
	 */

	switch (access) {
	case UVERBS_ACCESS_READ:
		rdma_lookup_put_uobject(uobj, false);
		break;
	case UVERBS_ACCESS_WRITE:
		rdma_lookup_put_uobject(uobj, true);
		break;
	case UVERBS_ACCESS_DESTROY:
		if (commit)
			ret = rdma_remove_commit_uobject(uobj);
		else
			rdma_lookup_put_uobject(uobj, true);
		break;
	case UVERBS_ACCESS_NEW:
		if (commit)
			ret = rdma_alloc_commit_uobject(uobj);
		else
			rdma_alloc_abort_uobject(uobj);
		break;
	default:
		WARN_ON(true);
		ret = -EOPNOTSUPP;
	}

	return ret;
}
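
/*
 * Finalize usage sketch (illustrative only; the ioctl dispatcher is the
 * real caller): after a handler runs, each object attribute is finalized
 * with "commit" reflecting the handler's success, e.g. for an object
 * looked up with DESTROY access:
 *
 *	ret = handler(...);
 *	uverbs_finalize_object(uobj, UVERBS_ACCESS_DESTROY, !ret);
 *
 * On success (commit) the lookup is turned into a removal; on failure the
 * exclusive lock is simply dropped and the object survives.
 */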

int uverbs_finalize_objects(struct uverbs_attr_bundle *attrs_bundle,
			    struct uverbs_attr_spec_hash * const *spec_hash,
			    size_t num,
			    bool commit)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i < num; i++) {
		struct uverbs_attr_bundle_hash *curr_bundle =
			&attrs_bundle->hash[i];
		const struct uverbs_attr_spec_hash *curr_spec_bucket =
			spec_hash[i];
		unsigned int j;

		for (j = 0; j < curr_bundle->num_attrs; j++) {
			struct uverbs_attr *attr;
			const struct uverbs_attr_spec *spec;

			if (!uverbs_attr_is_valid_in_hash(curr_bundle, j))
				continue;

			attr = &curr_bundle->attrs[j];
			spec = &curr_spec_bucket->attrs[j];

			if (spec->type == UVERBS_ATTR_TYPE_IDR ||
			    spec->type == UVERBS_ATTR_TYPE_FD) {
				int current_ret;

				current_ret = uverbs_finalize_object(attr->obj_attr.uobject,
								     spec->obj.access,
								     commit);
				if (!ret)
					ret = current_ret;
			}
		}
	}
	return ret;
}