/*
 * Copyright (c) 2016, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <linux/rcupdate.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/rdma_user_ioctl.h>
#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

static void uverbs_uobject_free(struct kref *ref)
{
	kfree_rcu(container_of(ref, struct ib_uobject, ref), rcu);
}

/*
 * uverbs_uobject_put() is called to indicate that we no longer need this
 * uobject. When the reference count reaches zero, the uobject is freed.
 * For example, this is used when attaching a completion channel to a CQ.
 */
void uverbs_uobject_put(struct ib_uobject *uobject)
{
	kref_put(&uobject->ref, uverbs_uobject_free);
}
EXPORT_SYMBOL(uverbs_uobject_put);

static int uverbs_try_lock_object(struct ib_uobject *uobj,
				  enum rdma_lookup_mode mode)
{
	/*
	 * When shared access is required, we use a positive counter. Each
	 * shared access request checks that the value != -1 and increments
	 * it. Exclusive access is required for operations like write or
	 * destroy. In exclusive access mode, we check that the counter is
	 * zero (nobody claimed this object) and we set it to -1. Releasing
	 * a shared access lock is done simply by decreasing the counter. As
	 * for exclusive access locks, since only a single one of them is
	 * allowed concurrently, setting the counter to zero is enough for
	 * releasing this lock.
	 */
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
			-EBUSY : 0;
	case UVERBS_LOOKUP_WRITE:
		/* lock is exclusive */
		return atomic_cmpxchg(&uobj->usecnt, 0, -1) == 0 ? 0 : -EBUSY;
	case UVERBS_LOOKUP_DESTROY:
		return 0;
	}
	return 0;
}
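/*
 * Illustrative interleaving of the usecnt protocol above (informative
 * only; uverbs_try_lock_object() and the UVERBS_LOOKUP_* modes are the
 * real API, the schedule shown is hypothetical):
 *
 *	CPU0 (shared)				CPU1 (exclusive)
 *	uverbs_try_lock_object(uobj,
 *			       UVERBS_LOOKUP_READ)
 *	  usecnt 0 -> 1, returns 0
 *						uverbs_try_lock_object(uobj,
 *							UVERBS_LOOKUP_WRITE)
 *						  cmpxchg(0, -1) fails,
 *						  returns -EBUSY
 *	atomic_dec(&uobj->usecnt)
 *	  usecnt 1 -> 0, shared lock released
 *						retry: cmpxchg(0, -1) succeeds,
 *						  usecnt = -1, returns 0
 */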
static void assert_uverbs_usecnt(struct ib_uobject *uobj,
				 enum rdma_lookup_mode mode)
{
#ifdef CONFIG_LOCKDEP
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		WARN_ON(atomic_read(&uobj->usecnt) <= 0);
		break;
	case UVERBS_LOOKUP_WRITE:
		WARN_ON(atomic_read(&uobj->usecnt) != -1);
		break;
	case UVERBS_LOOKUP_DESTROY:
		break;
	}
#endif
}

/*
 * This must be called with the hw_destroy_rwsem locked for read or write,
 * also the uobject itself must be locked for write.
 *
 * Upon return the HW object is guaranteed to be destroyed.
 *
 * For RDMA_REMOVE_ABORT, the hw_destroy_rwsem is not required to be held,
 * however the type's alloc_commit function cannot have been called and the
 * uobject cannot be on the uobjects_list.
 *
 * For RDMA_REMOVE_DESTROY the caller should be holding a kref (eg via
 * rdma_lookup_get_uobject) and the object is left in a state where the caller
 * needs to call rdma_lookup_put_uobject.
 *
 * For all other destroy modes this function internally unlocks the uobject
 * and consumes the kref on the uobj.
 */
static int uverbs_destroy_uobject(struct ib_uobject *uobj,
				  enum rdma_remove_reason reason,
				  struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *ufile = attrs->ufile;
	unsigned long flags;
	int ret;

	lockdep_assert_held(&ufile->hw_destroy_rwsem);
	assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);

	if (reason == RDMA_REMOVE_ABORT_HWOBJ) {
		reason = RDMA_REMOVE_ABORT;
		ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
								attrs);
		/*
		 * Drivers are not permitted to ignore RDMA_REMOVE_ABORT, see
		 * ib_is_destroy_retryable, cleanup_retryable == false here.
		 */
		WARN_ON(ret);
	}

	if (reason == RDMA_REMOVE_ABORT) {
		WARN_ON(!list_empty(&uobj->list));
		WARN_ON(!uobj->context);
		uobj->uapi_object->type_class->alloc_abort(uobj);
	} else if (uobj->object) {
		ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
								attrs);
		if (ret) {
			if (ib_is_destroy_retryable(ret, reason, uobj))
				return ret;

			/* Nothing to be done, dangle the memory and move on */
			WARN(true,
			     "ib_uverbs: failed to remove uobject id %d, driver err=%d",
			     uobj->id, ret);
		}

		uobj->object = NULL;
	}

	uobj->context = NULL;

	/*
	 * For DESTROY the usecnt is not changed, the caller is expected to
	 * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR
	 * handle.
	 */
	if (reason != RDMA_REMOVE_DESTROY)
		atomic_set(&uobj->usecnt, 0);
	else
		uobj->uapi_object->type_class->remove_handle(uobj);

	if (!list_empty(&uobj->list)) {
		spin_lock_irqsave(&ufile->uobjects_lock, flags);
		list_del_init(&uobj->list);
		spin_unlock_irqrestore(&ufile->uobjects_lock, flags);

		/*
		 * Pairs with the get in rdma_alloc_commit_uobject(), could
		 * destroy uobj.
		 */
		uverbs_uobject_put(uobj);
	}

	/*
	 * When aborting the stack kref remains owned by the core code, and is
	 * not transferred into the type. Pairs with the get in alloc_uobj.
	 */
	if (reason == RDMA_REMOVE_ABORT)
		uverbs_uobject_put(uobj);

	return 0;
}
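/*
 * Informative summary of the reason handling above, derived from the code
 * in uverbs_destroy_uobject() itself:
 *
 *	RDMA_REMOVE_ABORT_HWOBJ: destroy_hw() is called first and must
 *		succeed, then the ABORT path below runs.
 *	RDMA_REMOVE_ABORT:	 alloc_abort() only; the object was never
 *		committed, so it is not on ufile->uobjects and the
 *		alloc_uobj() kref is put here.
 *	RDMA_REMOVE_DESTROY:	 destroy_hw() plus remove_handle(); usecnt
 *		stays write locked for the caller's uobj_put_destroy().
 *	all other reasons:	 destroy_hw(); usecnt is reset to 0 and the
 *		list membership kref is dropped here.
 */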
/*
 * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
 * sequence. It should only be used from command callbacks. On success the
 * caller must pair this with uobj_put_destroy(). This version requires the
 * caller to have already obtained a LOOKUP_DESTROY uobject kref.
 */
int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *ufile = attrs->ufile;
	int ret;

	down_read(&ufile->hw_destroy_rwsem);

	/*
	 * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is
	 * left write locked as the callers put it back with
	 * UVERBS_LOOKUP_DESTROY. This is because any other concurrent thread
	 * can still see the object in the xarray due to RCU. Leaving it
	 * locked ensures nothing else will touch it.
	 */
	ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
	if (ret)
		goto out_unlock;

	ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY, attrs);
	if (ret) {
		atomic_set(&uobj->usecnt, 0);
		goto out_unlock;
	}

out_unlock:
	up_read(&ufile->hw_destroy_rwsem);
	return ret;
}

/*
 * uobj_get_destroy destroys the HW object and returns a handle to the uobj
 * with a NULL object pointer. The caller must pair this with
 * uobj_put_destroy().
 */
struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
				      u32 id, struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;
	int ret;

	uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
				       UVERBS_LOOKUP_DESTROY, attrs);
	if (IS_ERR(uobj))
		return uobj;

	ret = uobj_destroy(uobj, attrs);
	if (ret) {
		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
		return ERR_PTR(ret);
	}

	return uobj;
}

/*
 * Does both uobj_get_destroy() and uobj_put_destroy(). Returns 0 on success
 * (negative errno on failure). For use by callers that do not need the uobj.
 */
int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
			   struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;

	uobj = __uobj_get_destroy(obj, id, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);
	uobj_put_destroy(uobj);
	return 0;
}

/* alloc_uobj must be undone by uverbs_destroy_uobject() */
static struct ib_uobject *alloc_uobj(struct uverbs_attr_bundle *attrs,
				     const struct uverbs_api_object *obj)
{
	struct ib_uverbs_file *ufile = attrs->ufile;
	struct ib_uobject *uobj;

	if (!attrs->context) {
		struct ib_ucontext *ucontext =
			ib_uverbs_get_ucontext_file(ufile);

		if (IS_ERR(ucontext))
			return ERR_CAST(ucontext);
		attrs->context = ucontext;
	}

	uobj = kzalloc(obj->type_attrs->obj_size, GFP_KERNEL);
	if (!uobj)
		return ERR_PTR(-ENOMEM);
	/*
	 * user_handle should be filled by the handler; the object is added
	 * to the list in the commit stage.
	 */
	uobj->ufile = ufile;
	uobj->context = attrs->context;
	INIT_LIST_HEAD(&uobj->list);
	uobj->uapi_object = obj;
	/*
	 * Allocated objects start out as write locked to deny any other
	 * syscalls from accessing them until they are committed. See
	 * rdma_alloc_commit_uobject.
	 */
	atomic_set(&uobj->usecnt, -1);
	kref_init(&uobj->ref);

	return uobj;
}
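/*
 * Illustrative sketch of what the usecnt == -1 initialization in
 * alloc_uobj() buys us: until rdma_alloc_commit_uobject() resets usecnt
 * to 0, any concurrent lookup of the half-built object fails
 * (hypothetical interleaving):
 *
 *	uobj = alloc_uobj(attrs, obj);			// usecnt = -1
 *	...
 *	uverbs_try_lock_object(uobj, UVERBS_LOOKUP_READ)  -> -EBUSY
 *	uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE) -> -EBUSY
 */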
static int idr_add_uobj(struct ib_uobject *uobj)
{
	/*
	 * We start by allocating an idr entry pointing to NULL. This
	 * represents an object which isn't initialized yet. We'll replace
	 * it later on with the real object once we commit.
	 */
	return xa_alloc(&uobj->ufile->idr, &uobj->id, NULL, xa_limit_32b,
			GFP_KERNEL);
}

/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
static struct ib_uobject *
lookup_get_idr_uobject(const struct uverbs_api_object *obj,
		       struct ib_uverbs_file *ufile, s64 id,
		       enum rdma_lookup_mode mode)
{
	struct ib_uobject *uobj;

	if (id < 0 || id > ULONG_MAX)
		return ERR_PTR(-EINVAL);

	rcu_read_lock();
	/*
	 * The xa_load() is guaranteed to return a pointer to something that
	 * isn't freed yet, or NULL, as the free after xa_erase() goes
	 * through kfree_rcu(). However the object may still have been
	 * released and kfree() could be called at any time.
	 */
	uobj = xa_load(&ufile->idr, id);
	if (!uobj || !kref_get_unless_zero(&uobj->ref))
		uobj = ERR_PTR(-ENOENT);
	rcu_read_unlock();
	return uobj;
}

static struct ib_uobject *
lookup_get_fd_uobject(const struct uverbs_api_object *obj,
		      struct ib_uverbs_file *ufile, s64 id,
		      enum rdma_lookup_mode mode)
{
	const struct uverbs_obj_fd_type *fd_type;
	struct file *f;
	struct ib_uobject *uobject;
	int fdno = id;

	if (fdno != id)
		return ERR_PTR(-EINVAL);

	if (mode != UVERBS_LOOKUP_READ)
		return ERR_PTR(-EOPNOTSUPP);

	if (!obj->type_attrs)
		return ERR_PTR(-EIO);
	fd_type =
		container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);

	f = fget(fdno);
	if (!f)
		return ERR_PTR(-EBADF);

	uobject = f->private_data;
	/*
	 * fget(id) ensures we are not currently running
	 * uverbs_uobject_fd_release(), and the caller is expected to ensure
	 * that release is never done while a call to lookup is possible.
	 */
	if (f->f_op != fd_type->fops || uobject->ufile != ufile) {
		fput(f);
		return ERR_PTR(-EBADF);
	}

	uverbs_uobject_get(uobject);
	return uobject;
}
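/*
 * Illustrative example for the fops check above: fget() only proves that
 * the fd names some open file. Comparing f->f_op against the expected
 * uverbs fops, and uobject->ufile against the caller's ufile, is what
 * rejects an unrelated fd (hypothetical userspace misuse):
 *
 *	fd = open("/dev/null", O_RDONLY);	// passed in as the object id
 *	lookup_get_fd_uobject(obj, ufile, fd, UVERBS_LOOKUP_READ)
 *		-> f->f_op != fd_type->fops, so -EBADF
 */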
struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
					   struct ib_uverbs_file *ufile, s64 id,
					   enum rdma_lookup_mode mode,
					   struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj;
	int ret;

	if (obj == ERR_PTR(-ENOMSG)) {
		/* must be UVERBS_IDR_ANY_OBJECT, see uapi_get_object() */
		uobj = lookup_get_idr_uobject(NULL, ufile, id, mode);
		if (IS_ERR(uobj))
			return uobj;
	} else {
		if (IS_ERR(obj))
			return ERR_PTR(-EINVAL);

		uobj = obj->type_class->lookup_get(obj, ufile, id, mode);
		if (IS_ERR(uobj))
			return uobj;

		if (uobj->uapi_object != obj) {
			ret = -EINVAL;
			goto free;
		}
	}

	/*
	 * If we have been disassociated, block every command except for
	 * DESTROY-based commands.
	 */
	if (mode != UVERBS_LOOKUP_DESTROY &&
	    !srcu_dereference(ufile->device->ib_dev,
			      &ufile->device->disassociate_srcu)) {
		ret = -EIO;
		goto free;
	}

	ret = uverbs_try_lock_object(uobj, mode);
	if (ret)
		goto free;
	if (attrs)
		attrs->context = uobj->context;

	return uobj;
free:
	uobj->uapi_object->type_class->lookup_put(uobj, mode);
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}

static struct ib_uobject *
alloc_begin_idr_uobject(const struct uverbs_api_object *obj,
			struct uverbs_attr_bundle *attrs)
{
	int ret;
	struct ib_uobject *uobj;

	uobj = alloc_uobj(attrs, obj);
	if (IS_ERR(uobj))
		return uobj;

	ret = idr_add_uobj(uobj);
	if (ret)
		goto uobj_put;

	ret = ib_rdmacg_try_charge(&uobj->cg_obj, uobj->context->device,
				   RDMACG_RESOURCE_HCA_OBJECT);
	if (ret)
		goto remove;

	return uobj;

remove:
	xa_erase(&attrs->ufile->idr, uobj->id);
uobj_put:
	uverbs_uobject_put(uobj);
	return ERR_PTR(ret);
}

static struct ib_uobject *
alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
		       struct uverbs_attr_bundle *attrs)
{
	const struct uverbs_obj_fd_type *fd_type =
		container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
	int new_fd;
	struct ib_uobject *uobj;
	struct file *filp;

	if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
		    fd_type->fops->release != &uverbs_async_event_release))
		return ERR_PTR(-EINVAL);

	new_fd = get_unused_fd_flags(O_CLOEXEC);
	if (new_fd < 0)
		return ERR_PTR(new_fd);

	uobj = alloc_uobj(attrs, obj);
	if (IS_ERR(uobj))
		goto err_fd;

	/* Note that uverbs_uobject_fd_release() is called during abort */
	filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
				  fd_type->flags);
	if (IS_ERR(filp)) {
		uverbs_uobject_put(uobj);
		uobj = ERR_CAST(filp);
		goto err_fd;
	}
	uobj->object = filp;

	uobj->id = new_fd;
	return uobj;

err_fd:
	put_unused_fd(new_fd);
	return uobj;
}

struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
					    struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *ufile = attrs->ufile;
	struct ib_uobject *ret;

	if (IS_ERR(obj))
		return ERR_PTR(-EINVAL);

	/*
	 * The hw_destroy_rwsem is held across the entire object creation and
	 * released during rdma_alloc_commit_uobject or
	 * rdma_alloc_abort_uobject.
	 */
	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
		return ERR_PTR(-EIO);

	ret = obj->type_class->alloc_begin(obj, attrs);
	if (IS_ERR(ret)) {
		up_read(&ufile->hw_destroy_rwsem);
		return ret;
	}
	return ret;
}
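/*
 * Illustrative sketch of the creation lifecycle around the function above;
 * the handler body and create_hw_object() are hypothetical:
 *
 *	uobj = rdma_alloc_begin_uobject(obj, attrs); // takes hw_destroy_rwsem
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	ret = create_hw_object(uobj, attrs);	     // driver-specific work
 *	if (ret) {
 *		rdma_alloc_abort_uobject(uobj, attrs, false); // drops rwsem
 *		return ret;
 *	}
 *	rdma_alloc_commit_uobject(uobj, attrs);	     // drops rwsem
 *	return 0;
 */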
static void alloc_abort_idr_uobject(struct ib_uobject *uobj)
{
	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);

	xa_erase(&uobj->ufile->idr, uobj->id);
}

static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj,
					       enum rdma_remove_reason why,
					       struct uverbs_attr_bundle *attrs)
{
	const struct uverbs_obj_idr_type *idr_type =
		container_of(uobj->uapi_object->type_attrs,
			     struct uverbs_obj_idr_type, type);
	int ret = idr_type->destroy_object(uobj, why, attrs);

	/*
	 * We can only fail gracefully if the user requested to destroy the
	 * object or when a retry may be attempted after an error.
	 * In the rest of the cases, just remove whatever you can.
	 */
	if (ib_is_destroy_retryable(ret, why, uobj))
		return ret;

	if (why == RDMA_REMOVE_ABORT)
		return 0;

	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
			   RDMACG_RESOURCE_HCA_OBJECT);

	return 0;
}

static void remove_handle_idr_uobject(struct ib_uobject *uobj)
{
	xa_erase(&uobj->ufile->idr, uobj->id);
	/* Matches the kref in alloc_commit_idr_uobject */
	uverbs_uobject_put(uobj);
}

static void alloc_abort_fd_uobject(struct ib_uobject *uobj)
{
	struct file *filp = uobj->object;

	fput(filp);
	put_unused_fd(uobj->id);
}

static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj,
					      enum rdma_remove_reason why,
					      struct uverbs_attr_bundle *attrs)
{
	const struct uverbs_obj_fd_type *fd_type = container_of(
		uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type);
	int ret = fd_type->destroy_object(uobj, why);

	if (ib_is_destroy_retryable(ret, why, uobj))
		return ret;

	return 0;
}

static void remove_handle_fd_uobject(struct ib_uobject *uobj)
{
}

static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
{
	struct ib_uverbs_file *ufile = uobj->ufile;
	void *old;

	/*
	 * We already allocated this IDR entry with a NULL object, so
	 * this shouldn't fail.
	 *
	 * NOTE: Storing the uobj transfers our kref on uobj to the XArray.
	 * It will be put by remove_handle_idr_uobject().
	 */
	old = xa_store(&ufile->idr, uobj->id, uobj, GFP_KERNEL);
	WARN_ON(old != NULL);
}

static void alloc_commit_fd_uobject(struct ib_uobject *uobj)
{
	int fd = uobj->id;
	struct file *filp = uobj->object;

	/* Matching put will be done in uverbs_uobject_fd_release() */
	kref_get(&uobj->ufile->ref);

	/* This shouldn't be used anymore. Use the file object instead */
	uobj->id = 0;

	/*
	 * NOTE: Once we install the file we lose ownership of our kref on
	 * uobj. It will be put by uverbs_uobject_fd_release().
	 */
	filp->private_data = uobj;
	fd_install(fd, filp);
}

/*
 * In all cases rdma_alloc_commit_uobject() consumes the kref to uobj and the
 * caller can no longer assume uobj is valid.
 */
void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
			       struct uverbs_attr_bundle *attrs)
{
	struct ib_uverbs_file *ufile = attrs->ufile;

	/* alloc_commit consumes the uobj kref */
	uobj->uapi_object->type_class->alloc_commit(uobj);

	/* kref is held so long as the uobj is on the uobj list. */
	uverbs_uobject_get(uobj);
	spin_lock_irq(&ufile->uobjects_lock);
	list_add(&uobj->list, &ufile->uobjects);
	spin_unlock_irq(&ufile->uobjects_lock);

	/* matches atomic_set(-1) in alloc_uobj */
	atomic_set(&uobj->usecnt, 0);

	/* Matches the down_read in rdma_alloc_begin_uobject */
	up_read(&ufile->hw_destroy_rwsem);
}
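/*
 * Informative note on kref ownership once the commit above completes
 * (derived from the commit paths in this file):
 *
 *	- one kref is owned by the handle: the xarray slot for IDR objects
 *	  (put by remove_handle_idr_uobject()) or filp->private_data for FD
 *	  objects (put by uverbs_uobject_fd_release());
 *	- one kref is owned by membership on ufile->uobjects, put when
 *	  uverbs_destroy_uobject() takes the object off that list.
 */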
/*
 * This consumes the kref for uobj. It is up to the caller to unwind the HW
 * object and anything else connected to uobj before calling this.
 */
void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
			      struct uverbs_attr_bundle *attrs,
			      bool hw_obj_valid)
{
	struct ib_uverbs_file *ufile = uobj->ufile;

	uverbs_destroy_uobject(uobj,
			       hw_obj_valid ? RDMA_REMOVE_ABORT_HWOBJ :
					      RDMA_REMOVE_ABORT,
			       attrs);

	/* Matches the down_read in rdma_alloc_begin_uobject */
	up_read(&ufile->hw_destroy_rwsem);
}

static void lookup_put_idr_uobject(struct ib_uobject *uobj,
				   enum rdma_lookup_mode mode)
{
}

static void lookup_put_fd_uobject(struct ib_uobject *uobj,
				  enum rdma_lookup_mode mode)
{
	struct file *filp = uobj->object;

	WARN_ON(mode != UVERBS_LOOKUP_READ);
	/*
	 * This indirectly calls uverbs_uobject_fd_release() and frees the
	 * object.
	 */
	fput(filp);
}

void rdma_lookup_put_uobject(struct ib_uobject *uobj,
			     enum rdma_lookup_mode mode)
{
	assert_uverbs_usecnt(uobj, mode);
	/*
	 * In order to unlock an object, either decrease its usecnt for
	 * read access or zero it in case of exclusive access. See
	 * uverbs_try_lock_object for locking schema information.
	 */
	switch (mode) {
	case UVERBS_LOOKUP_READ:
		atomic_dec(&uobj->usecnt);
		break;
	case UVERBS_LOOKUP_WRITE:
		atomic_set(&uobj->usecnt, 0);
		break;
	case UVERBS_LOOKUP_DESTROY:
		break;
	}

	uobj->uapi_object->type_class->lookup_put(uobj, mode);
	/* Pairs with the kref obtained by type->lookup_get */
	uverbs_uobject_put(uobj);
}

void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile)
{
	xa_init_flags(&ufile->idr, XA_FLAGS_ALLOC);
}

void release_ufile_idr_uobject(struct ib_uverbs_file *ufile)
{
	struct ib_uobject *entry;
	unsigned long id;

	/*
	 * At this point uverbs_cleanup_ufile() is guaranteed to have run, and
	 * there are no HW objects left, however the xarray is still populated
	 * with anything that has not been cleaned up by userspace. Since the
	 * kref on ufile is 0, nothing is allowed to call lookup_get.
	 *
	 * This is an optimized equivalent to remove_handle_idr_uobject.
	 */
	xa_for_each(&ufile->idr, id, entry) {
		WARN_ON(entry->object);
		uverbs_uobject_put(entry);
	}

	xa_destroy(&ufile->idr);
}

const struct uverbs_obj_type_class uverbs_idr_class = {
	.alloc_begin = alloc_begin_idr_uobject,
	.lookup_get = lookup_get_idr_uobject,
	.alloc_commit = alloc_commit_idr_uobject,
	.alloc_abort = alloc_abort_idr_uobject,
	.lookup_put = lookup_put_idr_uobject,
	.destroy_hw = destroy_hw_idr_uobject,
	.remove_handle = remove_handle_idr_uobject,
};
EXPORT_SYMBOL(uverbs_idr_class);
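/*
 * Illustrative usage sketch for the lookup API above; the handler shown is
 * hypothetical:
 *
 *	uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id,
 *				       UVERBS_LOOKUP_READ, attrs);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	cq = uobj->object;		// read-only access while locked
 *	...
 *	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
 */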
/*
 * Users of UVERBS_TYPE_ALLOC_FD should set this function as the struct
 * file_operations release method.
 */
int uverbs_uobject_fd_release(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_file *ufile;
	struct ib_uobject *uobj;

	/*
	 * This can only happen if the fput came from alloc_abort_fd_uobject()
	 */
	if (!filp->private_data)
		return 0;
	uobj = filp->private_data;
	ufile = uobj->ufile;

	if (down_read_trylock(&ufile->hw_destroy_rwsem)) {
		struct uverbs_attr_bundle attrs = {
			.context = uobj->context,
			.ufile = ufile,
		};

		/*
		 * lookup_get_fd_uobject holds the kref on the struct file any
		 * time a FD uobj is locked, which prevents this release
		 * method from being invoked. Meaning we can always get the
		 * write lock here, or we have a kernel bug.
		 */
		WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE));
		uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE, &attrs);
		up_read(&ufile->hw_destroy_rwsem);
	}

	/* Matches the get in alloc_commit_fd_uobject() */
	kref_put(&ufile->ref, ib_uverbs_release_file);

	/*
	 * Pairs with the kref handed over via filp->private_data in
	 * alloc_commit_fd_uobject()
	 */
	uverbs_uobject_put(uobj);
	return 0;
}
EXPORT_SYMBOL(uverbs_uobject_fd_release);
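/*
 * Illustrative wiring sketch for the requirement above; my_uobj_fops and
 * my_uobj_read() are hypothetical:
 *
 *	static const struct file_operations my_uobj_fops = {
 *		.owner	 = THIS_MODULE,
 *		.read	 = my_uobj_read,
 *		.release = uverbs_uobject_fd_release,
 *	};
 *
 * alloc_begin_fd_uobject() WARNs and fails unless the type's release
 * method is uverbs_uobject_fd_release() or uverbs_async_event_release().
 */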
/*
 * Drop the ucontext off the ufile and completely disconnect it from the
 * ib_device
 */
static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
				   enum rdma_remove_reason reason)
{
	struct ib_ucontext *ucontext = ufile->ucontext;
	struct ib_device *ib_dev = ucontext->device;

	/*
	 * If we are closing the FD then the user mmap VMAs must have
	 * already been destroyed as they hold on to the filep, otherwise
	 * they need to be zap'd.
	 */
	if (reason == RDMA_REMOVE_DRIVER_REMOVE) {
		uverbs_user_mmap_disassociate(ufile);
		if (ib_dev->ops.disassociate_ucontext)
			ib_dev->ops.disassociate_ucontext(ucontext);
	}

	ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev,
			   RDMACG_RESOURCE_HCA_HANDLE);

	rdma_restrack_del(&ucontext->res);

	ib_dev->ops.dealloc_ucontext(ucontext);
	WARN_ON(!xa_empty(&ucontext->mmap_xa));
	kfree(ucontext);

	ufile->ucontext = NULL;
}

static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
				  enum rdma_remove_reason reason)
{
	struct ib_uobject *obj, *next_obj;
	int ret = -EINVAL;
	struct uverbs_attr_bundle attrs = { .ufile = ufile };

	/*
	 * This shouldn't run while executing other commands on this
	 * context. Thus, the only thing we should take care of is
	 * releasing a FD while traversing this list. The FD could be
	 * closed and released from the _release fop of this FD.
	 * In order to mitigate this, we add a lock.
	 * We take and release the lock per traversal in order to give
	 * other threads (which might still use the FDs) a chance to run.
	 */
	list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) {
		attrs.context = obj->context;
		/*
		 * If we hit this WARN_ON, that means we are
		 * racing with a lookup_get.
		 */
		WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
		if (!uverbs_destroy_uobject(obj, reason, &attrs))
			ret = 0;
		else
			atomic_set(&obj->usecnt, 0);
	}
	return ret;
}

/*
 * Destroy the ucontext and every uobject associated with it.
 *
 * This is internally locked and can be called in parallel from multiple
 * contexts.
 */
void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile,
			     enum rdma_remove_reason reason)
{
	down_write(&ufile->hw_destroy_rwsem);

	/*
	 * If a ucontext was never created then we can't have any uobjects to
	 * cleanup, nothing to do.
	 */
	if (!ufile->ucontext)
		goto done;

	ufile->ucontext->closing = true;
	ufile->ucontext->cleanup_retryable = true;
	while (!list_empty(&ufile->uobjects))
		if (__uverbs_cleanup_ufile(ufile, reason)) {
			/*
			 * No entry was cleaned-up successfully during this
			 * iteration
			 */
			break;
		}

	ufile->ucontext->cleanup_retryable = false;
	if (!list_empty(&ufile->uobjects))
		__uverbs_cleanup_ufile(ufile, reason);

	ufile_destroy_ucontext(ufile, reason);

done:
	up_write(&ufile->hw_destroy_rwsem);
}

const struct uverbs_obj_type_class uverbs_fd_class = {
	.alloc_begin = alloc_begin_fd_uobject,
	.lookup_get = lookup_get_fd_uobject,
	.alloc_commit = alloc_commit_fd_uobject,
	.alloc_abort = alloc_abort_fd_uobject,
	.lookup_put = lookup_put_fd_uobject,
	.destroy_hw = destroy_hw_fd_uobject,
	.remove_handle = remove_handle_fd_uobject,
};
EXPORT_SYMBOL(uverbs_fd_class);

struct ib_uobject *
uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
			     s64 id, struct uverbs_attr_bundle *attrs)
{
	const struct uverbs_api_object *obj =
		uapi_get_object(attrs->ufile->device->uapi, object_id);

	switch (access) {
	case UVERBS_ACCESS_READ:
		return rdma_lookup_get_uobject(obj, attrs->ufile, id,
					       UVERBS_LOOKUP_READ, attrs);
	case UVERBS_ACCESS_DESTROY:
		/* Actual destruction is done inside uverbs_handle_method */
		return rdma_lookup_get_uobject(obj, attrs->ufile, id,
					       UVERBS_LOOKUP_DESTROY, attrs);
	case UVERBS_ACCESS_WRITE:
		return rdma_lookup_get_uobject(obj, attrs->ufile, id,
					       UVERBS_LOOKUP_WRITE, attrs);
	case UVERBS_ACCESS_NEW:
		return rdma_alloc_begin_uobject(obj, attrs);
	default:
		WARN_ON(true);
		return ERR_PTR(-EOPNOTSUPP);
	}
}

void uverbs_finalize_object(struct ib_uobject *uobj,
			    enum uverbs_obj_access access, bool hw_obj_valid,
			    bool commit, struct uverbs_attr_bundle *attrs)
{
	/*
	 * refcounts should be handled at the object level and not at the
	 * uobject level. Refcounts of the objects themselves are done in
	 * handlers.
	 */

	switch (access) {
	case UVERBS_ACCESS_READ:
		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
		break;
	case UVERBS_ACCESS_WRITE:
		rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
		break;
	case UVERBS_ACCESS_DESTROY:
		if (uobj)
			rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
		break;
	case UVERBS_ACCESS_NEW:
		if (commit)
			rdma_alloc_commit_uobject(uobj, attrs);
		else
			rdma_alloc_abort_uobject(uobj, attrs, hw_obj_valid);
		break;
	default:
		WARN_ON(true);
	}
}
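/*
 * Illustrative, simplified sketch of how the two functions above pair up
 * in the ioctl machinery; method_handler() is hypothetical:
 *
 *	uobj = uverbs_get_uobject_from_file(object_id, UVERBS_ACCESS_READ,
 *					    id, attrs);
 *	if (IS_ERR(uobj))
 *		return PTR_ERR(uobj);
 *	ret = method_handler(attrs);
 *	uverbs_finalize_object(uobj, UVERBS_ACCESS_READ, false, !ret, attrs);
 */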