// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/seq_file.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have attached one exclusive fence (normally associated with
 * write operations) or N shared fences (read operations). The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 *
 * See struct dma_resv for more details.
 */

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
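
/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * file's API): the fence of a queued job is published through the buffer's
 * reservation object, as an exclusive fence for writes or a shared fence
 * for reads, so that other users of the buffer can depend on it.
 *
 *	int ret = dma_resv_lock(&bo->resv, NULL);
 *
 *	if (ret)
 *		return ret;
 *	if (job_writes_buffer) {
 *		dma_resv_add_excl_fence(&bo->resv, job_fence);
 *	} else {
 *		ret = dma_resv_reserve_shared(&bo->resv, 1);
 *		if (!ret)
 *			dma_resv_add_shared_fence(&bo->resv, job_fence);
 *	}
 *	dma_resv_unlock(&bo->resv);
 */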

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
	if (!list)
		return NULL;

	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references to it must have
	 * been released, so there is no need for RCU protection.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
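
/*
 * Illustrative lifecycle sketch (hypothetical driver object, not part of
 * this file): a reservation object is normally embedded in a driver's
 * buffer structure, initialized once at object creation and torn down in
 * the final release path when no other references can exist.
 *
 *	struct my_bo {
 *		struct kref refcount;
 *		struct dma_resv resv;
 *	};
 *
 *	static struct my_bo *my_bo_create(void)
 *	{
 *		struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *
 *		if (!bo)
 *			return NULL;
 *		kref_init(&bo->refcount);
 *		dma_resv_init(&bo->resv);
 *		return bo;
 *	}
 *
 *	static void my_bo_release(struct kref *kref)
 *	{
 *		struct my_bo *bo = container_of(kref, struct my_bo, refcount);
 *
 *		dma_resv_fini(&bo->resv);
 *		kfree(bo);
 *	}
 */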

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence(). Must
 * be called with @obj locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_shared_fence(). This is validated
 * when CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		max = max(old->shared_count + num_fences, old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_shared_max - reset shared fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved shared slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_shared(). See also
 * &dma_resv_list.shared_max.
 */
void dma_resv_reset_shared_max(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_shared_list(obj);

	dma_resv_assert_held(obj);

	/* Test shared fence slot reservation */
	if (fences)
		fences->shared_max = fences->shared_count;
}
EXPORT_SYMBOL(dma_resv_reset_shared_max);
#endif

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot. @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_shared() must have been called.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	/* Drivers should not add containers here, instead add each fence
	 * individually.
	 */
	WARN_ON(dma_fence_is_container(fence));

	fobj = dma_resv_shared_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {
		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);
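
/*
 * Illustrative sketch of the shared-slot protocol (hypothetical helper, not
 * part of this file): slots for all fences that will be added must be
 * reserved up front while holding the lock, and must be reserved again if
 * the object is unlocked before the fences are actually added.
 *
 *	static int my_attach_read_fences(struct dma_resv *resv,
 *					 struct dma_fence **fences,
 *					 unsigned int count)
 *	{
 *		unsigned int i;
 *		int ret;
 *
 *		ret = dma_resv_lock(resv, NULL);
 *		if (ret)
 *			return ret;
 *
 *		ret = dma_resv_reserve_shared(resv, count);
 *		if (!ret)
 *			for (i = 0; i < count; ++i)
 *				dma_resv_add_shared_fence(resv, fences[i]);
 *
 *		dma_resv_unlock(resv);
 *		return ret;
 *	}
 */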

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
 * Note that this function replaces all fences attached to @obj, see also
 * &dma_resv.fence_excl for a discussion of the semantics.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old)
		i = old->shared_count;

	dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
							dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);

/* Restart the iterator by initializing all the necessary fields, but not the
 * relation to the dma_resv object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
	cursor->seq = read_seqcount_begin(&cursor->obj->seq);
	cursor->index = -1;
	cursor->shared_count = 0;
	if (cursor->all_fences) {
		cursor->fences = dma_resv_shared_list(cursor->obj);
		if (cursor->fences)
			cursor->shared_count = cursor->fences->shared_count;
	} else {
		cursor->fences = NULL;
	}
	cursor->is_restarted = true;
}

/* Walk to the next not signaled fence and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
	struct dma_resv *obj = cursor->obj;

	do {
		/* Drop the reference from the previous round */
		dma_fence_put(cursor->fence);

		if (cursor->index == -1) {
			cursor->fence = dma_resv_excl_fence(obj);
			cursor->index++;
			if (!cursor->fence)
				continue;

		} else if (!cursor->fences ||
			   cursor->index >= cursor->shared_count) {
			cursor->fence = NULL;
			break;

		} else {
			struct dma_resv_list *fences = cursor->fences;
			unsigned int idx = cursor->index++;

			cursor->fence = rcu_dereference(fences->shared[idx]);
		}
		cursor->fence = dma_fence_get_rcu(cursor->fence);
		if (!cursor->fence || !dma_fence_is_signaled(cursor->fence))
			break;
	} while (true);
}

/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
 *
 * Beware that the iterator can be restarted. Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_first() whenever possible.
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
	rcu_read_lock();
	do {
		dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);

/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Beware that the iterator can be restarted. Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_next() whenever possible.
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
	bool restart;

	rcu_read_lock();
	cursor->is_restarted = false;
	restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq);
	do {
		if (restart)
			dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
		restart = true;
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
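
/*
 * Illustrative sketch of unlocked iteration (hypothetical stats helper, not
 * part of this file): because the fence list can change concurrently, the
 * walk may restart, and anything accumulated so far must then be thrown
 * away and recomputed.
 *
 *	static unsigned int my_count_busy_fences(struct dma_resv *resv)
 *	{
 *		struct dma_resv_iter cursor;
 *		struct dma_fence *fence;
 *		unsigned int busy = 0;
 *
 *		dma_resv_iter_begin(&cursor, resv, true);
 *		dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *			if (dma_resv_iter_is_restarted(&cursor))
 *				busy = 0;
 *			if (!dma_fence_is_signaled(fence))
 *				busy++;
 *		}
 *		dma_resv_iter_end(&cursor);
 *		return busy;
 *	}
 */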

/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next().
 *
 * Return the first fence in the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->index = 0;
	if (cursor->all_fences)
		cursor->fences = dma_resv_shared_list(cursor->obj);
	else
		cursor->fences = NULL;

	fence = dma_resv_excl_fence(cursor->obj);
	if (!fence)
		fence = dma_resv_iter_next(cursor);

	cursor->is_restarted = true;
	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);

/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the next fence from the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
	unsigned int idx;

	dma_resv_assert_held(cursor->obj);

	cursor->is_restarted = false;
	if (!cursor->fences || cursor->index >= cursor->fences->shared_count)
		return NULL;

	idx = cursor->index++;
	return rcu_dereference_protected(cursor->fences->shared[idx],
					 dma_resv_held(cursor->obj));
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. @dst must be locked with dma_resv_lock().
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_iter cursor;
	struct dma_resv_list *list;
	struct dma_fence *f, *excl;

	dma_resv_assert_held(dst);

	list = NULL;
	excl = NULL;

	dma_resv_iter_begin(&cursor, src, true);
	dma_resv_for_each_fence_unlocked(&cursor, f) {
		if (dma_resv_iter_is_restarted(&cursor)) {
			dma_resv_list_free(list);
			dma_fence_put(excl);

			if (cursor.shared_count) {
				list = dma_resv_list_alloc(cursor.shared_count);
				if (!list) {
					dma_resv_iter_end(&cursor);
					return -ENOMEM;
				}

				list->shared_count = 0;

			} else {
				list = NULL;
			}
			excl = NULL;
		}

		dma_fence_get(f);
		if (dma_resv_iter_is_exclusive(&cursor))
			excl = f;
		else
			RCU_INIT_POINTER(list->shared[list->shared_count++], f);
	}
	dma_resv_iter_end(&cursor);

	write_seqcount_begin(&dst->seq);
	excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
	list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(list);
	dma_fence_put(excl);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @write: true if we should return all fences
 * @num_fences: the number of fences returned
 * @fences: the array of fence ptrs returned (array is krealloc'd to the
 * required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object.
 * Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, bool write,
			unsigned int *num_fences, struct dma_fence ***fences)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	*num_fences = 0;
	*fences = NULL;

	dma_resv_iter_begin(&cursor, obj, write);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor)) {
			unsigned int count;

			while (*num_fences)
				dma_fence_put((*fences)[--(*num_fences)]);

			count = cursor.shared_count + 1;

			/* Eventually re-allocate the array */
			*fences = krealloc_array(*fences, count,
						 sizeof(void *),
						 GFP_KERNEL);
			if (count && !*fences) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
		}

		(*fences)[(*num_fences)++] = dma_fence_get(fence);
	}
	dma_resv_iter_end(&cursor);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
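
/*
 * Illustrative caller sketch for dma_resv_get_fences() (hypothetical helper,
 * not part of this file): the returned array holds one reference per fence,
 * so the caller must put every fence and then free the array itself.
 *
 *	static long my_wait_on_snapshot(struct dma_resv *resv, bool write)
 *	{
 *		struct dma_fence **fences;
 *		unsigned int i, count;
 *		long ret;
 *
 *		ret = dma_resv_get_fences(resv, write, &count, &fences);
 *		if (ret)
 *			return ret;
 *
 *		for (i = 0; i < count && ret >= 0; ++i)
 *			ret = dma_fence_wait(fences[i], true);
 *
 *		for (i = 0; i < count; ++i)
 *			dma_fence_put(fences[i]);
 *		kfree(fences);
 *		return ret < 0 ? ret : 0;
 *	}
 */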

/**
 * dma_resv_wait_timeout - Wait on the reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, wait_all);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		ret = dma_fence_wait_timeout(fence, intr, ret);
		if (ret <= 0) {
			dma_resv_iter_end(&cursor);
			return ret;
		}
	}
	dma_resv_iter_end(&cursor);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
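
/*
 * Illustrative sketch of waiting before CPU access (hypothetical caller and
 * my_cpu_access(), not part of this file): a negative return value is an
 * error such as -ERESTARTSYS, zero means the timeout expired while fences
 * were still busy, and a positive value is the remaining time in jiffies,
 * i.e. all relevant fences have signaled.
 *
 *	long timeout = dma_resv_wait_timeout(&bo->resv, true, true,
 *					     msecs_to_jiffies(100));
 *
 *	if (timeout < 0)
 *		return timeout;
 *	if (timeout == 0)
 *		return -ETIMEDOUT;
 *	my_cpu_access(bo);
 */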

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, test_all);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_resv_iter_end(&cursor);
		return false;
	}
	dma_resv_iter_end(&cursor);
	return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);

/**
 * dma_resv_describe - Dump description of the resv object into seq_file
 * @obj: the reservation object
 * @seq: the seq_file to dump the description into
 *
 * Dump a textual description of the fences inside a dma_resv object into the
 * seq_file.
 */
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, obj, true, fence) {
		seq_printf(seq, "\t%s fence:",
			   dma_resv_iter_is_exclusive(&cursor) ?
				"Exclusive" : "Shared");
		dma_fence_describe(fence, seq);
	}
}
EXPORT_SYMBOL_GPL(dma_resv_describe);

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif