/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/sched/mm.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have one exclusive fence attached (normally associated with
 * write operations) or N shared fences (read operations). RCU is
 * used to protect read access to the fences from concurrent,
 * locked write-side updates.
 */
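
/*
 * A minimal write-side usage sketch (illustrative only; error handling
 * is trimmed and "bo" is a hypothetical driver object embedding a
 * struct dma_resv). It mirrors the lock/slowpath sequence exercised by
 * dma_resv_lockdep() below:
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *	ret = dma_resv_lock(&bo->resv, &ctx);
 *	if (ret == -EDEADLK)
 *		dma_resv_lock_slow(&bo->resv, &ctx);
 *
 *	... add or inspect fences while the lock is held ...
 *
 *	dma_resv_unlock(&bo->resv);
 *	ww_acquire_fini(&ctx);
 */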

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);

const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
	if (!list)
		return NULL;

	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);

	down_read(&mm->mmap_sem);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	up_read(&mm->mmap_sem);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);

	__seqcount_init(&obj->seq, reservation_seqcount_string,
			&reservation_seqcount_class);
	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references to it must
	 * have been released, so there is no need for RCU protection.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
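
/*
 * Lifecycle sketch (illustrative; "my_buffer" and "buf" are hypothetical
 * driver names): a reservation object is typically embedded in a buffer
 * object, with init/fini paired around the buffer's lifetime.
 *
 *	struct my_buffer {
 *		struct dma_resv resv;
 *		...
 *	};
 *
 *	dma_resv_init(&buf->resv);	(at buffer creation)
 *	...
 *	dma_resv_fini(&buf->resv);	(at final release, once no other
 *					 references to the buffer remain)
 */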

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence(). Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);

	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		else
			max = max(old->shared_count + num_fences,
				  old->shared_max * 2);
	} else {
		max = 4;
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);
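
/*
 * Usage sketch for the reserve/add pairing (illustrative; "fences",
 * "num", "i" and "ret" are hypothetical, and error handling of the lock
 * call is trimmed). The reservation must happen under the same lock
 * hold as the subsequent adds, since dma_resv_add_shared_fence()
 * consumes the reserved slots:
 *
 *	dma_resv_lock(obj, NULL);
 *	ret = dma_resv_reserve_shared(obj, num);
 *	if (!ret)
 *		for (i = 0; i < num; ++i)
 *			dma_resv_add_shared_fence(obj, fences[i]);
 *	dma_resv_unlock(obj);
 */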

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot. obj->lock must be held and
 * dma_resv_reserve_shared() must have been called.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_get_list(obj);
	count = fobj->shared_count;

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	preempt_enable();
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. The obj->lock must be held.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_get_excl(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
							dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst->lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = rcu_dereference(src->fence);

retry:
	if (src_list) {
		unsigned shared_count = src_list->shared_count;

		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		src_list = rcu_dereference(src->fence);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = rcu_dereference(src->fence);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	src_list = dma_resv_get_list(dst);
	old = dma_resv_get_excl(dst);

	preempt_disable();
	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);
	preempt_enable();

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
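
/*
 * Caller-side sketch for dma_resv_copy_fences() (illustrative; "dst_bo"
 * and "src_bo" are hypothetical buffer objects), e.g. when cloning a
 * buffer's fence state. Only dst needs to be locked; src is read under
 * RCU by the function itself:
 *
 *	dma_resv_lock(&dst_bo->resv, NULL);
 *	ret = dma_resv_copy_fences(&dst_bo->resv, &src_bo->resv);
 *	dma_resv_unlock(&dst_bo->resv);
 */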

/**
 * dma_resv_get_fences_rcu - Get an object's shared and exclusive
 * fences without the update-side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
			    unsigned *pshared_count,
			    struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = rcu_dereference(obj->fence_excl);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = rcu_dereference(obj->fence);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
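
/*
 * Caller-side sketch for dma_resv_get_fences_rcu() (illustrative): the
 * returned fences hold references that must be dropped, and the shared
 * array is a single allocation to be freed with kfree(). dma_fence_put()
 * tolerates a NULL excl fence:
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int count, i;
 *	int ret;
 *
 *	ret = dma_resv_get_fences_rcu(obj, &excl, &count, &shared);
 *	if (ret)
 *		return ret;
 *
 *	... use excl and shared[0..count-1] ...
 *
 *	for (i = 0; i < count; ++i)
 *		dma_fence_put(shared[i]);
 *	kfree(shared);
 *	dma_fence_put(excl);
 */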

/**
 * dma_resv_wait_timeout_rcu - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
			       bool wait_all, bool intr,
			       unsigned long timeout)
{
	struct dma_fence *fence;
	unsigned seq, shared_count;
	long ret = timeout ? timeout : 1;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	fence = rcu_dereference(obj->fence_excl);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}

	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);

static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}

/**
 * dma_resv_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
	unsigned seq, shared_count;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		unsigned i;

		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}

		if (read_seqcount_retry(&obj->seq, seq))
			goto retry;
	}

	if (!shared_count) {
		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (fence_excl) {
			ret = dma_resv_test_signaled_single(fence_excl);
			if (ret < 0)
				goto retry;

			if (read_seqcount_retry(&obj->seq, seq))
				goto retry;
		}
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
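
/*
 * Caller-side sketch for the RCU wait/test helpers above (illustrative;
 * the 100ms timeout is an arbitrary example value):
 *
 *	long ret;
 *
 *	ret = dma_resv_wait_timeout_rcu(obj, true, true,
 *					msecs_to_jiffies(100));
 *	if (ret > 0)
 *		... all fences signaled before the timeout ...
 *	else if (ret == 0)
 *		... the wait timed out ...
 *	else
 *		... interrupted, typically -ERESTARTSYS ...
 *
 * A non-blocking check uses the test helper instead:
 *
 *	if (dma_resv_test_signaled_rcu(obj, true))
 *		... nothing left to wait for ...
 */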