/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/sched/signal.h>

#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>

EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);

static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;

/*
 * fence context counter: each execution context should have its own
 * fence context, this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);

/**
 * DOC: DMA fences overview
 *
 * DMA fences, represented by &struct dma_fence, are the kernel internal
 * synchronization primitive for DMA operations like GPU rendering, video
 * encoding/decoding, or displaying buffers on a screen.
 *
 * A fence is initialized using dma_fence_init() and completed using
 * dma_fence_signal(). Fences are associated with a context, allocated through
 * dma_fence_context_alloc(), and all fences on the same context are
 * fully ordered.
 *
 * Since the purpose of fences is to facilitate cross-device and
 * cross-application synchronization, there are multiple ways to use one:
 *
 * - Individual fences can be exposed as a &sync_file, accessed as a file
 *   descriptor from userspace, created by calling sync_file_create(). This is
 *   called explicit fencing, since userspace passes around explicit
 *   synchronization points.
 *
 * - Some subsystems also have their own explicit fencing primitives, like
 *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
 *   fence to be updated.
 *
 * - Then there's also implicit fencing, where the synchronization points are
 *   implicitly passed around as part of shared &dma_buf instances. Such
 *   implicit fences are stored in &struct reservation_object through the
 *   &dma_buf.resv pointer.
 */

static const char *dma_fence_stub_get_name(struct dma_fence *fence)
{
	return "stub";
}

static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_get_name,
	.get_timeline_name = dma_fence_stub_get_name,
};
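/*
 * Illustrative only, not part of this file: a driver typically embeds
 * &struct dma_fence in its own job structure and provides its own ops,
 * much like the stub ops above. All names here (my_fence, my_fence_ops)
 * are hypothetical; a minimal sketch could look like:
 *
 *	struct my_fence {
 *		struct dma_fence base;
 *		// driver-private state goes here
 *	};
 *
 *	static const char *my_get_driver_name(struct dma_fence *f)
 *	{
 *		return "my_driver";
 *	}
 *
 *	static const char *my_get_timeline_name(struct dma_fence *f)
 *	{
 *		return "my_timeline";
 *	}
 *
 *	static const struct dma_fence_ops my_fence_ops = {
 *		.get_driver_name = my_get_driver_name,
 *		.get_timeline_name = my_get_timeline_name,
 *	};
 */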
/**
 * dma_fence_get_stub - return a signaled fence
 *
 * Return a stub fence which is already signaled.
 */
struct dma_fence *dma_fence_get_stub(void)
{
	spin_lock(&dma_fence_stub_lock);
	if (!dma_fence_stub.ops) {
		dma_fence_init(&dma_fence_stub,
			       &dma_fence_stub_ops,
			       &dma_fence_stub_lock,
			       0, 0);
		dma_fence_signal_locked(&dma_fence_stub);
	}
	spin_unlock(&dma_fence_stub_lock);

	return dma_fence_get(&dma_fence_stub);
}
EXPORT_SYMBOL(dma_fence_get_stub);

/**
 * dma_fence_context_alloc - allocate an array of fence contexts
 * @num: amount of contexts to allocate
 *
 * This function will return the first index of the number of fence contexts
 * allocated. The fence context is used for setting &dma_fence.context to a
 * unique number by passing the context to dma_fence_init().
 */
u64 dma_fence_context_alloc(unsigned num)
{
	WARN_ON(!num);
	return atomic64_add_return(num, &dma_fence_context_counter) - num;
}
EXPORT_SYMBOL(dma_fence_context_alloc);

/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
 * held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signaled already.
 */
int dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *cur, *tmp;
	int ret = 0;

	if (WARN_ON(!fence))
		return -EINVAL;

	lockdep_assert_held(fence->lock);

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		ret = -EINVAL;

		/*
		 * we might have raced with the unlocked dma_fence_signal,
		 * still run through all callbacks
		 */
	} else {
		fence->timestamp = ktime_get();
		set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
		trace_dma_fence_signaled(fence);
	}

	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
		list_del_init(&cur->node);
		cur->func(fence, cur);
	}
	return ret;
}
EXPORT_SYMBOL(dma_fence_signal_locked);
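/*
 * Illustrative only: a driver that already holds the fence lock, e.g. in an
 * interrupt handler walking its list of pending fences, would call
 * dma_fence_signal_locked() rather than dma_fence_signal(). All names below
 * are hypothetical, and this assumes &ring->fence_lock is the spinlock that
 * was passed to dma_fence_init() for these fences:
 *
 *	spin_lock_irqsave(&ring->fence_lock, flags);
 *	list_for_each_entry_safe(f, tmp, &ring->pending, link) {
 *		if (!my_fence_is_done(f))
 *			break;
 *		list_del(&f->link);
 *		dma_fence_signal_locked(&f->base);
 *	}
 *	spin_unlock_irqrestore(&ring->fence_lock, flags);
 */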
/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signaled already.
 */
int dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;

	if (!fence)
		return -EINVAL;

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	fence->timestamp = ktime_get();
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
	trace_dma_fence_signaled(fence);

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
		struct dma_fence_cb *cur, *tmp;

		spin_lock_irqsave(fence->lock, flags);
		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(fence, cur);
		}
		spin_unlock_irqrestore(fence->lock, flags);
	}
	return 0;
}
EXPORT_SYMBOL(dma_fence_signal);

/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned by custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
 */
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	trace_dma_fence_wait_start(fence);
	if (fence->ops->wait)
		ret = fence->ops->wait(fence, intr, timeout);
	else
		ret = dma_fence_default_wait(fence, intr, timeout);
	trace_dma_fence_wait_end(fence);
	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);

/**
 * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
 *
 * This is the default release function for &dma_fence. Drivers shouldn't call
 * this directly, but instead call dma_fence_put().
 */
void dma_fence_release(struct kref *kref)
{
	struct dma_fence *fence =
		container_of(kref, struct dma_fence, refcount);

	trace_dma_fence_destroy(fence);

	/* Failed to signal before release, could be a refcounting issue */
	WARN_ON(!list_empty(&fence->cb_list));

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);

/**
 * dma_fence_free - default release function for &dma_fence
 * @fence: fence to release
 *
 * This is the default implementation for &dma_fence_ops.release. It calls
 * kfree_rcu() on @fence.
 */
void dma_fence_free(struct dma_fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);
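/*
 * Illustrative only: a typical caller-side wait, converting a millisecond
 * budget to jiffies. The error mapping is the caller's choice:
 *
 *	signed long ret;
 *
 *	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *	if (ret < 0)
 *		return ret;		// -ERESTARTSYS or a driver error
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// not signaled within 100ms
 *	// ret > 0: signaled, with ret jiffies of the budget remaining
 */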
/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence: the fence to enable
 *
 * This will request that software signaling be enabled, to make the fence
 * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
 * internally.
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	unsigned long flags;

	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			      &fence->flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
	    fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		spin_lock_irqsave(fence->lock, flags);

		if (!fence->ops->enable_signaling(fence))
			dma_fence_signal_locked(fence);

		spin_unlock_irqrestore(fence->lock, flags);
	}
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);

/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: the fence to wait on
 * @cb: the callback to register
 * @func: the function to call
 *
 * @cb will be initialized by dma_fence_add_callback(), no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * Note that the callback can be called from atomic or irq context. If the
 * fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Add a software callback to the fence. The same restrictions on the
 * refcount apply as for dma_fence_wait(); however, the caller doesn't need to
 * keep a refcount to the fence after dma_fence_add_callback() has returned:
 * when software access is enabled, the creator of the fence is required to
 * keep the fence alive until after it signals with dma_fence_signal().
 *
 * Returns 0 in case of success, -ENOENT if the fence is already signaled
 * and -EINVAL in case of error.
 */
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
			   dma_fence_func_t func)
{
	unsigned long flags;
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set && fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		INIT_LIST_HEAD(&cb->node);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);
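/*
 * Illustrative only: callers usually embed &struct dma_fence_cb in a larger
 * structure and recover it with container_of() in the callback. The names
 * below (my_waiter, my_fence_cb) are hypothetical:
 *
 *	struct my_waiter {
 *		struct dma_fence_cb cb;
 *		struct completion done;
 *	};
 *
 *	static void my_fence_cb(struct dma_fence *fence,
 *				struct dma_fence_cb *cb)
 *	{
 *		struct my_waiter *w = container_of(cb, struct my_waiter, cb);
 *
 *		complete(&w->done);	// may run in irq context, keep short
 *	}
 *
 *	// registration; -ENOENT means the fence already signaled:
 *	if (dma_fence_add_callback(fence, &w->cb, my_fence_cb) == -ENOENT)
 *		complete(&w->done);
 */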
/**
 * dma_fence_get_status - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * This wraps dma_fence_get_status_locked() to return the error status
 * condition on a signaled fence. See dma_fence_get_status_locked() for more
 * details.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence has completed with an error.
 */
int dma_fence_get_status(struct dma_fence *fence)
{
	unsigned long flags;
	int status;

	spin_lock_irqsave(fence->lock, flags);
	status = dma_fence_get_status_locked(fence);
	spin_unlock_irqrestore(fence->lock, flags);

	return status;
}
EXPORT_SYMBOL(dma_fence_get_status);

/**
 * dma_fence_remove_callback - remove a callback from the signaling list
 * @fence: the fence to wait on
 * @cb: the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 *
 * Behaviour is undefined if @cb has not been added to @fence using
 * dma_fence_add_callback() beforehand.
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(fence->lock, flags);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_remove_callback);

struct default_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_state(wait->task, TASK_NORMAL);
}
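/*
 * Illustrative only: a producer can record a failure with
 * dma_fence_set_error() (from <linux/dma-fence.h>, which must be called
 * before signaling), and a consumer can query it afterwards via
 * dma_fence_get_status() above. A hypothetical sketch:
 *
 *	// producer, on a failed job:
 *	dma_fence_set_error(fence, -EIO);
 *	dma_fence_signal(fence);
 *
 *	// consumer:
 *	int status = dma_fence_get_status(fence);
 *	if (status < 0)
 *		pr_warn("job completed with error %d\n", status);
 */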
/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. If timeout is zero the value one is
 * returned if the fence is already signaled for consistency with other
 * functions taking a jiffies timeout.
 */
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout ? timeout : 1;
	bool was_set;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return ret;

	spin_lock_irqsave(fence->lock, flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (!was_set && fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			goto out;
		}
	}

	if (!timeout) {
		ret = 0;
		goto out;
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
	       ret > 0) {
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);

static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
			    uint32_t *idx)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			if (idx)
				*idx = i;
			return true;
		}
	}
	return false;
}
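/*
 * Illustrative only: typical use of dma_fence_wait_any_timeout() (defined
 * below), waiting for whichever of two hypothetical fences signals first.
 * The caller must hold a reference on every fence in the array:
 *
 *	struct dma_fence *fences[2] = { render_done, copy_done };
 *	uint32_t first;
 *	signed long ret;
 *
 *	ret = dma_fence_wait_any_timeout(fences, 2, true,
 *					 msecs_to_jiffies(500), &first);
 *	if (ret > 0)
 *		pr_debug("fence %u signaled first\n", first);
 *	else if (ret == 0)
 *		pr_debug("timed out, neither fence signaled\n");
 *	else
 *		return ret;	// -ERESTARTSYS, -EINVAL or -ENOMEM
 */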
/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: array of fences to wait on
 * @count: number of fences to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @idx: used to store the first signaled fence index, meaningful only on
 *	 positive return
 *
 * Returns -ERESTARTSYS if interrupted, -EINVAL on invalid arguments, -ENOMEM
 * if allocating the callback array fails, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success.
 *
 * Synchronously waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_timeout().
 */
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
			   bool intr, signed long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}

		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;
	}

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		cb[i].task = current;
		if (dma_fence_add_callback(fence, &cb[i].base,
					   dma_fence_default_wait_cb)) {
			/* This fence is already signaled */
			if (idx)
				*idx = i;
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		if (dma_fence_test_signaled_any(fences, count, idx))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);

/**
 * dma_fence_init - Initialize a custom fence
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linearly increasing sequence number for this context
 *
 * Initializes an allocated fence. The caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * callers to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
	       spinlock_t *lock, u64 context, unsigned seqno)
{
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0UL;
	fence->error = 0;

	trace_dma_fence_init(fence);
}
EXPORT_SYMBOL(dma_fence_init);
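/*
 * Illustrative only: typical initialization in a driver, allocating one
 * context per engine at setup time and handing out increasing seqnos per
 * submission. All names are hypothetical (my_fence_ops as sketched near the
 * top of this file), and the seqno increment must be serialized by the
 * driver, e.g. under its submission lock:
 *
 *	// at engine setup:
 *	engine->fence_context = dma_fence_context_alloc(1);
 *	spin_lock_init(&engine->fence_lock);
 *
 *	// per submission:
 *	struct my_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	dma_fence_init(&f->base, &my_fence_ops, &engine->fence_lock,
 *		       engine->fence_context, ++engine->fence_seqno);
 */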