/*
 * SPDX-License-Identifier: MIT
 *
 * (C) Copyright 2016 Intel Corporation
 */

#include <linux/slab.h>
#include <linux/dma-fence.h>
#include <linux/irq_work.h>
#include <linux/dma-resv.h>

#include "i915_sw_fence.h"
#include "i915_selftest.h"

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define I915_SW_FENCE_BUG_ON(expr) BUG_ON(expr)
#else
#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

static DEFINE_SPINLOCK(i915_sw_fence_lock);

#define WQ_FLAG_BITS \
	BITS_PER_TYPE(typeof_member(struct wait_queue_entry, flags))

/* after WQ_FLAG_* for safety */
#define I915_SW_FENCE_FLAG_FENCE BIT(WQ_FLAG_BITS - 1)
#define I915_SW_FENCE_FLAG_ALLOC BIT(WQ_FLAG_BITS - 2)

enum {
	DEBUG_FENCE_IDLE = 0,
	DEBUG_FENCE_NOTIFY,
};

static void *i915_sw_fence_debug_hint(void *addr)
{
	return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK);
}

#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS

static const struct debug_obj_descr i915_sw_fence_debug_descr = {
	.name = "i915_sw_fence",
	.debug_hint = i915_sw_fence_debug_hint,
};

static inline void debug_fence_init(struct i915_sw_fence *fence)
{
	debug_object_init(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
	debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
	debug_object_activate(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_set_state(struct i915_sw_fence *fence,
					 int old, int new)
{
	debug_object_active_state(fence, &i915_sw_fence_debug_descr, old, new);
}

static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
{
	debug_object_deactivate(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
	debug_object_destroy(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_free(struct i915_sw_fence *fence)
{
	debug_object_free(fence, &i915_sw_fence_debug_descr);
	smp_wmb(); /* flush the change in state before reallocation */
}

static inline void debug_fence_assert(struct i915_sw_fence *fence)
{
	debug_object_assert_init(fence, &i915_sw_fence_debug_descr);
}

#else

static inline void debug_fence_init(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_set_state(struct i915_sw_fence *fence,
					 int old, int new)
{
}

static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_free(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_assert(struct i915_sw_fence *fence)
{
}

#endif

static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
				  enum i915_sw_fence_notify state)
{
	i915_sw_fence_notify_t fn;

	fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK);
	return fn(fence, state);
}

#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
void i915_sw_fence_fini(struct i915_sw_fence *fence)
{
	debug_fence_free(fence);
}
#endif
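/*
 * Overview, with a minimal usage sketch (illustrative only -- the
 * callback and variable names below are hypothetical, not part of this
 * file). An i915_sw_fence starts with a pending count of 1; each await
 * takes an extra count, and i915_sw_fence_commit() drops the initial
 * one, so the notify callback fires once every signaler has completed:
 *
 *	static int my_notify(struct i915_sw_fence *fence,
 *			     enum i915_sw_fence_notify state)
 *	{
 *		switch (state) {
 *		case FENCE_COMPLETE:	// all signalers done, or error set
 *			break;
 *		case FENCE_FREE:	// last reference, safe to release
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	i915_sw_fence_init(&fence, my_notify);
 *	err = i915_sw_fence_await_sw_fence_gfp(&fence, signaler, GFP_KERNEL);
 *	i915_sw_fence_commit(&fence);
 *
 * The notify function pointer itself is packed into fence->flags and
 * recovered via I915_SW_FENCE_MASK by __i915_sw_fence_notify() above,
 * which is why __i915_sw_fence_init() insists on a suitably aligned fn.
 */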
static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
					struct list_head *continuation)
{
	wait_queue_head_t *x = &fence->wait;
	wait_queue_entry_t *pos, *next;
	unsigned long flags;

	debug_fence_deactivate(fence);
	atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */

	/*
	 * To prevent unbounded recursion as we traverse the graph of
	 * i915_sw_fences, we move the entry list from this, the next ready
	 * fence, to the tail of the original fence's entry list (and so
	 * they are added to the list to be woken).
	 */

	spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
	if (continuation) {
		list_for_each_entry_safe(pos, next, &x->head, entry) {
			if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
				list_move_tail(&pos->entry, continuation);
			else
				pos->func(pos, TASK_NORMAL, 0, continuation);
		}
	} else {
		LIST_HEAD(extra);

		do {
			list_for_each_entry_safe(pos, next, &x->head, entry) {
				int wake_flags;

				wake_flags = 0;
				if (pos->flags & I915_SW_FENCE_FLAG_FENCE)
					wake_flags = fence->error;

				pos->func(pos, TASK_NORMAL, wake_flags, &extra);
			}

			if (list_empty(&extra))
				break;

			list_splice_tail_init(&extra, &x->head);
		} while (1);
	}
	spin_unlock_irqrestore(&x->lock, flags);

	debug_fence_assert(fence);
}

static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
				     struct list_head *continuation)
{
	debug_fence_assert(fence);

	if (!atomic_dec_and_test(&fence->pending))
		return;

	debug_fence_set_state(fence, DEBUG_FENCE_IDLE, DEBUG_FENCE_NOTIFY);

	if (__i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
		return;

	debug_fence_set_state(fence, DEBUG_FENCE_NOTIFY, DEBUG_FENCE_IDLE);

	__i915_sw_fence_wake_up_all(fence, continuation);

	debug_fence_destroy(fence);
	__i915_sw_fence_notify(fence, FENCE_FREE);
}

void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
	debug_fence_assert(fence);

	if (WARN_ON(i915_sw_fence_done(fence)))
		return;

	__i915_sw_fence_complete(fence, NULL);
}
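/*
 * Note on the wakeup above: when one i915_sw_fence completes, dependent
 * i915_sw_fences on its waitqueue may complete in turn, each with its
 * own waitqueue of dependents. Rather than recurse (and risk running
 * out of kernel stack on a deep chain), i915_sw_fence_wake() hands the
 * continuation list back into __i915_sw_fence_wake_up_all(), which
 * splices the dependents' entries onto the original waitqueue and
 * drains them iteratively. A chain A -> B -> C is therefore woken by
 * the loop in A's wakeup, not by nested calls.
 */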
bool i915_sw_fence_await(struct i915_sw_fence *fence)
{
	int pending;

	/*
	 * It is only safe to add a new await to the fence while it has
	 * not yet been signaled (i.e. there are still existing signalers).
	 */
	pending = atomic_read(&fence->pending);
	do {
		if (pending < 1)
			return false;
	} while (!atomic_try_cmpxchg(&fence->pending, &pending, pending + 1));

	return true;
}

void __i915_sw_fence_init(struct i915_sw_fence *fence,
			  i915_sw_fence_notify_t fn,
			  const char *name,
			  struct lock_class_key *key)
{
	BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK);

	__init_waitqueue_head(&fence->wait, name, key);
	fence->flags = (unsigned long)fn;

	i915_sw_fence_reinit(fence);
}

void i915_sw_fence_reinit(struct i915_sw_fence *fence)
{
	debug_fence_init(fence);

	atomic_set(&fence->pending, 1);
	fence->error = 0;

	I915_SW_FENCE_BUG_ON(!fence->flags);
	I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head));
}

void i915_sw_fence_commit(struct i915_sw_fence *fence)
{
	debug_fence_activate(fence);
	i915_sw_fence_complete(fence);
}

static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
{
	i915_sw_fence_set_error_once(wq->private, flags);

	list_del(&wq->entry);
	__i915_sw_fence_complete(wq->private, key);

	if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
		kfree(wq);
	return 0;
}

static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
					   const struct i915_sw_fence * const signaler)
{
	wait_queue_entry_t *wq;

	if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return false;

	if (fence == signaler)
		return true;

	list_for_each_entry(wq, &fence->wait.head, entry) {
		if (wq->func != i915_sw_fence_wake)
			continue;

		if (__i915_sw_fence_check_if_after(wq->private, signaler))
			return true;
	}

	return false;
}

static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
{
	wait_queue_entry_t *wq;

	if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return;

	list_for_each_entry(wq, &fence->wait.head, entry) {
		if (wq->func != i915_sw_fence_wake)
			continue;

		__i915_sw_fence_clear_checked_bit(wq->private);
	}
}

static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
					 const struct i915_sw_fence * const signaler)
{
	unsigned long flags;
	bool err;

	if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
		return false;

	spin_lock_irqsave(&i915_sw_fence_lock, flags);
	err = __i915_sw_fence_check_if_after(fence, signaler);
	__i915_sw_fence_clear_checked_bit(fence);
	spin_unlock_irqrestore(&i915_sw_fence_lock, flags);

	return err;
}
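/*
 * The check above guards the invariant that the fence graph stays
 * acyclic: i915_sw_fence_check_if_after(fence, signaler) walks
 * depth-first from @fence over its waiters (each wq->private is a fence
 * awaiting on it), looking for @signaler. Finding it means @signaler
 * already waits, directly or transitively, on @fence, so letting @fence
 * also await @signaler would form a cycle in which neither side could
 * ever signal; __i915_sw_fence_await_sw_fence() below rejects that with
 * -EINVAL. The CHECKED bit, set and cleared under the global
 * i915_sw_fence_lock, keeps the walk from revisiting shared nodes. The
 * whole check compiles away unless CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
 * is enabled.
 */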
static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
					  struct i915_sw_fence *signaler,
					  wait_queue_entry_t *wq, gfp_t gfp)
{
	unsigned int pending;
	unsigned long flags;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (i915_sw_fence_done(signaler)) {
		i915_sw_fence_set_error_once(fence, signaler->error);
		return 0;
	}

	debug_fence_assert(signaler);

	/* The dependency graph must be acyclic. */
	if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
		return -EINVAL;

	pending = I915_SW_FENCE_FLAG_FENCE;
	if (!wq) {
		wq = kmalloc(sizeof(*wq), gfp);
		if (!wq) {
			if (!gfpflags_allow_blocking(gfp))
				return -ENOMEM;

			i915_sw_fence_wait(signaler);
			i915_sw_fence_set_error_once(fence, signaler->error);
			return 0;
		}

		pending |= I915_SW_FENCE_FLAG_ALLOC;
	}

	INIT_LIST_HEAD(&wq->entry);
	wq->flags = pending;
	wq->func = i915_sw_fence_wake;
	wq->private = fence;

	i915_sw_fence_await(fence);

	spin_lock_irqsave(&signaler->wait.lock, flags);
	if (likely(!i915_sw_fence_done(signaler))) {
		__add_wait_queue_entry_tail(&signaler->wait, wq);
		pending = 1;
	} else {
		i915_sw_fence_wake(wq, 0, signaler->error, NULL);
		pending = 0;
	}
	spin_unlock_irqrestore(&signaler->wait.lock, flags);

	return pending;
}

int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
				 struct i915_sw_fence *signaler,
				 wait_queue_entry_t *wq)
{
	return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
}

int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
				     struct i915_sw_fence *signaler,
				     gfp_t gfp)
{
	return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
}

struct i915_sw_dma_fence_cb_timer {
	struct i915_sw_dma_fence_cb base;
	struct dma_fence *dma;
	struct timer_list timer;
	struct irq_work work;
	struct rcu_head rcu;
};

static void dma_i915_sw_fence_wake(struct dma_fence *dma,
				   struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);

	i915_sw_fence_set_error_once(cb->fence, dma->error);
	i915_sw_fence_complete(cb->fence);
	kfree(cb);
}

static void timer_i915_sw_fence_wake(struct timer_list *t)
{
	struct i915_sw_dma_fence_cb_timer *cb = from_timer(cb, t, timer);
	struct i915_sw_fence *fence;

	fence = xchg(&cb->base.fence, NULL);
	if (!fence)
		return;

	pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%ps)\n",
		  cb->dma->ops->get_driver_name(cb->dma),
		  cb->dma->ops->get_timeline_name(cb->dma),
		  cb->dma->seqno,
		  i915_sw_fence_debug_hint(fence));

	i915_sw_fence_set_error_once(fence, -ETIMEDOUT);
	i915_sw_fence_complete(fence);
}

static void dma_i915_sw_fence_wake_timer(struct dma_fence *dma,
					 struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb_timer *cb =
		container_of(data, typeof(*cb), base.base);
	struct i915_sw_fence *fence;

	fence = xchg(&cb->base.fence, NULL);
	if (fence) {
		i915_sw_fence_set_error_once(fence, dma->error);
		i915_sw_fence_complete(fence);
	}

	irq_work_queue(&cb->work);
}

static void irq_i915_sw_fence_work(struct irq_work *wrk)
{
	struct i915_sw_dma_fence_cb_timer *cb =
		container_of(wrk, typeof(*cb), work);

	del_timer_sync(&cb->timer);
	dma_fence_put(cb->dma);

	kfree_rcu(cb, rcu);
}
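/*
 * Race handling for timed dma-fence waits: the timer expiry
 * (timer_i915_sw_fence_wake) and the dma-fence signal callback
 * (dma_i915_sw_fence_wake_timer) both claim the i915_sw_fence with
 * xchg(&cb->base.fence, NULL); only the winner reports an error and
 * completes it, the timer with -ETIMEDOUT. Teardown (del_timer_sync()
 * and dma_fence_put()) is not safe from the signal callback's context,
 * which may be hardirq, so it is deferred to irq_work, and the combined
 * callback is freed under RCU. Note that after a timeout the callback
 * and its dma-fence reference stay alive until the dma fence finally
 * signals and queues that irq_work.
 */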
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
				  struct dma_fence *dma,
				  unsigned long timeout,
				  gfp_t gfp)
{
	struct i915_sw_dma_fence_cb *cb;
	dma_fence_func_t func;
	int ret;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (dma_fence_is_signaled(dma)) {
		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb = kmalloc(timeout ?
		     sizeof(struct i915_sw_dma_fence_cb_timer) :
		     sizeof(struct i915_sw_dma_fence_cb),
		     gfp);
	if (!cb) {
		if (!gfpflags_allow_blocking(gfp))
			return -ENOMEM;

		ret = dma_fence_wait(dma, false);
		if (ret)
			return ret;

		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb->fence = fence;
	i915_sw_fence_await(fence);

	func = dma_i915_sw_fence_wake;
	if (timeout) {
		struct i915_sw_dma_fence_cb_timer *timer =
			container_of(cb, typeof(*timer), base);

		timer->dma = dma_fence_get(dma);
		init_irq_work(&timer->work, irq_i915_sw_fence_work);

		timer_setup(&timer->timer,
			    timer_i915_sw_fence_wake, TIMER_IRQSAFE);
		mod_timer(&timer->timer, round_jiffies_up(jiffies + timeout));

		func = dma_i915_sw_fence_wake_timer;
	}

	ret = dma_fence_add_callback(dma, &cb->base, func);
	if (ret == 0) {
		ret = 1;
	} else {
		func(dma, &cb->base);
		if (ret == -ENOENT) /* fence already signaled */
			ret = 0;
	}

	return ret;
}

static void __dma_i915_sw_fence_wake(struct dma_fence *dma,
				     struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);

	i915_sw_fence_set_error_once(cb->fence, dma->error);
	i915_sw_fence_complete(cb->fence);
}

int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
				    struct dma_fence *dma,
				    struct i915_sw_dma_fence_cb *cb)
{
	int ret;

	debug_fence_assert(fence);

	if (dma_fence_is_signaled(dma)) {
		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb->fence = fence;
	i915_sw_fence_await(fence);

	ret = 1;
	if (dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake)) {
		/* fence already signaled */
		__dma_i915_sw_fence_wake(dma, &cb->base);
		ret = 0;
	}

	return ret;
}

int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
				    struct dma_resv *resv,
				    const struct dma_fence_ops *exclude,
				    bool write,
				    unsigned long timeout,
				    gfp_t gfp)
{
	struct dma_fence *excl;
	int ret = 0, pending;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			if (shared[i]->ops == exclude)
				continue;

			pending = i915_sw_fence_await_dma_fence(fence,
								shared[i],
								timeout,
								gfp);
			if (pending < 0) {
				ret = pending;
				break;
			}

			ret |= pending;
		}

		for (i = 0; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = dma_resv_get_excl_unlocked(resv);
	}

	if (ret >= 0 && excl && excl->ops != exclude) {
		pending = i915_sw_fence_await_dma_fence(fence,
							excl,
							timeout,
							gfp);
		if (pending < 0)
			ret = pending;
		else
			ret |= pending;
	}

	dma_fence_put(excl);

	return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/lib_sw_fence.c"
#include "selftests/i915_sw_fence.c"
#endif
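/*
 * Putting it together, a minimal usage sketch for awaiting a
 * reservation object (illustrative only; the submit/obj names are
 * hypothetical): before executing work that writes a buffer, fold every
 * outstanding fence on its dma_resv into the work's submit fence, then
 * commit:
 *
 *	err = i915_sw_fence_await_reservation(&submit, obj->base.resv,
 *					      NULL, true, HZ, GFP_KERNEL);
 *	if (err < 0)
 *		goto err;
 *	i915_sw_fence_commit(&submit);
 *
 * A positive return from the await functions means callbacks were
 * installed and the fence now waits asynchronously; zero means every
 * dependency was already signaled and no wait was needed.
 */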