/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/hrtimer.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/lockdep.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct drm_printer;
struct i915_request;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)
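
/*
 * Illustrative usage sketch (not part of this header): RQ_TRACE() prepends
 * the request's fence id and the breadcrumb currently visible in the HWSP
 * to an engine trace message, so a caller only supplies the tail of the
 * format string:
 *
 *	RQ_TRACE(rq, "submitted at head %x\n", rq->head);
 */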

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
	 *
	 * Using the scheduler, when a request is ready for execution it is
	 * put into the priority queue, and removed from that queue when
	 * transferred to the HW runlists. We want to track its membership
	 * within the priority queue so that we can easily check before
	 * rescheduling.
	 *
	 * See i915_request_in_priority_queue()
	 */
	I915_FENCE_FLAG_PQUEUE,

	/*
	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
	 *
	 * This request has been suspended, pending an ongoing investigation.
	 */
	I915_FENCE_FLAG_HOLD,

	/*
	 * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
	 * breadcrumb that marks the end of semaphore waits and start of the
	 * user payload.
	 */
	I915_FENCE_FLAG_INITIAL_BREADCRUMB,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
	 *
	 * The execution of some requests should not be interrupted. This is
	 * a sensitive operation as it makes the request super important,
	 * blocking other higher priority work. Abuse of this flag will
	 * lead to quality of service issues.
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
	 *
	 * A high priority sentinel request may be submitted to clear the
	 * submission queue. As it will be the only request in-flight, upon
	 * execution all other active requests will have been preempted and
	 * unsubmitted. This preemptive pulse is used to re-evaluate the
	 * in-flight requests, particularly in cases where an active context
	 * is banned and those active requests need to be cancelled.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
	 *
	 * Some requests are more important than others! In particular, a
	 * request that the user is waiting on is typically required for
	 * interactive latency, which we want to minimise by upclocking the
	 * GPU. Here we track such boost requests on a per-request basis.
	 */
	I915_FENCE_FLAG_BOOST,

	/*
	 * I915_FENCE_FLAG_SUBMIT_PARALLEL - request with a context in a
	 * parent-child relationship (parallel submission, multi-lrc) should
	 * trigger a submission to the GuC rather than just moving the context
	 * tail.
	 */
	I915_FENCE_FLAG_SUBMIT_PARALLEL,

	/*
	 * I915_FENCE_FLAG_SKIP_PARALLEL - request with a context in a
	 * parent-child relationship (parallel submission, multi-lrc) that
	 * hit an error while generating requests in the execbuf IOCTL.
	 * Indicates this request should be skipped as another request in
	 * the submission / relationship encountered an error.
	 */
	I915_FENCE_FLAG_SKIP_PARALLEL,

	/*
	 * I915_FENCE_FLAG_COMPOSITE - Indicates the fence is part of an
	 * i915-generated composite fence (dma_fence_array) used for parallel
	 * submission.
	 */
	I915_FENCE_FLAG_COMPOSITE,
};
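
/*
 * A minimal sketch of how these flags are consumed (an assumption, mirroring
 * the helpers later in this header): each is an ordinary bit in
 * rq->fence.flags, queried with the usual bitops:
 *
 *	bool nopreempt = test_bit(I915_FENCE_FLAG_NOPREEMPT,
 *				  &rq->fence.flags);
 */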

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/**
	 * Context and ring buffer related to this request.
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee
	 * that it persists while any request is linked to it. Requests
	 * themselves are also refcounted, so the request will only be freed
	 * when the last reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;

	struct list_head signal_link;
	struct llist_node signal_node;

	/*
	 * The rcu epoch of when this request was allocated. Used to
	 * judiciously apply backpressure on future allocations to ensure
	 * that under mempressure there are sufficient RCU ticks for us to
	 * reclaim our RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	struct llist_head execute_cb;
	struct i915_sw_fence semaphore;
	/**
	 * @submit_work: complete submit fence from an IRQ if needed for
	 * locking hierarchy reasons.
	 */
	struct irq_work submit_work;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/**
	 * Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** timeline->request entry for this request */
	struct list_head link;

	/** Watchdog support fields. */
	struct i915_request_watchdog {
		struct llist_node link;
		struct hrtimer timer;
	} watchdog;

	/**
	 * @guc_fence_link: Requests may need to be stalled when using GuC
	 * submission waiting for certain GuC operations to complete. If that
	 * is the case, stalled requests are added to a per context list of
	 * stalled requests. The below list_head is the link in that list.
	 * Protected by ce->guc_state.lock.
	 */
	struct list_head guc_fence_link;

	/**
	 * @guc_prio: Priority level while the request is in flight. Differs
	 * from i915 scheduler priority. See comment above
	 * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details. Protected by
	 * ce->guc_active.lock. Two special values (GUC_PRIO_INIT and
	 * GUC_PRIO_FINI) outside the GuC priority range are used to indicate
	 * if the priority has not been initialized yet or if no more updates
	 * are possible because the request has completed.
	 */
#define	GUC_PRIO_INIT	0xff
#define	GUC_PRIO_FINI	0xfe
	u8 guc_prio;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};
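
/*
 * A minimal lifetime sketch (assuming the usual kref conventions that
 * i915_request_get()/i915_request_put() below wrap): any code that keeps a
 * request pointer beyond the current locked section must hold its own
 * reference, since the request is freed once the last reference is dropped:
 *
 *	struct i915_request *rq = i915_request_get(rq_in);
 *
 *	... use rq, e.g. inspect rq->fence.seqno ...
 *
 *	i915_request_put(rq);
 */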

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct kmem_cache *i915_request_slab_cache(void);

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);

void __i915_request_skip(struct i915_request *rq);
bool i915_request_set_error_once(struct i915_request *rq, int error);
struct i915_request *i915_request_mark_eio(struct i915_request *rq);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);
void __i915_request_queue_bh(struct i915_request *rq);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence);

void i915_request_add(struct i915_request *rq);
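
/*
 * A sketch of the typical construction flow these entry points imply
 * (hedged: surrounding locking and unwinding are elided): create a request
 * on a pinned context, declare its dependencies, then add it to its
 * timeline for submission:
 *
 *	struct i915_request *rq;
 *	int err;
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = i915_request_await_dma_fence(rq, in_fence);
 *	if (err)
 *		i915_request_set_error_once(rq, err);
 *	i915_request_add(rq);
 */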

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

void i915_request_cancel(struct i915_request *rq, int error);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
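
/*
 * Illustrative wait sketch (assumption: as with other jiffies-based kernel
 * waits, the timeout is given in jiffies and a negative return indicates an
 * error or timeout, while a non-negative return is the time remaining):
 *
 *	long ret;
 *
 *	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				msecs_to_jiffies(100));
 *	if (ret < 0)
 *		return ret;
 */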

void i915_request_show(struct drm_printer *m,
		       const struct i915_request *rq,
		       const char *prefix,
		       int indent);

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

static inline bool
i915_request_has_initial_breadcrumb(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
}

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
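
/*
 * Worked example: the signed subtraction makes the comparison safe across
 * u32 wraparound, provided the two seqnos are within 2^31 of each other:
 *
 *	i915_seqno_passed(5, 3)                   -> true  ((s32)2 >= 0)
 *	i915_seqno_passed(3, 5)                   -> false ((s32)-2 < 0)
 *	i915_seqno_passed(0x00000001, 0xffffffff) -> true  ((s32)2 >= 0, wrapped)
 */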

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);

	return READ_ONCE(*hwsp);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page shows that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it began executing the user payload. A request can therefore be active
 * on the HW and not yet started as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		/* Remember: started but may have since been preempted! */
		result = __i915_request_has_started(rq);
	rcu_read_unlock();

	return result;
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and it is not
 * busywaiting). Note that it may no longer be running by the time the
 * function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	bool result;

	if (!i915_request_is_active(rq))
		return false;

	rcu_read_lock();
	result = __i915_request_has_started(rq) && i915_request_is_active(rq);
	rcu_read_unlock();

	return result;
}

/**
 * i915_request_is_ready - check if the request is ready for execution
 * @rq: the request
 *
 * Upon construction, the request is instructed to wait upon various
 * signals before it is ready to be executed by the HW. That is, we do
 * not want to start execution and read data before it is written. In
 * practice, this is controlled with a mixture of interrupts and semaphores.
 * Once the submit fence is completed, the backend scheduler will place the
 * request into its queue and from there submit it for execution. So we
 * can detect when a request is eligible for execution (and is under control
 * of the scheduler) by querying where it is in any of the scheduler's lists.
 *
 * Returns true if the request is ready for execution (it may be inflight),
 * false otherwise.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
	return !list_empty(&rq->sched.link);
}

static inline bool __i915_request_is_complete(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		result = __i915_request_is_complete(rq);
	rcu_read_unlock();

	return result;
}

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
		   (u32 *)&rq->fence.seqno);
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline bool i915_request_on_hold(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}

static inline void i915_request_set_hold(struct i915_request *rq)
{
	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline void i915_request_clear_hold(struct i915_request *rq)
{
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline struct intel_timeline *
i915_request_timeline(const struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}

static inline struct i915_gem_context *
i915_request_gem_context(const struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(const struct i915_request *rq)
{
	/*
	 * When in use during submission, we are protected by a guarantee that
	 * the context/timeline is pinned and must remain pinned until after
	 * this submission.
	 */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rq->engine->sched_engine->lock));
}

static inline u32
i915_request_active_seqno(const struct i915_request *rq)
{
	u32 hwsp_phys_base =
		page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset);
	u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno);

	/*
	 * Because of wraparound, we cannot simply take tl->hwsp_offset,
	 * but instead use the fact that the in-page offset of the vaddr is
	 * the same as the in-page offset of hwsp_offset. Take the top bits
	 * from tl->hwsp_offset and combine them with the relative offset in
	 * rq->hwsp_seqno.
	 *
	 * As rq->hwsp_seqno is rewritten when signaled, this only works
	 * when the request isn't signaled yet, but at that point you
	 * no longer need the offset.
	 */

	return hwsp_phys_base + hwsp_relative_offset;
}
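
/*
 * Worked example of the reconstruction above (illustrative numbers): with
 * 4KiB pages, if tl->hwsp_offset were 0x00123040 and rq->hwsp_seqno pointed
 * at byte 0x040 of its mapped page, then page_mask_bits() yields 0x00123000
 * and offset_in_page() yields 0x040, so the sum recovers 0x00123040, the
 * offset of the seqno for this request.
 */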

bool
i915_request_active_engine(struct i915_request *rq,
			   struct intel_engine_cs **active);

void i915_request_notify_execute_cb_imm(struct i915_request *rq);

enum i915_request_state {
	I915_REQUEST_UNKNOWN = 0,
	I915_REQUEST_COMPLETE,
	I915_REQUEST_PENDING,
	I915_REQUEST_QUEUED,
	I915_REQUEST_ACTIVE,
};

enum i915_request_state i915_test_request_state(struct i915_request *rq);

void i915_request_module_exit(void);
int i915_request_module_init(void);

#endif /* I915_REQUEST_H */