/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_ENGINE_TYPES_H__
#define __INTEL_ENGINE_TYPES_H__

#include <linux/average.h>
#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "i915_gem.h"
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"

/* HW Engine class + instance */
#define RENDER_CLASS		0
#define VIDEO_DECODE_CLASS	1
#define VIDEO_ENHANCEMENT_CLASS	2
#define COPY_ENGINE_CLASS	3
#define OTHER_CLASS		4
#define COMPUTE_CLASS		5
#define MAX_ENGINE_CLASS	5
#define MAX_ENGINE_INSTANCE	8

#define I915_MAX_SLICES		3
#define I915_MAX_SUBSLICES	8

#define I915_CMD_HASH_ORDER	9

struct dma_fence;
struct drm_i915_gem_object;
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct i915_sched_engine;
struct intel_gt;
struct intel_ring;
struct intel_uncore;
struct intel_breadcrumbs;
struct intel_engine_cs;
struct i915_perf_group;

typedef u32 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
#define VIRTUAL_ENGINES BIT(BITS_PER_TYPE(intel_engine_mask_t) - 1)

struct intel_hw_status_page {
	struct list_head timelines;
	struct i915_vma *vma;
	u32 *addr;
};

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 slice_common_extra[2];
	u32 sampler[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
	u32 row[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];

	/* Added in XeHPG */
	u32 geom_svg[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
};

/*
 * We use a single page to load the context workarounds, so all of these
 * values are expressed in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position; also helpful if we
 *  want to have multiple batches at different offsets based on some
 *  criteria. It is not a requirement at the moment but provides an
 *  option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

#define I915_MAX_VCS	8
#define I915_MAX_VECS	4
#define I915_MAX_SFC	(I915_MAX_VCS / 2)
#define I915_MAX_CCS	4
#define I915_MAX_RCS	1
#define I915_MAX_BCS	9
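/*
 * Illustrative sketch, not driver code: an intel_engine_mask_t carries
 * one bit per engine id from the enum below, so sets of engines are
 * built and tested with plain bit operations, e.g.
 *
 *	intel_engine_mask_t mask = BIT(RCS0) | BIT(VCS0);
 *
 *	if (mask & BIT(VCS0))
 *		;	// VCS0 is a member of the set
 *
 * ALL_ENGINES selects every engine, while VIRTUAL_ENGINES reserves the
 * top bit to mark virtual engines, so it never aliases a physical bit.
 */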
/*
 * Engine IDs definitions.
 * Keep instances of the same engine type together.
 */
enum intel_engine_id {
	RCS0 = 0,
	BCS0,
	BCS1,
	BCS2,
	BCS3,
	BCS4,
	BCS5,
	BCS6,
	BCS7,
	BCS8,
#define _BCS(n) (BCS0 + (n))
	VCS0,
	VCS1,
	VCS2,
	VCS3,
	VCS4,
	VCS5,
	VCS6,
	VCS7,
#define _VCS(n) (VCS0 + (n))
	VECS0,
	VECS1,
	VECS2,
	VECS3,
#define _VECS(n) (VECS0 + (n))
	CCS0,
	CCS1,
	CCS2,
	CCS3,
#define _CCS(n) (CCS0 + (n))
	GSC0,
	I915_NUM_ENGINES
#define INVALID_ENGINE ((enum intel_engine_id)-1)
};

/* A simple estimator for the round-trip latency of an engine */
DECLARE_EWMA(_engine_latency, 6, 4)
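/*
 * Illustrative use of the estimator above (standard linux/average.h
 * API; the variable names are hypothetical): DECLARE_EWMA() generates
 * struct ewma__engine_latency together with its accessors, so callers
 * feed in observed durations and read back the smoothed estimate:
 *
 *	struct ewma__engine_latency latency;
 *
 *	ewma__engine_latency_init(&latency);
 *	ewma__engine_latency_add(&latency, duration_ns);
 *	estimate = ewma__engine_latency_read(&latency);
 */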
struct st_preempt_hang {
	struct completion completion;
	unsigned int count;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state
 * of the driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @timer: kick the current context if its timeslice expires
	 */
	struct timer_list timer;

	/**
	 * @preempt: reset the current context if it fails to give way
	 */
	struct timer_list preempt;

	/**
	 * @preempt_target: active request at the time of the preemption request
	 *
	 * We force a preemption to occur if the pending contexts have not
	 * been promoted to active upon receipt of the CS ack event within
	 * the timeout. This timeout may be chosen based on the target,
	 * using a very short timeout if the context is no longer schedulable.
	 * That short timeout may not be applicable to other contexts, so
	 * if a context switch happens before the preemption timeout, we
	 * may shoot early at an innocent context. To prevent this, we
	 * record which context was active at the time of the preemption
	 * request and only reset that context upon the timeout.
	 */
	const struct i915_request *preempt_target;

	/**
	 * @ccid: identifier for contexts submitted to this engine
	 */
	u32 ccid;

	/**
	 * @yield: CCID at the time of the last semaphore-wait interrupt.
	 *
	 * Instead of leaving a semaphore busy-spinning on an engine, we would
	 * like to switch to another ready context, i.e. yielding the semaphore
	 * timeslice.
	 */
	u32 yield;

	/**
	 * @error_interrupt: CS Master EIR
	 *
	 * The CS generates an interrupt when it detects an error. We capture
	 * the first error interrupt, record the EIR and schedule the tasklet.
	 * In the tasklet, we process the pending CS events to ensure we have
	 * the guilty request, and then reset the engine.
	 *
	 * Low 16b are used by HW, with the upper 16b used as the enabling mask.
	 * Reserve the upper 16b for tracking internal errors.
	 */
	u32 error_interrupt;
#define ERROR_CSB	BIT(31)
#define ERROR_PREEMPT	BIT(30)

	/**
	 * @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
	 */
	u32 reset_ccid;

	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

#define EXECLIST_MAX_PORTS 2
	/**
	 * @active: the currently known context executing on HW
	 */
	struct i915_request * const *active;
	/**
	 * @inflight: the set of contexts submitted and acknowledged by HW
	 *
	 * The set of inflight contexts is managed by reading CS events
	 * from the HW. On a context-switch event (not preemption), we
	 * know the HW has transitioned from port0 to port1, and we
	 * advance our inflight/active tracking accordingly.
	 */
	struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
	/**
	 * @pending: the next set of contexts submitted to ELSP
	 *
	 * We store the array of contexts that we submit to HW (via ELSP) and
	 * promote them to the inflight array once HW has signaled the
	 * preemption or idle-to-active event.
	 */
	struct i915_request *pending[EXECLIST_MAX_PORTS + 1];

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @virtual: Queue of requests on a virtual engine, sorted by priority.
	 * Each RB entry is a struct i915_priolist containing a list of requests
	 * of the same priority.
	 */
	struct rb_root_cached virtual;

	/**
	 * @csb_write: control register for Context Switch buffer
	 *
	 * Note this register may be either mmio or HWSP shadow.
	 */
	u32 *csb_write;

	/**
	 * @csb_status: status array for Context Switch buffer
	 *
	 * Note these registers may be either mmio or HWSP shadow.
	 */
	u64 *csb_status;

	/**
	 * @csb_size: context status buffer FIFO size
	 */
	u8 csb_size;

	/**
	 * @csb_head: context status buffer head
	 */
	u8 csb_head;

	/* private: selftest */
	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_execlists_stats {
	/**
	 * @active: Number of contexts currently scheduled in.
	 */
	unsigned int active;

	/**
	 * @lock: Lock protecting the below fields.
	 */
	seqcount_t lock;

	/**
	 * @total: Total time this engine was busy.
	 *
	 * Accumulated time not counting the most recent block in cases where
	 * the engine is currently busy (active > 0).
	 */
	ktime_t total;

	/**
	 * @start: Timestamp of the last idle to active transition.
	 *
	 * Idle is defined as active == 0, busy as active > 0.
	 */
	ktime_t start;
};
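/*
 * Illustrative readout sketch, assumed from the field descriptions
 * above rather than quoted from the driver: a consistent busyness
 * sample folds the still-open block into @total when the engine is
 * active, guarded by the @lock seqcount:
 *
 *	unsigned int seq;
 *	ktime_t busy;
 *
 *	do {
 *		seq = read_seqcount_begin(&stats->lock);
 *		busy = stats->total;
 *		if (stats->active)
 *			busy = ktime_add(busy, ktime_sub(now, stats->start));
 *	} while (read_seqcount_retry(&stats->lock, seq));
 */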
struct intel_engine_guc_stats {
	/**
	 * @running: Active state of the engine when busyness was last sampled.
	 */
	bool running;

	/**
	 * @prev_total: Previous value of total runtime clock cycles.
	 */
	u32 prev_total;

	/**
	 * @total_gt_clks: Total gt clock cycles this engine was busy.
	 */
	u64 total_gt_clks;

	/**
	 * @start_gt_clk: GT clock time of last idle to active transition.
	 */
	u64 start_gt_clk;

	/**
	 * @total: The last total busyness value returned.
	 */
	u64 total;
};

union intel_engine_tlb_inv_reg {
	i915_reg_t reg;
	i915_mcr_reg_t mcr_reg;
};

struct intel_engine_tlb_inv {
	bool mcr;
	union intel_engine_tlb_inv_reg reg;
	u32 request;
	u32 done;
};
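/*
 * Illustrative flow, assumed from the field names rather than quoted
 * from the driver: TLB invalidation is a handshake through @reg. Write
 * the @request bits to trigger the invalidation, then poll until the
 * @done bits read back clear, picking the multicast (MCR) flavour of
 * the register when @mcr is set:
 *
 *	intel_uncore_write_fw(uncore, tlb_inv->reg.reg, tlb_inv->request);
 *	// ... poll until (read & tlb_inv->done) drops to 0 ...
 */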
struct intel_engine_cs {
	struct drm_i915_private *i915;
	struct intel_gt *gt;
	struct intel_uncore *uncore;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	enum intel_engine_id legacy_idx;

	unsigned int guc_id;

	intel_engine_mask_t mask;
	u32 reset_domain;
	/**
	 * @logical_mask: logical mask of engine, reported to user space via
	 * query IOCTL and used to communicate with the GuC in logical space.
	 * The logical instance of a physical engine can change based on product
	 * and fusing.
	 */
	intel_engine_mask_t logical_mask;

	u8 class;
	u8 instance;

	u16 uabi_class;
	u16 uabi_instance;

	u32 uabi_capabilities;
	u32 context_size;
	u32 mmio_base;

	struct intel_engine_tlb_inv tlb_inv;

	/*
	 * Some w/a require forcewake to be held (which prevents RC6) while
	 * a particular engine is active. If so, we set fw_domain to the
	 * domains that need to be held for the duration of request activity,
	 * and 0 if none. We try to limit the duration of the hold as much
	 * as possible.
	 */
	enum forcewake_domains fw_domain;
	unsigned int fw_active;

	unsigned long context_tag;

	struct rb_node uabi_node;

	struct intel_sseu sseu;

	struct i915_sched_engine *sched_engine;

	/* keep a request in reserve for a [pm] barrier under oom */
	struct i915_request *request_pool;

	struct intel_context *hung_ce;

	struct llist_head barrier_tasks;

	struct intel_context *kernel_context; /* pinned */

	/**
	 * pinned_contexts_list: List of pinned contexts. This list is
	 * assumed to be manipulated only during driver load or unload
	 * time and therefore does not have any additional protection.
	 */
	struct list_head pinned_contexts_list;

	intel_engine_mask_t saturated; /* submitting semaphores too late? */

	struct {
		struct delayed_work work;
		struct i915_request *systole;
		unsigned long blocked;
	} heartbeat;

	unsigned long serial;

	unsigned long wakeref_serial;
	struct intel_wakeref wakeref;
	struct file *default_state;

	struct {
		struct intel_ring *ring;
		struct intel_timeline *timeline;
	} legacy;

	/*
	 * We track the average duration of the idle pulse on parking the
	 * engine to keep an estimate of how fast the engine is under
	 * ideal conditions.
	 */
	struct ewma__engine_latency latency;

	/* Keep track of all the seqno used, a trail of breadcrumbs */
	struct intel_breadcrumbs *breadcrumbs;

	struct intel_engine_pmu {
		/**
		 * @enable: Bitmask of enabled sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
	} pmu;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_wa_list ctx_wa_list;
	struct i915_wa_list wa_list;
	struct i915_wa_list whitelist;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);
	void (*irq_handler)(struct intel_engine_cs *engine, u16 iir);

	void (*sanitize)(struct intel_engine_cs *engine);
	int (*resume)(struct intel_engine_cs *engine);

	struct {
		void (*prepare)(struct intel_engine_cs *engine);

		void (*rewind)(struct intel_engine_cs *engine, bool stalled);
		void (*cancel)(struct intel_engine_cs *engine);

		void (*finish)(struct intel_engine_cs *engine);
	} reset;
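	/*
	 * Illustrative ordering sketch, assumed from the callback names
	 * rather than quoted from the reset path: a full engine reset
	 * brackets the hardware reset with these hooks,
	 *
	 *	engine->reset.prepare(engine);	// quiesce submission
	 *	// ... perform the hardware reset ...
	 *	engine->reset.rewind(engine, stalled); // replay/skip requests
	 *	engine->reset.finish(engine);	// resume submission
	 *
	 * while reset.cancel() is the wedged alternative to rewind(),
	 * completing every queued request with an error instead.
	 */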
	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*bump_serial)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	const struct intel_context_ops *cops;

	int (*request_alloc)(struct i915_request *rq);

	int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct i915_request *rq,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
	int (*emit_init_breadcrumb)(struct i915_request *rq);
	u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
				     u32 *cs);
	unsigned int emit_fini_breadcrumb_dw;

	/*
	 * Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct i915_request *rq);

	void (*release)(struct intel_engine_cs *engine);

	/*
	 * Add / remove request from engine active tracking
	 */
	void (*add_active_request)(struct i915_request *rq);
	void (*remove_active_request)(struct i915_request *rq);

	/*
	 * Get engine busyness and the time at which the busyness was sampled.
	 */
	ktime_t (*busyness)(struct intel_engine_cs *engine,
			    ktime_t *now);

	struct intel_engine_execlists execlists;

	/*
	 * Keep track of completed timelines on this engine for early
	 * retirement with the goal of quickly enabling powersaving as
	 * soon as the engine is idle.
	 */
	struct intel_timeline *retire;
	struct work_struct retire_work;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

#define I915_ENGINE_USING_CMD_PARSER		BIT(0)
#define I915_ENGINE_SUPPORTS_STATS		BIT(1)
#define I915_ENGINE_HAS_PREEMPTION		BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES		BIT(3)
#define I915_ENGINE_HAS_TIMESLICES		BIT(4)
#define I915_ENGINE_IS_VIRTUAL			BIT(5)
#define I915_ENGINE_HAS_RELATIVE_MMIO		BIT(6)
#define I915_ENGINE_REQUIRES_CMD_PARSER		BIT(7)
#define I915_ENGINE_WANT_FORCED_PREEMPTION	BIT(8)
#define I915_ENGINE_HAS_RCS_REG_STATE		BIT(9)
#define I915_ENGINE_HAS_EU_PRIORITY		BIT(10)
#define I915_ENGINE_FIRST_RENDER_COMPUTE	BIT(11)
#define I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT	BIT(12)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Returns 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
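	/*
	 * Illustrative fallback, assumed to mirror the description above
	 * rather than quote the parser (the length bias is hypothetical):
	 *
	 *	u32 mask = engine->get_cmd_length_mask(cmd_header);
	 *
	 *	if (!mask)
	 *		return -EINVAL;	// unrecognized command, reject
	 *	length = (cmd_header & mask) + bias;
	 */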
	struct {
		union {
			struct intel_engine_execlists_stats execlists;
			struct intel_engine_guc_stats guc;
		};

		/**
		 * @rps: Utilisation at last RPS sampling.
		 */
		ktime_t rps;
	} stats;

	struct {
		unsigned long heartbeat_interval_ms;
		unsigned long max_busywait_duration_ns;
		unsigned long preempt_timeout_ms;
		unsigned long stop_timeout_ms;
		unsigned long timeslice_duration_ms;
	} props, defaults;

	I915_SELFTEST_DECLARE(struct fault_attr reset_timeout);

	/*
	 * The perf group maps to one OA unit which controls one OA buffer. All
	 * reports corresponding to this engine will be reported to this OA
	 * buffer. An engine will map to a single OA unit, but a single OA unit
	 * can generate reports for multiple engines.
	 */
	struct i915_perf_group *oa_group;
};

static inline bool
intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
}

static inline bool
intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool
intel_engine_has_semaphores(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
}

static inline bool
intel_engine_has_timeslices(const struct intel_engine_cs *engine)
{
	if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
		return false;

	return engine->flags & I915_ENGINE_HAS_TIMESLICES;
}

static inline bool
intel_engine_is_virtual(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_IS_VIRTUAL;
}

static inline bool
intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
{
	return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
}

/* Wa_14014475959:dg2 */
static inline bool
intel_engine_uses_wa_hold_ccs_switchout(struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
}

#endif /* __INTEL_ENGINE_TYPES_H__ */