/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_ENGINE_TYPES_H__
#define __INTEL_ENGINE_TYPES_H__

#include <linux/average.h>
#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "i915_gem.h"
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"

/* HW Engine class + instance */
#define RENDER_CLASS		0
#define VIDEO_DECODE_CLASS	1
#define VIDEO_ENHANCEMENT_CLASS	2
#define COPY_ENGINE_CLASS	3
#define OTHER_CLASS		4
#define COMPUTE_CLASS		5
#define MAX_ENGINE_CLASS	5
#define MAX_ENGINE_INSTANCE	8

#define I915_MAX_SLICES		3
#define I915_MAX_SUBSLICES	8

#define I915_CMD_HASH_ORDER	9

struct dma_fence;
struct drm_i915_gem_object;
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct i915_sched_engine;
struct intel_gt;
struct intel_ring;
struct intel_uncore;
struct intel_breadcrumbs;

typedef u32 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
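
/*
 * Illustrative sketch, not part of the driver interface: an
 * intel_engine_mask_t is an ordinary bitmask, so sets of engines can be
 * built and tested with plain bit operations against ALL_ENGINES. The
 * helper below is a hypothetical example, assuming each engine owns a
 * single bit of the mask.
 */
static inline bool
example_engine_in_mask(intel_engine_mask_t mask, unsigned int bit)
{
	/* True if the engine owning @bit is part of @mask */
	return mask & BIT(bit);
}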

struct intel_hw_status_page {
	struct list_head timelines;
	struct i915_vma *vma;
	u32 *addr;
};

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 slice_common_extra[2];
	u32 sampler[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
	u32 row[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];

	/* Added in XeHPG */
	u32 geom_svg[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
};

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are referenced in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

#define I915_MAX_VCS	8
#define I915_MAX_VECS	4
#define I915_MAX_SFC	(I915_MAX_VCS / 2)
#define I915_MAX_CCS	4
#define I915_MAX_RCS	1
#define I915_MAX_BCS	9

/*
 * Engine ID definitions.
 * Keep instances of the same engine type together.
 */
enum intel_engine_id {
	RCS0 = 0,
	BCS0,
	BCS1,
	BCS2,
	BCS3,
	BCS4,
	BCS5,
	BCS6,
	BCS7,
	BCS8,
#define _BCS(n) (BCS0 + (n))
	VCS0,
	VCS1,
	VCS2,
	VCS3,
	VCS4,
	VCS5,
	VCS6,
	VCS7,
#define _VCS(n) (VCS0 + (n))
	VECS0,
	VECS1,
	VECS2,
	VECS3,
#define _VECS(n) (VECS0 + (n))
	CCS0,
	CCS1,
	CCS2,
	CCS3,
#define _CCS(n) (CCS0 + (n))
	GSC0,
	I915_NUM_ENGINES
#define INVALID_ENGINE ((enum intel_engine_id)-1)
};

/* A simple estimator for the round-trip latency of an engine */
DECLARE_EWMA(_engine_latency, 6, 4)
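
/*
 * Illustrative usage sketch, not part of the driver: DECLARE_EWMA() above
 * generates struct ewma__engine_latency together with the
 * ewma__engine_latency_init()/_add()/_read() helpers from <linux/average.h>.
 * A latency sample (in microseconds here, purely as an example) would be
 * folded into the running average roughly like this:
 */
static inline void
example_record_engine_latency(struct ewma__engine_latency *avg,
			      unsigned long latency_us)
{
	/* Weighted in with the precision/weight chosen in DECLARE_EWMA() */
	ewma__engine_latency_add(avg, latency_us);
}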

struct st_preempt_hang {
	struct completion completion;
	unsigned int count;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * the driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @timer: kick the current context if its timeslice expires
	 */
	struct timer_list timer;

	/**
	 * @preempt: reset the current context if it fails to give way
	 */
	struct timer_list preempt;

	/**
	 * @preempt_target: active request at the time of the preemption request
	 *
	 * We force a preemption to occur if the pending contexts have not
	 * been promoted to active upon receipt of the CS ack event within
	 * the timeout. This timeout may be chosen based on the target,
	 * using a very short timeout if the context is no longer schedulable.
	 * That short timeout may not be applicable to other contexts, so if
	 * a context switch should happen before the preemption timeout
	 * expires, we may shoot early at an innocent context. To prevent
	 * this, we record which context was active at the time of the
	 * preemption request and only reset that context upon the timeout.
	 */
	const struct i915_request *preempt_target;

	/**
	 * @ccid: identifier for contexts submitted to this engine
	 */
	u32 ccid;

	/**
	 * @yield: CCID at the time of the last semaphore-wait interrupt.
	 *
	 * Instead of leaving a semaphore busy-spinning on an engine, we would
	 * like to switch to another ready context, i.e. yielding the
	 * semaphore timeslice.
	 */
	u32 yield;

	/**
	 * @error_interrupt: CS Master EIR
	 *
	 * The CS generates an interrupt when it detects an error. We capture
	 * the first error interrupt, record the EIR and schedule the tasklet.
	 * In the tasklet, we process the pending CS events to ensure we have
	 * the guilty request, and then reset the engine.
	 *
	 * Low 16b are used by HW, with the upper 16b used as the enabling
	 * mask. Reserve the upper 16b for tracking internal errors.
	 */
	u32 error_interrupt;
#define ERROR_CSB	BIT(31)
#define ERROR_PREEMPT	BIT(30)

	/**
	 * @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
	 */
	u32 reset_ccid;

	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

#define EXECLIST_MAX_PORTS 2
	/**
	 * @active: the currently known context executing on HW
	 */
	struct i915_request * const *active;
	/**
	 * @inflight: the set of contexts submitted and acknowledged by HW
	 *
	 * The set of inflight contexts is managed by reading CS events
	 * from the HW. On a context-switch event (not preemption), we
	 * know the HW has transitioned from port0 to port1, and we
	 * advance our inflight/active tracking accordingly.
	 */
	struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
	/**
	 * @pending: the next set of contexts submitted to ELSP
	 *
	 * We store the array of contexts that we submit to HW (via ELSP) and
	 * promote them to the inflight array once HW has signaled the
	 * preemption or idle-to-active event.
	 */
	struct i915_request *pending[EXECLIST_MAX_PORTS + 1];

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @virtual: Queue of requests on a virtual engine, sorted by priority.
	 * Each RB entry is a struct i915_priolist containing a list of
	 * requests of the same priority.
	 */
	struct rb_root_cached virtual;

	/**
	 * @csb_write: control register for Context Switch buffer
	 *
	 * Note this register may be either mmio or HWSP shadow.
	 */
	u32 *csb_write;

	/**
	 * @csb_status: status array for Context Switch buffer
	 *
	 * Note these registers may be either mmio or HWSP shadow.
	 */
	u64 *csb_status;

	/**
	 * @csb_size: context status buffer FIFO size
	 */
	u8 csb_size;

	/**
	 * @csb_head: context status buffer head
	 */
	u8 csb_head;

	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};
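
/*
 * Illustrative sketch, not part of the driver: the @inflight and @pending
 * arrays above are NULL-terminated (hence the extra "sentinel" slot), so
 * the set of requests currently acknowledged by HW can be walked with a
 * simple loop, as in the hypothetical helper below.
 */
static inline unsigned int
example_count_inflight(const struct intel_engine_execlists *execlists)
{
	struct i915_request * const *port;
	unsigned int count = 0;

	/* Walk the ports until the NULL sentinel terminates the array */
	for (port = execlists->inflight; *port; port++)
		count++;

	return count;
}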

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_execlists_stats {
	/**
	 * @active: Number of contexts currently scheduled in.
	 */
	unsigned int active;

	/**
	 * @lock: Lock protecting the below fields.
	 */
	seqcount_t lock;

	/**
	 * @total: Total time this engine was busy.
	 *
	 * Accumulated time, not counting the most recent block in cases where
	 * the engine is currently busy (active > 0).
	 */
	ktime_t total;

	/**
	 * @start: Timestamp of the last idle to active transition.
	 *
	 * Idle is defined as active == 0, busy as active > 0.
	 */
	ktime_t start;
};

struct intel_engine_guc_stats {
	/**
	 * @running: Active state of the engine when busyness was last sampled.
	 */
	bool running;

	/**
	 * @prev_total: Previous value of total runtime clock cycles.
	 */
	u32 prev_total;

	/**
	 * @total_gt_clks: Total gt clock cycles this engine was busy.
	 */
	u64 total_gt_clks;

	/**
	 * @start_gt_clk: GT clock time of last idle to active transition.
	 */
	u64 start_gt_clk;
};
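
/*
 * Illustrative sketch, not part of the driver: as described in the
 * kernel-doc above, execlists busyness is @total plus the currently open
 * busy block. Assuming the seqcount and any required locking are handled
 * by the caller, the reported value would be computed roughly as follows.
 */
static inline ktime_t
example_execlists_busy_time(const struct intel_engine_execlists_stats *stats,
			    ktime_t now)
{
	ktime_t total = stats->total;

	/* If busy, add the time elapsed since the last idle->active switch */
	if (stats->active)
		total = ktime_add(total, ktime_sub(now, stats->start));

	return total;
}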

struct intel_engine_cs {
	struct drm_i915_private *i915;
	struct intel_gt *gt;
	struct intel_uncore *uncore;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	enum intel_engine_id legacy_idx;

	unsigned int guc_id;

	intel_engine_mask_t mask;
	u32 reset_domain;
	/**
	 * @logical_mask: logical mask of engine, reported to user space via
	 * query IOCTL and used to communicate with the GuC in logical space.
	 * The logical instance of a physical engine can change based on
	 * product and fusing.
	 */
	intel_engine_mask_t logical_mask;

	u8 class;
	u8 instance;

	u16 uabi_class;
	u16 uabi_instance;

	u32 uabi_capabilities;
	u32 context_size;
	u32 mmio_base;

	/*
	 * Some w/a require forcewake to be held (which prevents RC6) while
	 * a particular engine is active. If so, we set fw_domain to the
	 * domains that need to be held for the duration of request activity,
	 * and 0 if none. We try to limit the duration of the hold as much
	 * as possible.
	 */
	enum forcewake_domains fw_domain;
	unsigned int fw_active;

	unsigned long context_tag;

	struct rb_node uabi_node;

	struct intel_sseu sseu;

	struct i915_sched_engine *sched_engine;

	/* keep a request in reserve for a [pm] barrier under oom */
	struct i915_request *request_pool;

	struct intel_context *hung_ce;

	struct llist_head barrier_tasks;

	struct intel_context *kernel_context; /* pinned */

	/**
	 * @pinned_contexts_list: List of pinned contexts. This list is
	 * assumed to be manipulated only during driver load or unload time,
	 * and therefore does not have any additional protection.
	 */
	struct list_head pinned_contexts_list;

	intel_engine_mask_t saturated; /* submitting semaphores too late? */

	struct {
		struct delayed_work work;
		struct i915_request *systole;
		unsigned long blocked;
	} heartbeat;

	unsigned long serial;

	unsigned long wakeref_serial;
	struct intel_wakeref wakeref;
	struct file *default_state;

	struct {
		struct intel_ring *ring;
		struct intel_timeline *timeline;
	} legacy;

	/*
	 * We track the average duration of the idle pulse on parking the
	 * engine to keep an estimate of how fast the engine is under ideal
	 * conditions.
	 */
	struct ewma__engine_latency latency;

	/* Keep track of all the seqno used, a trail of breadcrumbs */
	struct intel_breadcrumbs *breadcrumbs;

	struct intel_engine_pmu {
		/**
		 * @enable: Bitmask of enable sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
	} pmu;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_wa_list ctx_wa_list;
	struct i915_wa_list wa_list;
	struct i915_wa_list whitelist;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);
	void (*irq_handler)(struct intel_engine_cs *engine, u16 iir);

	void (*sanitize)(struct intel_engine_cs *engine);
	int (*resume)(struct intel_engine_cs *engine);

	struct {
		void (*prepare)(struct intel_engine_cs *engine);

		void (*rewind)(struct intel_engine_cs *engine, bool stalled);
		void (*cancel)(struct intel_engine_cs *engine);

		void (*finish)(struct intel_engine_cs *engine);
	} reset;

	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*bump_serial)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	const struct intel_context_ops *cops;

	int (*request_alloc)(struct i915_request *rq);

	int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct i915_request *rq,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
	int (*emit_init_breadcrumb)(struct i915_request *rq);
	u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
				     u32 *cs);
	unsigned int emit_fini_breadcrumb_dw;

	/*
	 * Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct i915_request *rq);

	void (*release)(struct intel_engine_cs *engine);

	/*
	 * Add / remove request from engine active tracking
	 */
	void (*add_active_request)(struct i915_request *rq);
	void (*remove_active_request)(struct i915_request *rq);

	/*
	 * Get engine busyness and the time at which the busyness was sampled.
	 */
	ktime_t (*busyness)(struct intel_engine_cs *engine,
			    ktime_t *now);

	struct intel_engine_execlists execlists;

	/*
	 * Keep track of completed timelines on this engine for early
	 * retirement with the goal of quickly enabling powersaving as
	 * soon as the engine is idle.
	 */
	struct intel_timeline *retire;
	struct work_struct retire_work;

	/* context_status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

#define I915_ENGINE_USING_CMD_PARSER		BIT(0)
#define I915_ENGINE_SUPPORTS_STATS		BIT(1)
#define I915_ENGINE_HAS_PREEMPTION		BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES		BIT(3)
#define I915_ENGINE_HAS_TIMESLICES		BIT(4)
#define I915_ENGINE_IS_VIRTUAL			BIT(5)
#define I915_ENGINE_HAS_RELATIVE_MMIO		BIT(6)
#define I915_ENGINE_REQUIRES_CMD_PARSER		BIT(7)
#define I915_ENGINE_WANT_FORCED_PREEMPTION	BIT(8)
#define I915_ENGINE_HAS_RCS_REG_STATE		BIT(9)
#define I915_ENGINE_HAS_EU_PRIORITY		BIT(10)
#define I915_ENGINE_FIRST_RENDER_COMPUTE	BIT(11)
#define I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT	BIT(12)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);

	struct {
		union {
			struct intel_engine_execlists_stats execlists;
			struct intel_engine_guc_stats guc;
		};

		/**
		 * @rps: Utilisation at last RPS sampling.
		 */
		ktime_t rps;
	} stats;

	struct {
		unsigned long heartbeat_interval_ms;
		unsigned long max_busywait_duration_ns;
		unsigned long preempt_timeout_ms;
		unsigned long stop_timeout_ms;
		unsigned long timeslice_duration_ms;
	} props, defaults;

	I915_SELFTEST_DECLARE(struct fault_attr reset_timeout);
};
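
/*
 * Illustrative sketch, not part of the driver: busyness is reported through
 * the ->busyness() hook above, so callers need not know whether the
 * execlists or GuC flavour of the stats union is in use. A hypothetical
 * wrapper might look like this:
 */
static inline ktime_t
example_engine_busy_time(struct intel_engine_cs *engine, ktime_t *now)
{
	/* The backend-specific implementation also fills in *now */
	return engine->busyness(engine, now);
}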

static inline bool
intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
}

static inline bool
intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool
intel_engine_has_semaphores(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
}

static inline bool
intel_engine_has_timeslices(const struct intel_engine_cs *engine)
{
	if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
		return false;

	return engine->flags & I915_ENGINE_HAS_TIMESLICES;
}
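
/*
 * Illustrative sketch, not part of the driver: the flag helpers above are
 * typically combined by callers; a hypothetical check for whether an engine
 * can rotate between contexts might read:
 */
static inline bool
example_engine_can_timeslice(const struct intel_engine_cs *engine)
{
	return intel_engine_has_preemption(engine) &&
	       intel_engine_has_timeslices(engine);
}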

static inline bool
intel_engine_is_virtual(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_IS_VIRTUAL;
}

static inline bool
intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
{
	return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
}

/* Wa_14014475959:dg2 */
static inline bool
intel_engine_uses_wa_hold_ccs_switchout(struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
}

#endif /* __INTEL_ENGINE_TYPES_H__ */