/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_ENGINE_TYPES_H__
#define __INTEL_ENGINE_TYPES_H__

#include <linux/average.h>
#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "i915_gem.h"
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"

/* HW Engine class + instance */
#define RENDER_CLASS		0
#define VIDEO_DECODE_CLASS	1
#define VIDEO_ENHANCEMENT_CLASS	2
#define COPY_ENGINE_CLASS	3
#define OTHER_CLASS		4
#define COMPUTE_CLASS		5
#define MAX_ENGINE_CLASS	5
#define MAX_ENGINE_INSTANCE	8

#define I915_MAX_SLICES		3
#define I915_MAX_SUBSLICES	8

#define I915_CMD_HASH_ORDER	9

struct dma_fence;
struct drm_i915_gem_object;
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct i915_sched_engine;
struct intel_gt;
struct intel_ring;
struct intel_uncore;
struct intel_breadcrumbs;

typedef u32 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)

struct intel_hw_status_page {
	struct list_head timelines;
	struct i915_vma *vma;
	u32 *addr;
};

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 slice_common_extra[2];
	u32 sampler[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
	u32 row[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];

	/* Added in XeHPG */
	u32 geom_svg[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
};

/*
 * We use a single page to load the context workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, which is also helpful
 *  in case we want to have multiple batches at different offsets based
 *  on some criteria. It is not a requirement at the moment, but provides
 *  an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

#define I915_MAX_VCS	8
#define I915_MAX_VECS	4
#define I915_MAX_SFC	(I915_MAX_VCS / 2)
#define I915_MAX_CCS	4
#define I915_MAX_RCS	1
#define I915_MAX_BCS	9

/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS0 = 0,
	BCS0,
	BCS1,
	BCS2,
	BCS3,
	BCS4,
	BCS5,
	BCS6,
	BCS7,
	BCS8,
#define _BCS(n) (BCS0 + (n))
	VCS0,
	VCS1,
	VCS2,
	VCS3,
	VCS4,
	VCS5,
	VCS6,
	VCS7,
#define _VCS(n) (VCS0 + (n))
	VECS0,
	VECS1,
	VECS2,
	VECS3,
#define _VECS(n) (VECS0 + (n))
	CCS0,
	CCS1,
	CCS2,
	CCS3,
#define _CCS(n) (CCS0 + (n))
	I915_NUM_ENGINES
#define INVALID_ENGINE ((enum intel_engine_id)-1)
};
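
/*
 * Illustrative sketch, not part of the original header: an engine mask
 * is a plain bitmask indexed by enum intel_engine_id, so BIT(RCS0)
 * selects the render engine and ALL_ENGINES selects every engine. The
 * example_ helper below is hypothetical; the driver provides its own
 * mask-iteration macros elsewhere.
 */
static inline bool
example_engine_mask_contains(intel_engine_mask_t mask,
			     enum intel_engine_id id)
{
	return mask & BIT(id);
}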

/* A simple estimator for the round-trip latency of an engine */
DECLARE_EWMA(_engine_latency, 6, 4)
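
/*
 * A minimal sketch of how the estimator above is consumed, assuming
 * only the linux/average.h contract: DECLARE_EWMA(_engine_latency, 6, 4)
 * generates struct ewma__engine_latency together with
 * ewma__engine_latency_{init,add,read}(), keeping 6 bits of fractional
 * precision and weighting each new sample by 1/4. The example_ helper
 * below is hypothetical.
 */
static inline unsigned long
example_engine_latency_update(struct ewma__engine_latency *avg,
			      unsigned long sample)
{
	ewma__engine_latency_add(avg, sample);	/* fold in a new sample */
	return ewma__engine_latency_read(avg);	/* current smoothed value */
}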

struct st_preempt_hang {
	struct completion completion;
	unsigned int count;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state
 * of the driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @timer: kick the current context if its timeslice expires
	 */
	struct timer_list timer;

	/**
	 * @preempt: reset the current context if it fails to give way
	 */
	struct timer_list preempt;

	/**
	 * @preempt_target: active request at the time of the preemption request
	 *
	 * We force a preemption to occur if the pending contexts have not
	 * been promoted to active upon receipt of the CS ack event within
	 * the timeout. This timeout may be chosen based on the target,
	 * using a very short timeout if the context is no longer schedulable.
	 * That short timeout may not be applicable to other contexts, so
	 * if a context switch should happen before the preemption timeout
	 * expires, we may shoot early at an innocent context. To prevent
	 * this, we record which context was active at the time of the
	 * preemption request and only reset that context upon the timeout.
	 */
	const struct i915_request *preempt_target;

	/**
	 * @ccid: identifier for contexts submitted to this engine
	 */
	u32 ccid;

	/**
	 * @yield: CCID at the time of the last semaphore-wait interrupt.
	 *
	 * Instead of leaving a semaphore busy-spinning on an engine, we would
	 * like to switch to another ready context, i.e. yielding the semaphore
	 * timeslice.
	 */
	u32 yield;

	/**
	 * @error_interrupt: CS Master EIR
	 *
	 * The CS generates an interrupt when it detects an error. We capture
	 * the first error interrupt, record the EIR and schedule the tasklet.
	 * In the tasklet, we process the pending CS events to ensure we have
	 * the guilty request, and then reset the engine.
	 *
	 * The low 16b are used by HW (the upper 16b of the EIR register hold
	 * the enabling mask), so we reserve the upper 16b of this field for
	 * tracking internal errors.
	 */
	u32 error_interrupt;
#define ERROR_CSB	BIT(31)
#define ERROR_PREEMPT	BIT(30)

	/**
	 * @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
	 */
	u32 reset_ccid;

	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

#define EXECLIST_MAX_PORTS 2
	/**
	 * @active: the currently known context executing on HW
	 */
	struct i915_request * const *active;
	/**
	 * @inflight: the set of contexts submitted and acknowledged by HW
	 *
	 * The set of inflight contexts is managed by reading CS events
	 * from the HW. On a context-switch event (not preemption), we
	 * know the HW has transitioned from port0 to port1, and we
	 * advance our inflight/active tracking accordingly.
	 */
	struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
	/**
	 * @pending: the next set of contexts submitted to ELSP
	 *
	 * We store the array of contexts that we submit to HW (via ELSP) and
	 * promote them to the inflight array once HW has signaled the
	 * preemption or idle-to-active event.
	 */
	struct i915_request *pending[EXECLIST_MAX_PORTS + 1];

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @virtual: Queue of requests on a virtual engine, sorted by priority.
	 * Each RB entry is a struct i915_priolist containing a list of requests
	 * of the same priority.
	 */
	struct rb_root_cached virtual;

	/**
	 * @csb_write: control register for Context Switch buffer
	 *
	 * Note this register may be either mmio or HWSP shadow.
	 */
	u32 *csb_write;

	/**
	 * @csb_status: status array for Context Switch buffer
	 *
	 * Note this register array may be either mmio or HWSP shadow.
	 */
	u64 *csb_status;

	/**
	 * @csb_size: context status buffer FIFO size
	 */
	u8 csb_size;

	/**
	 * @csb_head: context status buffer head
	 */
	u8 csb_head;

	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};
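
/*
 * Illustrative sketches, not part of the original header. Per the
 * kerneldoc above, the ELSP port count is stored as a mask, so there
 * are port_mask + 1 ports, and the inflight[]/pending[] arrays are
 * terminated by their NULL sentinel slot. The driver has equivalent
 * accessors of its own; the example_ names below are hypothetical.
 */
static inline unsigned int
example_execlists_num_ports(const struct intel_engine_execlists *el)
{
	return el->port_mask + 1;
}

static inline unsigned int
example_execlists_count_inflight(const struct intel_engine_execlists *el)
{
	unsigned int count = 0;

	while (el->inflight[count])	/* stop at the NULL sentinel */
		count++;

	return count;
}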

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_execlists_stats {
	/**
	 * @active: Number of contexts currently scheduled in.
	 */
	unsigned int active;

	/**
	 * @lock: Lock protecting the below fields.
	 */
	seqcount_t lock;

	/**
	 * @total: Total time this engine was busy.
	 *
	 * Accumulated time, not counting the most recent block in cases
	 * where the engine is currently busy (active > 0).
	 */
	ktime_t total;

	/**
	 * @start: Timestamp of the last idle to active transition.
	 *
	 * Idle is defined as active == 0, active as active > 0.
	 */
	ktime_t start;
};
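
/*
 * Sketch of how the fields above combine into a busyness readout,
 * assuming only what the kerneldoc states: if the engine is currently
 * active, the accumulated total is extended by the time since the last
 * idle-to-active transition. The real driver samples these fields under
 * the seqcount @lock, which is omitted here; the example_ name is
 * hypothetical.
 */
static inline ktime_t
example_execlists_busy(const struct intel_engine_execlists_stats *stats,
		       ktime_t now)
{
	ktime_t total = stats->total;

	if (stats->active)	/* extend by the current busy block */
		total = ktime_add(total, ktime_sub(now, stats->start));

	return total;
}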

struct intel_engine_guc_stats {
	/**
	 * @running: Active state of the engine when busyness was last sampled.
	 */
	bool running;

	/**
	 * @prev_total: Previous value of total runtime clock cycles.
	 */
	u32 prev_total;

	/**
	 * @total_gt_clks: Total gt clock cycles this engine was busy.
	 */
	u64 total_gt_clks;

	/**
	 * @start_gt_clk: GT clock time of last idle to active transition.
	 */
	u64 start_gt_clk;
};
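
/*
 * The matching sketch for the GuC backend, under the same assumptions
 * as the execlists example above: busyness in GT clock cycles is the
 * accumulated total, extended by the running block if the engine was
 * active at the last sample. Converting cycles to wall time (via the
 * GT clock frequency) is left to the caller; the example_ name is
 * hypothetical.
 */
static inline u64
example_guc_busy_clks(const struct intel_engine_guc_stats *stats,
		      u64 now_gt_clk)
{
	u64 total = stats->total_gt_clks;

	if (stats->running)	/* extend by the current busy block */
		total += now_gt_clk - stats->start_gt_clk;

	return total;
}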

struct intel_engine_cs {
	struct drm_i915_private *i915;
	struct intel_gt *gt;
	struct intel_uncore *uncore;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	enum intel_engine_id legacy_idx;

	unsigned int guc_id;

	intel_engine_mask_t mask;
	u32 reset_domain;
	/**
	 * @logical_mask: logical mask of engine, reported to user space via
	 * the query IOCTL and used to communicate with the GuC in logical
	 * space. The logical instance of a physical engine can change based
	 * on product and fusing.
	 */
	intel_engine_mask_t logical_mask;

	u8 class;
	u8 instance;

	u16 uabi_class;
	u16 uabi_instance;

	u32 uabi_capabilities;
	u32 context_size;
	u32 mmio_base;

	/*
	 * Some w/a require forcewake to be held (which prevents RC6) while
	 * a particular engine is active. If so, we set fw_domain to the
	 * domains that need to be held for the duration of request activity,
	 * and 0 if none. We try to limit the duration of the hold as much
	 * as possible.
	 */
	enum forcewake_domains fw_domain;
	unsigned int fw_active;

	unsigned long context_tag;

	struct rb_node uabi_node;

	struct intel_sseu sseu;

	struct i915_sched_engine *sched_engine;

	/* keep a request in reserve for a [pm] barrier under oom */
	struct i915_request *request_pool;

	struct intel_context *hung_ce;

	struct llist_head barrier_tasks;

	struct intel_context *kernel_context; /* pinned */

	/**
	 * @pinned_contexts_list: List of pinned contexts. This list is only
	 * expected to be manipulated during driver load or unload time, and
	 * therefore has no additional protection.
	 */
	struct list_head pinned_contexts_list;

	intel_engine_mask_t saturated; /* submitting semaphores too late? */

	/* Periodic pulse used to monitor engine health */
	struct {
		struct delayed_work work;
		struct i915_request *systole;
		unsigned long blocked;
	} heartbeat;

	unsigned long serial;

	unsigned long wakeref_serial;
	struct intel_wakeref wakeref;
	struct file *default_state;

	struct {
		struct intel_ring *ring;
		struct intel_timeline *timeline;
	} legacy;

	/*
	 * We track the average duration of the idle pulse on parking the
	 * engine to keep an estimate of how fast the engine is under ideal
	 * conditions.
	 */
	struct ewma__engine_latency latency;

	/* Keep track of all the seqno used, a trail of breadcrumbs */
	struct intel_breadcrumbs *breadcrumbs;

	struct intel_engine_pmu {
		/**
		 * @enable: Bitmask of enable sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
	} pmu;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_wa_list ctx_wa_list;
	struct i915_wa_list wa_list;
	struct i915_wa_list whitelist;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);
	void (*irq_handler)(struct intel_engine_cs *engine, u16 iir);

	void (*sanitize)(struct intel_engine_cs *engine);
	int (*resume)(struct intel_engine_cs *engine);

	struct {
		void (*prepare)(struct intel_engine_cs *engine);

		void (*rewind)(struct intel_engine_cs *engine, bool stalled);
		void (*cancel)(struct intel_engine_cs *engine);

		void (*finish)(struct intel_engine_cs *engine);
	} reset;

	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*bump_serial)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	const struct intel_context_ops *cops;

	int (*request_alloc)(struct i915_request *rq);

	int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct i915_request *rq,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
	int (*emit_init_breadcrumb)(struct i915_request *rq);
	u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
				     u32 *cs);
	unsigned int emit_fini_breadcrumb_dw;

	/*
	 * Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct i915_request *rq);

	void (*release)(struct intel_engine_cs *engine);

	/*
	 * Add / remove request from engine active tracking
	 */
	void (*add_active_request)(struct i915_request *rq);
	void (*remove_active_request)(struct i915_request *rq);

	/*
	 * Get engine busyness and the time at which the busyness was sampled.
	 */
	ktime_t (*busyness)(struct intel_engine_cs *engine,
			    ktime_t *now);

	struct intel_engine_execlists execlists;

	/*
	 * Keep track of completed timelines on this engine for early
	 * retirement with the goal of quickly enabling powersaving as
	 * soon as the engine is idle.
	 */
	struct intel_timeline *retire;
	struct work_struct retire_work;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

#define I915_ENGINE_USING_CMD_PARSER		BIT(0)
#define I915_ENGINE_SUPPORTS_STATS		BIT(1)
#define I915_ENGINE_HAS_PREEMPTION		BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES		BIT(3)
#define I915_ENGINE_HAS_TIMESLICES		BIT(4)
#define I915_ENGINE_IS_VIRTUAL			BIT(5)
#define I915_ENGINE_HAS_RELATIVE_MMIO		BIT(6)
#define I915_ENGINE_REQUIRES_CMD_PARSER		BIT(7)
#define I915_ENGINE_WANT_FORCED_PREEMPTION	BIT(8)
#define I915_ENGINE_HAS_RCS_REG_STATE		BIT(9)
#define I915_ENGINE_HAS_EU_PRIORITY		BIT(10)
#define I915_ENGINE_FIRST_RENDER_COMPUTE	BIT(11)
#define I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT	BIT(12)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);

	struct {
		union {
			struct intel_engine_execlists_stats execlists;
			struct intel_engine_guc_stats guc;
		};

		/**
		 * @rps: Utilisation at last RPS sampling.
		 */
		ktime_t rps;
	} stats;

	/*
	 * Engine tunables: "props" holds the live values and "defaults"
	 * the initial values they can be restored to.
	 */
	struct {
		unsigned long heartbeat_interval_ms;
		unsigned long max_busywait_duration_ns;
		unsigned long preempt_timeout_ms;
		unsigned long stop_timeout_ms;
		unsigned long timeslice_duration_ms;
	} props, defaults;

	I915_SELFTEST_DECLARE(struct fault_attr reset_timeout);
};

static inline bool
intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
}

static inline bool
intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool
intel_engine_has_semaphores(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
}
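
/*
 * CONFIG_DRM_I915_TIMESLICE_DURATION is an integer Kconfig value (the
 * default timeslice in milliseconds), so a build that sets it to 0
 * compiles timeslicing support out entirely in the helper below.
 */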
static inline bool
intel_engine_has_timeslices(const struct intel_engine_cs *engine)
{
	if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
		return false;

	return engine->flags & I915_ENGINE_HAS_TIMESLICES;
}

static inline bool
intel_engine_is_virtual(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_IS_VIRTUAL;
}

static inline bool
intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
{
	return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
}

/* Wa_14014475959:dg2 */
static inline bool
intel_engine_uses_wa_hold_ccs_switchout(struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
}

#endif /* __INTEL_ENGINE_TYPES_H__ */