/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_ENGINE_TYPES__
#define __INTEL_ENGINE_TYPES__

#include <linux/average.h>
#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "i915_gem.h"
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"

/* HW Engine class + instance */
#define RENDER_CLASS		0
#define VIDEO_DECODE_CLASS	1
#define VIDEO_ENHANCEMENT_CLASS	2
#define COPY_ENGINE_CLASS	3
#define OTHER_CLASS		4
#define COMPUTE_CLASS		5
#define MAX_ENGINE_CLASS	5
#define MAX_ENGINE_INSTANCE	7

#define I915_MAX_SLICES		3
#define I915_MAX_SUBSLICES	8

#define I915_CMD_HASH_ORDER	9

struct dma_fence;
struct drm_i915_gem_object;
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct i915_sched_engine;
struct intel_gt;
struct intel_ring;
struct intel_uncore;
struct intel_breadcrumbs;

typedef u32 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)

struct intel_hw_status_page {
	struct list_head timelines;
	struct i915_vma *vma;
	u32 *addr;
};

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 slice_common_extra[2];
	u32 sampler[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
	u32 row[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];

	/* Added in XeHPG */
	u32 geom_svg[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
};

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 * offset: specifies the batch starting position, also helpful if we
 * want to have multiple batches at different offsets based on some
 * criteria. It is not a requirement at the moment but provides an
 * option for future use.
 * size: size of the batch in dwords
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};
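/*
 * Illustrative sketch (not part of the original header): because
 * struct i915_wa_ctx_bb stores @offset and @size in dwords within the
 * single shared workaround page, consumers must scale by sizeof(u32)
 * to obtain byte addresses. The helper name below is hypothetical.
 */
static inline u32 i915_wa_ctx_bb_end_in_bytes(const struct i915_wa_ctx_bb *bb)
{
	/* dword units -> bytes; the batch occupies [offset, offset + size) */
	return (bb->offset + bb->size) * sizeof(u32);
}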
#define I915_MAX_VCS	8
#define I915_MAX_VECS	4
#define I915_MAX_CCS	4

/*
 * Engine IDs definitions.
 * Keep instances of the same engine type together.
 */
enum intel_engine_id {
	RCS0 = 0,
	BCS0,
	VCS0,
	VCS1,
	VCS2,
	VCS3,
	VCS4,
	VCS5,
	VCS6,
	VCS7,
#define _VCS(n) (VCS0 + (n))
	VECS0,
	VECS1,
	VECS2,
	VECS3,
#define _VECS(n) (VECS0 + (n))
	CCS0,
	CCS1,
	CCS2,
	CCS3,
#define _CCS(n) (CCS0 + (n))
	I915_NUM_ENGINES
#define INVALID_ENGINE ((enum intel_engine_id)-1)
};

/* A simple estimator for the round-trip latency of an engine */
DECLARE_EWMA(_engine_latency, 6, 4)

struct st_preempt_hang {
	struct completion completion;
	unsigned int count;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state
 * of the driver and the hardware for the execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @timer: kick the current context if its timeslice expires
	 */
	struct timer_list timer;

	/**
	 * @preempt: reset the current context if it fails to give way
	 */
	struct timer_list preempt;

	/**
	 * @ccid: identifier for contexts submitted to this engine
	 */
	u32 ccid;

	/**
	 * @yield: CCID at the time of the last semaphore-wait interrupt.
	 *
	 * Instead of leaving a semaphore busy-spinning on an engine, we would
	 * like to switch to another ready context, i.e. yielding the
	 * semaphore timeslice.
	 */
	u32 yield;

	/**
	 * @error_interrupt: CS Master EIR
	 *
	 * The CS generates an interrupt when it detects an error. We capture
	 * the first error interrupt, record the EIR and schedule the tasklet.
	 * In the tasklet, we process the pending CS events to ensure we have
	 * the guilty request, and then reset the engine.
	 *
	 * Low 16b are used by HW, with the upper 16b used as the enabling
	 * mask. Reserve the upper 16b for tracking internal errors.
	 */
	u32 error_interrupt;
#define ERROR_CSB	BIT(31)
#define ERROR_PREEMPT	BIT(30)

	/**
	 * @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset
	 */
	u32 reset_ccid;

	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

#define EXECLIST_MAX_PORTS 2
	/**
	 * @active: the currently known context executing on HW
	 */
	struct i915_request * const *active;
	/**
	 * @inflight: the set of contexts submitted and acknowledged by HW
	 *
	 * The set of inflight contexts is managed by reading CS events
	 * from the HW. On a context-switch event (not preemption), we
	 * know the HW has transitioned from port0 to port1, and we
	 * advance our inflight/active tracking accordingly.
	 */
	struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
	/**
	 * @pending: the next set of contexts submitted to ELSP
	 *
	 * We store the array of contexts that we submit to HW (via ELSP) and
	 * promote them to the inflight array once HW has signaled the
	 * preemption or idle-to-active event.
	 */
	struct i915_request *pending[EXECLIST_MAX_PORTS + 1];

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @virtual: Queue of requests on a virtual engine, sorted by priority.
	 * Each RB entry is a struct i915_priolist containing a list of
	 * requests of the same priority.
	 */
	struct rb_root_cached virtual;

	/**
	 * @csb_write: control register for Context Switch buffer
	 *
	 * Note this register may be either mmio or HWSP shadow.
	 */
	u32 *csb_write;

	/**
	 * @csb_status: status array for Context Switch buffer
	 *
	 * Note these registers may be either mmio or HWSP shadow.
	 */
	u64 *csb_status;

	/**
	 * @csb_size: context status buffer FIFO size
	 */
	u8 csb_size;

	/**
	 * @csb_head: context status buffer head
	 */
	u8 csb_head;

	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};
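/*
 * Minimal usage sketch (added for illustration; the helper name is
 * hypothetical, though a similar accessor exists alongside this header
 * in upstream i915): @port_mask stores the number of execlist ports
 * minus one, so the port count is recovered as below.
 */
static inline unsigned int
execlists_sketch_num_ports(const struct intel_engine_execlists *execlists)
{
	return execlists->port_mask + 1; /* e.g. mask 1 -> 2 ports */
}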
#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_execlists_stats {
	/**
	 * @active: Number of contexts currently scheduled in.
	 */
	unsigned int active;

	/**
	 * @lock: Lock protecting the below fields.
	 */
	seqcount_t lock;

	/**
	 * @total: Total time this engine was busy.
	 *
	 * Accumulated time, not counting the most recent block in cases
	 * where the engine is currently busy (active > 0).
	 */
	ktime_t total;

	/**
	 * @start: Timestamp of the last idle to active transition.
	 *
	 * Idle is defined as active == 0, active as active > 0.
	 */
	ktime_t start;
};

struct intel_engine_guc_stats {
	/**
	 * @running: Active state of the engine when busyness was last sampled.
	 */
	bool running;

	/**
	 * @prev_total: Previous value of total runtime clock cycles.
	 */
	u32 prev_total;

	/**
	 * @total_gt_clks: Total gt clock cycles this engine was busy.
	 */
	u64 total_gt_clks;

	/**
	 * @start_gt_clk: GT clock time of last idle to active transition.
	 */
	u64 start_gt_clk;
};
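/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * per the field descriptions above, @total excludes the currently open
 * busy block, so a sampler extends it by (now - @start) while the
 * engine is active. The real driver samples under @lock (a seqcount),
 * which is omitted here for brevity.
 */
static inline ktime_t
execlists_stats_sketch_busy_time(const struct intel_engine_execlists_stats *stats,
				 ktime_t now)
{
	ktime_t total = stats->total;

	if (stats->active) /* busy: add the in-progress interval */
		total = ktime_add(total, ktime_sub(now, stats->start));

	return total;
}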
struct intel_engine_cs {
	struct drm_i915_private *i915;
	struct intel_gt *gt;
	struct intel_uncore *uncore;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	enum intel_engine_id legacy_idx;

	unsigned int guc_id;

	intel_engine_mask_t mask;
	u32 reset_domain;
	/**
	 * @logical_mask: logical mask of engine, reported to user space via
	 * the query IOCTL and used to communicate with the GuC in logical
	 * space. The logical instance of a physical engine can change based
	 * on product and fusing.
	 */
	intel_engine_mask_t logical_mask;

	u8 class;
	u8 instance;

	u16 uabi_class;
	u16 uabi_instance;

	u32 uabi_capabilities;
	u32 context_size;
	u32 mmio_base;

	/*
	 * Some w/a require forcewake to be held (which prevents RC6) while
	 * a particular engine is active. If so, we set fw_domain to the
	 * domains that need to be held for the duration of request activity,
	 * and 0 if none. We try to limit the duration of the hold as much
	 * as possible.
	 */
	enum forcewake_domains fw_domain;
	unsigned int fw_active;

	unsigned long context_tag;

	struct rb_node uabi_node;

	struct intel_sseu sseu;

	struct i915_sched_engine *sched_engine;

	/* keep a request in reserve for a [pm] barrier under oom */
	struct i915_request *request_pool;

	struct intel_context *hung_ce;

	struct llist_head barrier_tasks;

	struct intel_context *kernel_context; /* pinned */

	/**
	 * @pinned_contexts_list: List of pinned contexts. This list is
	 * assumed to be manipulated only during driver load or unload time,
	 * and therefore has no additional protection.
	 */
	struct list_head pinned_contexts_list;

	intel_engine_mask_t saturated; /* submitting semaphores too late? */

	struct {
		struct delayed_work work;
		struct i915_request *systole;
		unsigned long blocked;
	} heartbeat;

	unsigned long serial;

	unsigned long wakeref_serial;
	struct intel_wakeref wakeref;
	struct file *default_state;

	struct {
		struct intel_ring *ring;
		struct intel_timeline *timeline;
	} legacy;

	/*
	 * We track the average duration of the idle pulse on parking the
	 * engine to keep an estimate of how fast the engine is under ideal
	 * conditions.
	 */
	struct ewma__engine_latency latency;

	/* Keep track of all the seqno used, a trail of breadcrumbs */
	struct intel_breadcrumbs *breadcrumbs;

	struct intel_engine_pmu {
		/**
		 * @enable: Bitmask of enabled sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
	} pmu;
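	/*
	 * Added note (illustrative, describing intended usage rather than
	 * code in this header): the internal PMU timer is expected to
	 * update a sample[] slot only while the matching bit in pmu.enable
	 * is set; enable_count[] holds the per-event reference counts that
	 * gate those bits.
	 */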
	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_wa_list ctx_wa_list;
	struct i915_wa_list wa_list;
	struct i915_wa_list whitelist;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);
	void (*irq_handler)(struct intel_engine_cs *engine, u16 iir);

	void (*sanitize)(struct intel_engine_cs *engine);
	int (*resume)(struct intel_engine_cs *engine);

	struct {
		void (*prepare)(struct intel_engine_cs *engine);

		void (*rewind)(struct intel_engine_cs *engine, bool stalled);
		void (*cancel)(struct intel_engine_cs *engine);

		void (*finish)(struct intel_engine_cs *engine);
	} reset;

	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*bump_serial)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	const struct intel_context_ops *cops;

	int (*request_alloc)(struct i915_request *rq);

	int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct i915_request *rq,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
	int (*emit_init_breadcrumb)(struct i915_request *rq);
	u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
				     u32 *cs);
	unsigned int emit_fini_breadcrumb_dw;

	/*
	 * Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct i915_request *rq);

	void (*release)(struct intel_engine_cs *engine);

	/*
	 * Add / remove request from engine active tracking
	 */
	void (*add_active_request)(struct i915_request *rq);
	void (*remove_active_request)(struct i915_request *rq);

	/*
	 * Get engine busyness and the time at which the busyness was sampled.
	 */
	ktime_t (*busyness)(struct intel_engine_cs *engine,
			    ktime_t *now);

	struct intel_engine_execlists execlists;

	/*
	 * Keep track of completed timelines on this engine for early
	 * retirement with the goal of quickly enabling powersaving as
	 * soon as the engine is idle.
	 */
	struct intel_timeline *retire;
	struct work_struct retire_work;
	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

#define I915_ENGINE_USING_CMD_PARSER	BIT(0)
#define I915_ENGINE_SUPPORTS_STATS	BIT(1)
#define I915_ENGINE_HAS_PREEMPTION	BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES	BIT(3)
#define I915_ENGINE_HAS_TIMESLICES	BIT(4)
#define I915_ENGINE_IS_VIRTUAL		BIT(5)
#define I915_ENGINE_HAS_RELATIVE_MMIO	BIT(6)
#define I915_ENGINE_REQUIRES_CMD_PARSER	BIT(7)
#define I915_ENGINE_WANT_FORCED_PREEMPTION BIT(8)
#define I915_ENGINE_HAS_RCS_REG_STATE	BIT(9)
#define I915_ENGINE_HAS_EU_PRIORITY	BIT(10)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Returns 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);

	struct {
		union {
			struct intel_engine_execlists_stats execlists;
			struct intel_engine_guc_stats guc;
		};

		/**
		 * @rps: Utilisation at last RPS sampling.
		 */
		ktime_t rps;
	} stats;

	struct {
		unsigned long heartbeat_interval_ms;
		unsigned long max_busywait_duration_ns;
		unsigned long preempt_timeout_ms;
		unsigned long stop_timeout_ms;
		unsigned long timeslice_duration_ms;
	} props, defaults;

	I915_SELFTEST_DECLARE(struct fault_attr reset_timeout);
};
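/*
 * Usage sketch (added; assumes the wider driver's convention that
 * @defaults preserves the initial per-engine tunables while @props
 * holds the live, adjustable values): restoring a single knob is then
 * a member copy. The helper name is hypothetical.
 */
static inline void
intel_engine_sketch_reset_heartbeat(struct intel_engine_cs *engine)
{
	engine->props.heartbeat_interval_ms =
		engine->defaults.heartbeat_interval_ms;
}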
static inline bool
intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
}

static inline bool
intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool
intel_engine_has_semaphores(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
}

static inline bool
intel_engine_has_timeslices(const struct intel_engine_cs *engine)
{
	if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
		return false;

	return engine->flags & I915_ENGINE_HAS_TIMESLICES;
}

static inline bool
intel_engine_is_virtual(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_IS_VIRTUAL;
}

static inline bool
intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
{
	return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
}

#define instdone_has_slice(dev_priv___, sseu___, slice___) \
	((GRAPHICS_VER(dev_priv___) == 7 ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))

#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \
	(GRAPHICS_VER(dev_priv__) == 7 ? (1 & BIT(subslice__)) : \
	 intel_sseu_has_subslice(sseu__, 0, subslice__))

#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \
	for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \
	     (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \
	     (slice_) += ((subslice_) == 0)) \
		for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \
			    (instdone_has_subslice(dev_priv_, sseu_, slice_, \
						   subslice_)))

#define for_each_instdone_gslice_dss_xehp(dev_priv_, sseu_, iter_, gslice_, dss_) \
	for ((iter_) = 0, (gslice_) = 0, (dss_) = 0; \
	     (iter_) < GEN_MAX_SUBSLICES; \
	     (iter_)++, (gslice_) = (iter_) / GEN_DSS_PER_GSLICE, \
	     (dss_) = (iter_) % GEN_DSS_PER_GSLICE) \
		for_each_if(intel_sseu_has_subslice((sseu_), 0, (iter_)))

#endif /* __INTEL_ENGINE_TYPES__ */