1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #ifndef __INTEL_GT_TYPES__
7 #define __INTEL_GT_TYPES__
8 
9 #include <linux/ktime.h>
10 #include <linux/list.h>
11 #include <linux/llist.h>
12 #include <linux/mutex.h>
13 #include <linux/notifier.h>
14 #include <linux/spinlock.h>
15 #include <linux/types.h>
16 #include <linux/workqueue.h>
17 
18 #include "uc/intel_uc.h"
19 
20 #include "i915_vma.h"
21 #include "intel_engine_types.h"
22 #include "intel_gt_buffer_pool_types.h"
23 #include "intel_llc_types.h"
24 #include "intel_reset_types.h"
25 #include "intel_rc6_types.h"
26 #include "intel_rps_types.h"
27 #include "intel_wakeref.h"
28 
29 struct drm_i915_private;
30 struct i915_ggtt;
31 struct intel_engine_cs;
32 struct intel_uncore;
33 
/* Which backend is used to submit requests to the hardware. */
enum intel_submission_method {
	INTEL_SUBMISSION_RING, /* legacy ringbuffer submission */
	INTEL_SUBMISSION_ELSP, /* execlists submission (ELSP port writes) */
	INTEL_SUBMISSION_GUC,  /* submission mediated by the GuC firmware */
};
39 
/*
 * struct intel_gt - per-GT ("graphics technology") state.
 *
 * Gathers the engines, timelines, interrupt bookkeeping and power
 * management state for one GT instance of the device.
 */
struct intel_gt {
	struct drm_i915_private *i915;	/* backpointer to the owning device */
	struct intel_uncore *uncore;	/* MMIO access for this GT */
	struct i915_ggtt *ggtt;		/* the global GTT this GT uses */

	/* Microcontroller (GuC/HuC) state, see uc/intel_uc.h */
	struct intel_uc uc;

	struct intel_gt_timelines {
		spinlock_t lock; /* protects active_list */
		struct list_head active_list;
	} timelines;

	struct intel_gt_requests {
		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;
	} requests;

	/*
	 * Deferred per-request watchdog processing: items are queued on
	 * @list and drained from @work.
	 * NOTE(review): producers/consumers are not visible in this file;
	 * confirm what is queued here before relying on this description.
	 */
	struct {
		struct llist_head list;
		struct work_struct work;
	} watchdog;

	/* Runtime-pm wakeref keeping the GT powered while in use. */
	struct intel_wakeref wakeref;
	/*
	 * Count of wakerefs taken on behalf of userspace.
	 * NOTE(review): the takers are not visible here — confirm.
	 */
	atomic_t user_wakeref;

	struct list_head closed_vma;
	spinlock_t closed_lock; /* guards the list of closed_vma */

	/* Timestamp of the most recent GT initialisation (debug aid). */
	ktime_t last_init_time;
	struct intel_reset reset;

	/**
	 * Is the GPU currently considered idle, or busy executing
	 * userspace requests? Whilst idle, we allow runtime power
	 * management to power down the hardware and display clocks.
	 * In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	intel_wakeref_t awake;

	/*
	 * Timestamp clock frequency (Hz) and the matching tick period (ns).
	 * NOTE(review): presumably the command-streamer timestamp clock;
	 * the writer is not visible in this file — confirm.
	 */
	u32 clock_frequency;
	u32 clock_period_ns;

	struct intel_llc llc;	/* last-level cache control */
	struct intel_rc6 rc6;	/* RC6 power-saving state */
	struct intel_rps rps;	/* frequency/turbo (RPS) state */

	/* Protects the interrupt mask bookkeeping below. */
	spinlock_t irq_lock;
	/*
	 * Shadow copies of interrupt mask/enable register contents.
	 * NOTE(review): assumed to mirror the GT IMR and PM IER/IMR
	 * hardware registers — confirm against the interrupt code.
	 */
	u32 gt_imr;
	u32 pm_ier;
	u32 pm_imr;

	/* PM interrupt bits routed to the GuC. */
	u32 pm_guc_events;

	/* Busyness accounting for the GT as a whole. */
	struct {
		bool active;	/* accounting currently sees the GT as busy */

		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqcount_mutex_t lock;

		/**
		 * @total: Total time the GT was busy.
		 *
		 * Accumulated time not counting the most recent block in cases
		 * where the GT is currently busy (active > 0).
		 */
		ktime_t total;

		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active is active > 0.
		 */
		ktime_t start;
	} stats;

	/* Engines indexed by global id, and by (class, instance) pair. */
	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];
	/* Submission backend in use, see enum intel_submission_method. */
	enum intel_submission_method submission_method;

	/*
	 * Default address space (either GGTT or ppGTT depending on arch).
	 *
	 * Reserved for exclusive use by the kernel.
	 */
	struct i915_address_space *vm;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 *
	 * Buffers older than 1s are periodically reaped from the pool,
	 * or may be reclaimed by the shrinker before then.
	 */
	struct intel_gt_buffer_pool buffer_pool;

	/* Kernel-owned scratch page; slot offsets in intel_gt_scratch_field. */
	struct i915_vma *scratch;

	/* Static description of the hardware exposed by this GT. */
	struct intel_gt_info {
		intel_engine_mask_t engine_mask; /* mask of present engines */
		u8 num_engines;

		/* Media engine access to SFC per instance */
		u8 vdbox_sfc_access;

		/* Slice/subslice/EU info */
		struct sseu_dev_info sseu;
	} info;
};
159 
/*
 * Byte offsets of well-known slots inside the GT scratch page
 * (struct intel_gt.scratch). Values must match the users of each slot;
 * the per-slot size is noted above each field.
 * NOTE(review): the gaps between offsets appear intended to keep slots
 * on separate cachelines — confirm against the consumers.
 */
enum intel_gt_scratch_field {
	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,

	/* 6 * 8 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,

	/* 4 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};
176 
#endif /* __INTEL_GT_TYPES__ */
178