/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT_TYPES__
#define __INTEL_GT_TYPES__

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "uc/intel_uc.h"
#include "intel_gsc.h"

#include "i915_vma.h"
#include "i915_perf_types.h"
#include "intel_engine_types.h"
#include "intel_gt_buffer_pool_types.h"
#include "intel_hwconfig.h"
#include "intel_llc_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
#include "intel_rps_types.h"
#include "intel_migrate_types.h"
#include "intel_wakeref.h"
#include "pxp/intel_pxp_types.h"
#include "intel_wopcm.h"

struct drm_i915_private;
struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;

struct intel_mmio_range {
	u32 start;
	u32 end;
};

/*
 * The hardware has multiple kinds of multicast register ranges that need
 * special register steering (and future platforms are expected to add
 * additional types).
 *
 * During driver startup, we initialize the steering control register to
 * direct reads to a slice/subslice that is valid for the 'subslice' class
 * of multicast registers.  If another type of steering does not have any
 * overlap in valid steering targets with 'subslice' style registers, we will
 * need to explicitly re-steer reads of registers of the other type.
 *
 * Only the replication types that may need additional non-default steering
 * are listed here.
 */
enum intel_steering_type {
	L3BANK,
	MSLICE,
	LNCF,
	GAM,
	DSS,
	OADDRM,

	/*
	 * On some platforms there are multiple types of MCR registers that
	 * will always return a non-terminated value at instance (0, 0).  We'll
	 * lump those all into a single category to keep things simple.
	 */
	INSTANCE0,

	NUM_STEERING_TYPES
};
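
/*
 * Illustrative sketch (not part of the upstream interface): each entry of
 * intel_gt.steering_table[] below points at an array of intel_mmio_range
 * entries, conventionally terminated by a zeroed sentinel.  A range walk
 * along these lines would decide whether a register offset needs explicit
 * re-steering; the helper name here is hypothetical.
 */
static inline bool
intel_gt_mmio_range_contains(const struct intel_mmio_range *range, u32 offset)
{
	/* Walk the table until the zeroed sentinel entry. */
	for (; range && range->end; range++) {
		if (offset >= range->start && offset <= range->end)
			return true;
	}

	return false;
}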

enum intel_submission_method {
	INTEL_SUBMISSION_RING,
	INTEL_SUBMISSION_ELSP,
	INTEL_SUBMISSION_GUC,
};

struct gt_defaults {
	u32 min_freq;
	u32 max_freq;
};

enum intel_gt_type {
	GT_PRIMARY,
	GT_TILE,
	GT_MEDIA,
};

struct intel_gt {
	struct drm_i915_private *i915;
	const char *name;
	enum intel_gt_type type;

	struct intel_uncore *uncore;
	struct i915_ggtt *ggtt;

	struct intel_uc uc;
	struct intel_gsc gsc;
	struct intel_wopcm wopcm;

	struct {
		/* Serialize global tlb invalidations */
		struct mutex invalidate_lock;

		/*
		 * Batch TLB invalidations
		 *
		 * After unbinding the PTE, we need to ensure the TLBs
		 * are invalidated prior to releasing the physical pages.
		 * Only one such invalidation is needed for all unbinds,
		 * so we track how many TLB invalidations have been
		 * performed since unbinding the PTE and only emit an
		 * extra invalidate if no full barrier has been passed.
		 */
		seqcount_mutex_t seqno;
	} tlb;

	struct i915_wa_list wa_list;

	struct intel_gt_timelines {
		spinlock_t lock; /* protects active_list */
		struct list_head active_list;
	} timelines;

	struct intel_gt_requests {
		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;
	} requests;

	struct {
		struct llist_head list;
		struct work_struct work;
	} watchdog;

	struct intel_wakeref wakeref;
	atomic_t user_wakeref;

	struct list_head closed_vma;
	spinlock_t closed_lock; /* guards the list of closed_vma */

	ktime_t last_init_time;
	struct intel_reset reset;

	/**
	 * Is the GPU currently considered idle, or busy executing
	 * userspace requests? Whilst idle, we allow runtime power
	 * management to power down the hardware and display clocks.
	 * In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	intel_wakeref_t awake;

	u32 clock_frequency;
	u32 clock_period_ns;

	struct intel_llc llc;
	struct intel_rc6 rc6;
	struct intel_rps rps;

	spinlock_t *irq_lock;
	u32 gt_imr;
	u32 pm_ier;
	u32 pm_imr;

	u32 pm_guc_events;

	struct {
		bool active;

		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqcount_mutex_t lock;

		/**
		 * @total: Total time this GT has been busy.
		 *
		 * Accumulated time, not counting the most recent block in
		 * cases where the GT is currently busy (active > 0).
		 */
		ktime_t total;

		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, busy as active > 0.
		 */
		ktime_t start;
	} stats;

	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];
	enum intel_submission_method submission_method;

	/*
	 * Default address space (either GGTT or ppGTT depending on arch).
	 *
	 * Reserved for exclusive use by the kernel.
	 */
	struct i915_address_space *vm;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 *
	 * Buffers older than 1s are periodically reaped from the pool,
	 * or may be reclaimed by the shrinker before then.
	 */
	struct intel_gt_buffer_pool buffer_pool;

	struct i915_vma *scratch;

	struct intel_migrate migrate;

	const struct intel_mmio_range *steering_table[NUM_STEERING_TYPES];

	struct {
		u8 groupid;
		u8 instanceid;
	} default_steering;

	/*
	 * Base of per-tile GTTMMADR where we can derive the MMIO and the GGTT.
	 */
	phys_addr_t phys_addr;

	struct intel_gt_info {
		unsigned int id;

		intel_engine_mask_t engine_mask;

		u32 l3bank_mask;

		u8 num_engines;

		/* General presence of SFC units */
		u8 sfc_mask;

		/* Media engine access to SFC per instance */
		u8 vdbox_sfc_access;

		/* Slice/subslice/EU info */
		struct sseu_dev_info sseu;

		unsigned long mslice_mask;

		/** @hwconfig: hardware configuration data */
		struct intel_hwconfig hwconfig;
	} info;

	struct {
		u8 uc_index;
		u8 wb_index; /* Only used on HAS_L3_CCS_READ() platforms */
	} mocs;

	struct intel_pxp pxp;

	/* gt/gtN sysfs */
	struct kobject sysfs_gt;

	/* sysfs defaults per gt */
	struct gt_defaults defaults;
	struct kobject *sysfs_defaults;

	struct i915_perf_gt perf;
};
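
/*
 * Illustrative sketch (not part of the upstream interface): how the busyness
 * tracked in intel_gt.stats would typically be sampled.  While active, the
 * most recent busy block (now - start) is added on top of the accumulated
 * total, and the seqcount guards against torn reads.  The helper name is
 * hypothetical.
 */
static inline ktime_t intel_gt_read_busy_time(struct intel_gt *gt)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqcount_begin(&gt->stats.lock);

		total = gt->stats.total;
		if (gt->stats.active)
			total = ktime_add(total,
					  ktime_sub(ktime_get(),
						    gt->stats.start));
	} while (read_seqcount_retry(&gt->stats.lock, seq));

	return total;
}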

struct intel_gt_definition {
	enum intel_gt_type type;
	char *name;
	u32 mapping_base;
	u32 gsi_offset;
	intel_engine_mask_t engine_mask;
};

enum intel_gt_scratch_field {
	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,

	/* 6 * 8 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,

	/* 4 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};
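
/*
 * Illustrative sketch (not part of the upstream interface): the scratch
 * fields above are byte offsets into the GGTT-bound gt->scratch VMA, so an
 * address emitted into a command stream would be derived roughly as below.
 * The helper name here is hypothetical; the driver keeps an equivalent
 * helper alongside the rest of the GT interface.
 */
static inline u32
intel_gt_scratch_field_offset(const struct intel_gt *gt,
			      enum intel_gt_scratch_field field)
{
	/* GGTT offset of the scratch VMA plus the per-field byte offset. */
	return i915_ggtt_offset(gt->scratch) + field;
}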

#endif /* __INTEL_GT_TYPES__ */