1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2014-2019 Intel Corporation
4  */
5 
6 #ifndef _INTEL_GUC_FWIF_H
7 #define _INTEL_GUC_FWIF_H
8 
9 #include <linux/bits.h>
10 #include <linux/compiler.h>
11 #include <linux/types.h>
12 #include "gt/intel_engine_types.h"
13 
14 #include "abi/guc_actions_abi.h"
15 #include "abi/guc_errors_abi.h"
16 #include "abi/guc_communication_mmio_abi.h"
17 #include "abi/guc_communication_ctb_abi.h"
18 #include "abi/guc_messages_abi.h"
19 
20 #define GUC_CLIENT_PRIORITY_KMD_HIGH	0
21 #define GUC_CLIENT_PRIORITY_HIGH	1
22 #define GUC_CLIENT_PRIORITY_KMD_NORMAL	2
23 #define GUC_CLIENT_PRIORITY_NORMAL	3
24 #define GUC_CLIENT_PRIORITY_NUM		4
25 
26 #define GUC_MAX_STAGE_DESCRIPTORS	1024
27 #define	GUC_INVALID_STAGE_ID		GUC_MAX_STAGE_DESCRIPTORS
28 
29 #define GUC_RENDER_ENGINE		0
30 #define GUC_VIDEO_ENGINE		1
31 #define GUC_BLITTER_ENGINE		2
32 #define GUC_VIDEOENHANCE_ENGINE		3
33 #define GUC_VIDEO_ENGINE2		4
34 #define GUC_MAX_ENGINES_NUM		(GUC_VIDEO_ENGINE2 + 1)
35 
36 #define GUC_RENDER_CLASS		0
37 #define GUC_VIDEO_CLASS			1
38 #define GUC_VIDEOENHANCE_CLASS		2
39 #define GUC_BLITTER_CLASS		3
40 #define GUC_RESERVED_CLASS		4
41 #define GUC_LAST_ENGINE_CLASS		GUC_RESERVED_CLASS
42 #define GUC_MAX_ENGINE_CLASSES		16
43 #define GUC_MAX_INSTANCES_PER_CLASS	32
44 
45 #define GUC_DOORBELL_INVALID		256
46 
47 #define GUC_WQ_SIZE			(PAGE_SIZE * 2)
48 
49 /* Work queue item header definitions */
50 #define WQ_STATUS_ACTIVE		1
51 #define WQ_STATUS_SUSPENDED		2
52 #define WQ_STATUS_CMD_ERROR		3
53 #define WQ_STATUS_ENGINE_ID_NOT_USED	4
54 #define WQ_STATUS_SUSPENDED_FROM_RESET	5
55 #define WQ_TYPE_SHIFT			0
56 #define   WQ_TYPE_BATCH_BUF		(0x1 << WQ_TYPE_SHIFT)
57 #define   WQ_TYPE_PSEUDO		(0x2 << WQ_TYPE_SHIFT)
58 #define   WQ_TYPE_INORDER		(0x3 << WQ_TYPE_SHIFT)
59 #define   WQ_TYPE_NOOP			(0x4 << WQ_TYPE_SHIFT)
60 #define WQ_TARGET_SHIFT			10
61 #define WQ_LEN_SHIFT			16
62 #define WQ_NO_WCFLUSH_WAIT		(1 << 27)
63 #define WQ_PRESENT_WORKLOAD		(1 << 28)
64 
65 #define WQ_RING_TAIL_SHIFT		20
66 #define WQ_RING_TAIL_MAX		0x7FF	/* 2^11 QWords */
67 #define WQ_RING_TAIL_MASK		(WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
68 
69 #define GUC_STAGE_DESC_ATTR_ACTIVE	BIT(0)
70 #define GUC_STAGE_DESC_ATTR_PENDING_DB	BIT(1)
71 #define GUC_STAGE_DESC_ATTR_KERNEL	BIT(2)
72 #define GUC_STAGE_DESC_ATTR_PREEMPT	BIT(3)
73 #define GUC_STAGE_DESC_ATTR_RESET	BIT(4)
74 #define GUC_STAGE_DESC_ATTR_WQLOCKED	BIT(5)
75 #define GUC_STAGE_DESC_ATTR_PCH		BIT(6)
76 #define GUC_STAGE_DESC_ATTR_TERMINATED	BIT(7)
77 
78 #define GUC_CTL_LOG_PARAMS		0
79 #define   GUC_LOG_VALID			(1 << 0)
80 #define   GUC_LOG_NOTIFY_ON_HALF_FULL	(1 << 1)
81 #define   GUC_LOG_ALLOC_IN_MEGABYTE	(1 << 3)
82 #define   GUC_LOG_CRASH_SHIFT		4
83 #define   GUC_LOG_CRASH_MASK		(0x3 << GUC_LOG_CRASH_SHIFT)
84 #define   GUC_LOG_DPC_SHIFT		6
85 #define   GUC_LOG_DPC_MASK	        (0x7 << GUC_LOG_DPC_SHIFT)
86 #define   GUC_LOG_ISR_SHIFT		9
87 #define   GUC_LOG_ISR_MASK	        (0x7 << GUC_LOG_ISR_SHIFT)
88 #define   GUC_LOG_BUF_ADDR_SHIFT	12
89 
90 #define GUC_CTL_WA			1
91 #define GUC_CTL_FEATURE			2
92 #define   GUC_CTL_DISABLE_SCHEDULER	(1 << 14)
93 
94 #define GUC_CTL_DEBUG			3
95 #define   GUC_LOG_VERBOSITY_SHIFT	0
96 #define   GUC_LOG_VERBOSITY_LOW		(0 << GUC_LOG_VERBOSITY_SHIFT)
97 #define   GUC_LOG_VERBOSITY_MED		(1 << GUC_LOG_VERBOSITY_SHIFT)
98 #define   GUC_LOG_VERBOSITY_HIGH	(2 << GUC_LOG_VERBOSITY_SHIFT)
99 #define   GUC_LOG_VERBOSITY_ULTRA	(3 << GUC_LOG_VERBOSITY_SHIFT)
100 /* Verbosity range-check limits, without the shift */
101 #define	  GUC_LOG_VERBOSITY_MIN		0
102 #define	  GUC_LOG_VERBOSITY_MAX		3
103 #define	  GUC_LOG_VERBOSITY_MASK	0x0000000f
104 #define	  GUC_LOG_DESTINATION_MASK	(3 << 4)
105 #define   GUC_LOG_DISABLED		(1 << 6)
106 #define   GUC_PROFILE_ENABLED		(1 << 7)
107 
108 #define GUC_CTL_ADS			4
109 #define   GUC_ADS_ADDR_SHIFT		1
110 #define   GUC_ADS_ADDR_MASK		(0xFFFFF << GUC_ADS_ADDR_SHIFT)
111 
112 #define GUC_CTL_MAX_DWORDS		(SOFT_SCRATCH_COUNT - 2) /* [1..14] */
113 
114 /* Generic GT SysInfo data types */
115 #define GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED		0
116 #define GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK	1
117 #define GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI	2
118 #define GUC_GENERIC_GT_SYSINFO_MAX			16
119 
120 /*
121  * The class goes in bits [0..2] of the GuC ID, the instance in bits [3..6].
 * Bit 7 can be used for operations that apply to all engine classes and
 * instances.
123  */
124 #define GUC_ENGINE_CLASS_SHIFT		0
125 #define GUC_ENGINE_CLASS_MASK		(0x7 << GUC_ENGINE_CLASS_SHIFT)
126 #define GUC_ENGINE_INSTANCE_SHIFT	3
127 #define GUC_ENGINE_INSTANCE_MASK	(0xf << GUC_ENGINE_INSTANCE_SHIFT)
128 #define GUC_ENGINE_ALL_INSTANCES	BIT(7)
129 
130 #define MAKE_GUC_ID(class, instance) \
131 	(((class) << GUC_ENGINE_CLASS_SHIFT) | \
132 	 ((instance) << GUC_ENGINE_INSTANCE_SHIFT))
133 
134 #define GUC_ID_TO_ENGINE_CLASS(guc_id) \
135 	(((guc_id) & GUC_ENGINE_CLASS_MASK) >> GUC_ENGINE_CLASS_SHIFT)
136 #define GUC_ID_TO_ENGINE_INSTANCE(guc_id) \
137 	(((guc_id) & GUC_ENGINE_INSTANCE_MASK) >> GUC_ENGINE_INSTANCE_SHIFT)
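
/*
 * Usage sketch (illustrative only; the class/instance values below are made
 * up):
 *
 *	u32 guc_id = MAKE_GUC_ID(GUC_VIDEO_CLASS, 1);
 *
 * GUC_ID_TO_ENGINE_CLASS(guc_id) then yields GUC_VIDEO_CLASS and
 * GUC_ID_TO_ENGINE_INSTANCE(guc_id) yields 1.
 */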
138 
139 static inline u8 engine_class_to_guc_class(u8 class)
140 {
141 	BUILD_BUG_ON(GUC_RENDER_CLASS != RENDER_CLASS);
142 	BUILD_BUG_ON(GUC_BLITTER_CLASS != COPY_ENGINE_CLASS);
143 	BUILD_BUG_ON(GUC_VIDEO_CLASS != VIDEO_DECODE_CLASS);
144 	BUILD_BUG_ON(GUC_VIDEOENHANCE_CLASS != VIDEO_ENHANCEMENT_CLASS);
145 	GEM_BUG_ON(class > MAX_ENGINE_CLASS || class == OTHER_CLASS);
146 
147 	return class;
148 }
149 
150 static inline u8 guc_class_to_engine_class(u8 guc_class)
151 {
152 	GEM_BUG_ON(guc_class > GUC_LAST_ENGINE_CLASS);
153 	GEM_BUG_ON(guc_class == GUC_RESERVED_CLASS);
154 
155 	return guc_class;
156 }
157 
/* Work item for submitting workloads into the GuC's work queue. */
159 struct guc_wq_item {
160 	u32 header;
161 	u32 context_desc;
162 	u32 submit_element_info;
163 	u32 fence_id;
164 } __packed;
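
/*
 * A minimal sketch of filling in a work item using the WQ_* fields above
 * (illustrative only; wqi, wqi_len, engine_id, ctx_desc, ring_tail and
 * fence_id are hypothetical locals, and the real submission path lives in
 * intel_guc_submission.c):
 *
 *	wqi->header = WQ_TYPE_INORDER |
 *		      (wqi_len << WQ_LEN_SHIFT) |
 *		      (engine_id << WQ_TARGET_SHIFT) |
 *		      WQ_NO_WCFLUSH_WAIT;
 *	wqi->context_desc = ctx_desc;
 *	wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
 *	wqi->fence_id = fence_id;
 */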
165 
166 struct guc_process_desc {
167 	u32 stage_id;
168 	u64 db_base_addr;
169 	u32 head;
170 	u32 tail;
171 	u32 error_offset;
172 	u64 wq_base_addr;
173 	u32 wq_size_bytes;
174 	u32 wq_status;
175 	u32 engine_presence;
176 	u32 priority;
177 	u32 reserved[30];
178 } __packed;
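
/*
 * head and tail index into the shared work queue in a circular fashion. A
 * hedged sketch of checking for free space before appending a work item,
 * assuming CIRC_SPACE() from <linux/circ_buf.h> and a hypothetical "desc"
 * pointer (wraparound and error handling omitted):
 *
 *	u32 freespace = CIRC_SPACE(desc->tail, desc->head, GUC_WQ_SIZE);
 *
 *	if (freespace < wqi_size)
 *		return -ENOSPC;
 */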
179 
/* The engine ID and context ID are packed into guc_execlist_context.context_id */
181 #define GUC_ELC_CTXID_OFFSET		0
182 #define GUC_ELC_ENGINE_OFFSET		29
183 
184 /* The execlist context including software and HW information */
185 struct guc_execlist_context {
186 	u32 context_desc;
187 	u32 context_id;
188 	u32 ring_status;
189 	u32 ring_lrca;
190 	u32 ring_begin;
191 	u32 ring_end;
192 	u32 ring_next_free_location;
193 	u32 ring_current_tail_pointer_value;
194 	u8 engine_state_submit_value;
195 	u8 engine_state_wait_value;
196 	u16 pagefault_count;
197 	u16 engine_submit_queue_count;
198 } __packed;
199 
200 /*
201  * This structure describes a stage set arranged for a particular communication
202  * between uKernel (GuC) and Driver (KMD). Technically, this is known as a
203  * "GuC Context descriptor" in the specs, but we use the term "stage descriptor"
204  * to avoid confusion with all the other things already named "context" in the
 * driver. A static pool of these descriptors is stored inside a GEM object
206  * (stage_desc_pool) which is held for the entire lifetime of our interaction
207  * with the GuC, being allocated before the GuC is loaded with its firmware.
208  */
209 struct guc_stage_desc {
210 	u32 sched_common_area;
211 	u32 stage_id;
212 	u32 pas_id;
213 	u8 engines_used;
214 	u64 db_trigger_cpu;
215 	u32 db_trigger_uk;
216 	u64 db_trigger_phy;
217 	u16 db_id;
218 
219 	struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM];
220 
221 	u8 attribute;
222 
223 	u32 priority;
224 
225 	u32 wq_sampled_tail_offset;
226 	u32 wq_total_submit_enqueues;
227 
228 	u32 process_desc;
229 	u32 wq_addr;
230 	u32 wq_size;
231 
232 	u32 engine_presence;
233 
234 	u8 engine_suspended;
235 
236 	u8 reserved0[3];
237 	u64 reserved1[1];
238 
239 	u64 desc_private;
240 } __packed;
241 
242 #define GUC_POWER_UNSPECIFIED	0
243 #define GUC_POWER_D0		1
244 #define GUC_POWER_D1		2
245 #define GUC_POWER_D2		3
246 #define GUC_POWER_D3		4
247 
248 /* Scheduling policy settings */
249 
/* Reset engine upon preempt failure */
#define POLICY_RESET_ENGINE		(1 << 0)
/* Preempt to idle on quantum expiry */
#define POLICY_PREEMPT_TO_IDLE		(1 << 1)
254 
255 #define POLICY_MAX_NUM_WI 15
256 #define POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000
257 #define POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000
258 #define POLICY_DEFAULT_PREEMPTION_TIME_US 500000
259 #define POLICY_DEFAULT_FAULT_TIME_US 250000
260 
261 struct guc_policy {
	/* Time for one workload to execute (in microseconds). */
	u32 execution_quantum;
	/*
	 * Time to wait for a preemption request to complete before issuing a
	 * reset (in microseconds).
	 */
	u32 preemption_time;
	/*
	 * How much time a workload is allowed to run after the first fault is
	 * observed, before it is preempted (in microseconds).
	 */
269 	u32 fault_time;
270 	u32 policy_flags;
271 	u32 reserved[8];
272 } __packed;
273 
274 struct guc_policies {
275 	struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINE_CLASSES];
276 	u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
	/*
	 * In microseconds. How much time to allow before DPC processing is
	 * called back via interrupt (to prevent DPC queue drain starving).
	 * Typically 1000s of microseconds (example only, not granularity).
	 */
280 	u32 dpc_promote_time;
281 
282 	/* Must be set to take these new values. */
283 	u32 is_valid;
284 
	/*
	 * Max number of work items (WIs) to process per call. A large value
	 * may keep the CS idle.
	 */
287 	u32 max_num_work_items;
288 
289 	u32 reserved[4];
290 } __packed;
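
/*
 * A sketch of seeding a policy with the defaults above (illustrative only;
 * "policy" and "policies" are hypothetical pointers, and the real setup is
 * done when the ADS is populated in intel_guc_ads.c):
 *
 *	policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US;
 *	policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US;
 *	policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US;
 *	policy->policy_flags = 0;
 *
 *	policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US;
 *	policies->max_num_work_items = POLICY_MAX_NUM_WI;
 *	policies->is_valid = 1;
 */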
291 
292 /* GuC MMIO reg state struct */
293 struct guc_mmio_reg {
294 	u32 offset;
295 	u32 value;
296 	u32 flags;
297 #define GUC_REGSET_MASKED		(1 << 0)
298 } __packed;
299 
300 /* GuC register sets */
301 struct guc_mmio_reg_set {
302 	u32 address;
303 	u16 count;
304 	u16 reserved;
305 } __packed;
306 
307 /* HW info */
308 struct guc_gt_system_info {
309 	u8 mapping_table[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
310 	u32 engine_enabled_masks[GUC_MAX_ENGINE_CLASSES];
311 	u32 generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_MAX];
312 } __packed;
313 
314 /* Clients info */
315 struct guc_ct_pool_entry {
316 	struct guc_ct_buffer_desc desc;
317 	u32 reserved[7];
318 } __packed;
319 
320 #define GUC_CT_POOL_SIZE	2
321 
322 struct guc_clients_info {
323 	u32 clients_num;
324 	u32 reserved0[13];
325 	u32 ct_pool_addr;
326 	u32 ct_pool_count;
327 	u32 reserved[4];
328 } __packed;
329 
330 /* GuC Additional Data Struct */
331 struct guc_ads {
332 	struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
333 	u32 reserved0;
334 	u32 scheduler_policies;
335 	u32 gt_system_info;
336 	u32 clients_info;
337 	u32 control_data;
338 	u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES];
339 	u32 eng_state_size[GUC_MAX_ENGINE_CLASSES];
340 	u32 private_data;
341 	u32 reserved[15];
342 } __packed;
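
/*
 * The scheduler_policies, gt_system_info, clients_info and private_data
 * members hold GuC-visible (GGTT) addresses of the corresponding blobs. A
 * hedged sketch, assuming a hypothetical "blob" that co-locates the ADS with
 * those blobs at GGTT offset "base":
 *
 *	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
 *	blob->ads.gt_system_info = base + ptr_offset(blob, system_info);
 *	blob->ads.clients_info = base + ptr_offset(blob, clients_info);
 */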
343 
344 /* GuC logging structures */
345 
346 enum guc_log_buffer_type {
347 	GUC_ISR_LOG_BUFFER,
348 	GUC_DPC_LOG_BUFFER,
349 	GUC_CRASH_DUMP_LOG_BUFFER,
350 	GUC_MAX_LOG_BUFFER
351 };
352 
353 /**
354  * struct guc_log_buffer_state - GuC log buffer state
355  *
356  * Below state structure is used for coordination of retrieval of GuC firmware
357  * logs. Separate state is maintained for each log buffer type.
358  * read_ptr points to the location where i915 read last in log buffer and
359  * is read only for GuC firmware. write_ptr is incremented by GuC with number
360  * of bytes written for each log entry and is read only for i915.
361  * When any type of log buffer becomes half full, GuC sends a flush interrupt.
362  * GuC firmware expects that while it is writing to 2nd half of the buffer,
363  * first half would get consumed by Host and then get a flush completed
364  * acknowledgment from Host, so that it does not end up doing any overwrite
365  * causing loss of logs. So when buffer gets half filled & i915 has requested
366  * for interrupt, GuC will set flush_to_file field, set the sampled_write_ptr
367  * to the value of write_ptr and raise the interrupt.
368  * On receiving the interrupt i915 should read the buffer, clear flush_to_file
369  * field and also update read_ptr with the value of sample_write_ptr, before
370  * sending an acknowledgment to GuC. marker & version fields are for internal
371  * usage of GuC and opaque to i915. buffer_full_cnt field is incremented every
372  * time GuC detects the log buffer overflow.
373  */
374 struct guc_log_buffer_state {
375 	u32 marker[2];
376 	u32 read_ptr;
377 	u32 write_ptr;
378 	u32 size;
379 	u32 sampled_write_ptr;
380 	union {
381 		struct {
382 			u32 flush_to_file:1;
383 			u32 buffer_full_cnt:4;
384 			u32 reserved:27;
385 		};
386 		u32 flags;
387 	};
388 	u32 version;
389 } __packed;
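
/*
 * A minimal sketch of the host-side flush handling described above
 * (illustrative only; "state" is a hypothetical pointer into the shared log
 * buffer, and the actual copying, locking and overflow accounting live in
 * intel_guc_log.c):
 *
 *	u32 write = state->sampled_write_ptr;
 *
 *	... copy the log data between state->read_ptr and write ...
 *
 *	state->read_ptr = write;
 *	state->flush_to_file = 0;
 *
 * followed by a flush-complete acknowledgment to the GuC.
 */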
390 
391 struct guc_ctx_report {
392 	u32 report_return_status;
393 	u32 reserved1[64];
394 	u32 affected_count;
395 	u32 reserved2[2];
396 } __packed;
397 
398 /* GuC Shared Context Data Struct */
399 struct guc_shared_ctx_data {
400 	u32 addr_of_last_preempted_data_low;
401 	u32 addr_of_last_preempted_data_high;
402 	u32 addr_of_last_preempted_data_high_tmp;
403 	u32 padding;
404 	u32 is_mapped_to_proxy;
405 	u32 proxy_ctx_id;
406 	u32 engine_reset_ctx_id;
407 	u32 media_reset_count;
408 	u32 reserved1[8];
409 	u32 uk_last_ctx_switch_reason;
410 	u32 was_reset;
411 	u32 lrca_gpu_addr;
412 	u64 execlist_ctx;
413 	u32 reserved2[66];
414 	struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM];
415 } __packed;
416 
417 #define __INTEL_GUC_MSG_GET(T, m) \
418 	(((m) & INTEL_GUC_MSG_ ## T ## _MASK) >> INTEL_GUC_MSG_ ## T ## _SHIFT)
419 #define INTEL_GUC_MSG_TO_TYPE(m)	__INTEL_GUC_MSG_GET(TYPE, m)
420 #define INTEL_GUC_MSG_TO_DATA(m)	__INTEL_GUC_MSG_GET(DATA, m)
421 #define INTEL_GUC_MSG_TO_CODE(m)	__INTEL_GUC_MSG_GET(CODE, m)
422 
423 #define __INTEL_GUC_MSG_TYPE_IS(T, m) \
424 	(INTEL_GUC_MSG_TO_TYPE(m) == INTEL_GUC_MSG_TYPE_ ## T)
425 #define INTEL_GUC_MSG_IS_REQUEST(m)	__INTEL_GUC_MSG_TYPE_IS(REQUEST, m)
426 #define INTEL_GUC_MSG_IS_RESPONSE(m)	__INTEL_GUC_MSG_TYPE_IS(RESPONSE, m)
427 
428 #define INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(m) \
429 	 (typecheck(u32, (m)) && \
430 	  ((m) & (INTEL_GUC_MSG_TYPE_MASK | INTEL_GUC_MSG_CODE_MASK)) == \
431 	  ((INTEL_GUC_MSG_TYPE_RESPONSE << INTEL_GUC_MSG_TYPE_SHIFT) | \
432 	   (INTEL_GUC_RESPONSE_STATUS_SUCCESS << INTEL_GUC_MSG_CODE_SHIFT)))
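
/*
 * Example of decoding a received 32-bit message word "msg" (illustrative
 * only; the real MMIO send/receive path lives in intel_guc.c):
 *
 *	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(msg))
 *		return -EIO;
 *
 *	return INTEL_GUC_MSG_TO_DATA(msg);
 */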
433 
434 /* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
435 enum intel_guc_recv_message {
436 	INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
437 	INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER = BIT(3)
438 };
439 
440 #endif
441