1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2014 Intel Corporation
4  */
5 
6 #include <linux/circ_buf.h>
7 
8 #include "gem/i915_gem_context.h"
9 #include "gt/gen8_engine_cs.h"
10 #include "gt/intel_breadcrumbs.h"
11 #include "gt/intel_context.h"
12 #include "gt/intel_engine_heartbeat.h"
13 #include "gt/intel_engine_pm.h"
14 #include "gt/intel_engine_regs.h"
15 #include "gt/intel_gpu_commands.h"
16 #include "gt/intel_gt.h"
17 #include "gt/intel_gt_clock_utils.h"
18 #include "gt/intel_gt_irq.h"
19 #include "gt/intel_gt_pm.h"
20 #include "gt/intel_gt_regs.h"
21 #include "gt/intel_gt_requests.h"
22 #include "gt/intel_lrc.h"
23 #include "gt/intel_lrc_reg.h"
24 #include "gt/intel_mocs.h"
25 #include "gt/intel_ring.h"
26 
27 #include "intel_guc_ads.h"
28 #include "intel_guc_capture.h"
29 #include "intel_guc_submission.h"
30 
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 
34 /**
35  * DOC: GuC-based command submission
36  *
37  * The Scratch registers:
38  * There are 16 MMIO-based registers start from 0xC180. The kernel driver writes
39  * a value to the action register (SOFT_SCRATCH_0) along with any data. It then
40  * triggers an interrupt on the GuC via another register write (0xC4C8).
41  * Firmware writes a success/fail code back to the action register after
42  * processes the request. The kernel driver polls waiting for this update and
43  * then proceeds.
44  *
45  * Command Transport buffers (CTBs):
46  * Covered in detail in other sections but CTBs (Host to GuC - H2G, GuC to Host
47  * - G2H) are a message interface between the i915 and GuC.
48  *
49  * Context registration:
50  * Before a context can be submitted it must be registered with the GuC via a
51  * H2G. A unique guc_id is associated with each context. The context is either
52  * registered at request creation time (normal operation) or at submission time
53  * (abnormal operation, e.g. after a reset).
54  *
55  * Context submission:
56  * The i915 updates the LRC tail value in memory. The i915 must enable the
57  * scheduling of the context within the GuC for the GuC to actually consider it.
58  * Therefore, the first time a disabled context is submitted we use a schedule
59  * enable H2G, while follow up submissions are done via the context submit H2G,
60  * which informs the GuC that a previously enabled context has new work
61  * available.
62  *
63  * Context unpin:
64  * To unpin a context a H2G is used to disable scheduling. When the
65  * corresponding G2H returns indicating the scheduling disable operation has
66  * completed it is safe to unpin the context. While a disable is in flight it
67  * isn't safe to resubmit the context so a fence is used to stall all future
68  * requests of that context until the G2H is returned.
69  *
70  * Context deregistration:
71  * Before a context can be destroyed or if we steal its guc_id we must
72  * deregister the context with the GuC via H2G. If stealing the guc_id it isn't
73  * safe to submit anything to this guc_id until the deregister completes so a
74  * fence is used to stall all requests associated with this guc_id until the
75  * corresponding G2H returns indicating the guc_id has been deregistered.
76  *
77  * submission_state.guc_ids:
78  * Unique number associated with private GuC context data passed in during
79  * context registration / submission / deregistration. 64k available. Simple ida
80  * is used for allocation.
81  *
82  * Stealing guc_ids:
83  * If no guc_ids are available they can be stolen from another context at
84  * request creation time if that context is unpinned. If a guc_id can't be found
85  * we punt this problem to the user as we believe this is near impossible to hit
86  * during normal use cases.
87  *
88  * Locking:
89  * In the GuC submission code we have 3 basic spin locks which protect
90  * everything. Details about each below.
91  *
92  * sched_engine->lock
93  * This is the submission lock for all contexts that share an i915 schedule
94  * engine (sched_engine), thus only one of the contexts which share a
95  * sched_engine can be submitting at a time. Currently only one sched_engine is
96  * used for all of GuC submission but that could change in the future.
97  *
98  * guc->submission_state.lock
99  * Global lock for GuC submission state. Protects guc_ids and destroyed contexts
100  * list.
101  *
102  * ce->guc_state.lock
103  * Protects everything under ce->guc_state. Ensures that a context is in the
104  * correct state before issuing a H2G. e.g. We don't issue a schedule disable
105  * on a disabled context (bad idea), we don't issue a schedule enable when a
106  * schedule disable is in flight, etc... Also protects list of inflight requests
107  * on the context and the priority management state. Lock is individual to each
108  * context.
109  *
110  * Lock ordering rules:
111  * sched_engine->lock -> ce->guc_state.lock
112  * guc->submission_state.lock -> ce->guc_state.lock
113  *
114  * Reset races:
115  * When a full GT reset is triggered it is assumed that some G2H responses to
116  * H2Gs can be lost as the GuC is also reset. Losing these G2H can prove to be
117  * fatal as we do certain operations upon receiving a G2H (e.g. destroy
118  * contexts, release guc_ids, etc...). When this occurs we can scrub the
119  * context state and cleanup appropriately, however this is quite racey.
120  * To avoid races, the reset code must disable submission before scrubbing for
121  * the missing G2H, while the submission code must check for submission being
122  * disabled and skip sending H2Gs and updating context states when it is. Both
123  * sides must also make sure to hold the relevant locks.
124  */
125 
126 /* GuC Virtual Engine */
127 struct guc_virtual_engine {
128 	struct intel_engine_cs base;
129 	struct intel_context context;
130 };
131 
132 static struct intel_context *
133 guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
134 		   unsigned long flags);
135 
136 static struct intel_context *
137 guc_create_parallel(struct intel_engine_cs **engines,
138 		    unsigned int num_siblings,
139 		    unsigned int width);
140 
141 #define GUC_REQUEST_SIZE 64 /* bytes */
142 
143 /*
144  * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous
145  * per the GuC submission interface. A different allocation algorithm is used
146  * (bitmap vs. ida) between multi-lrc and single-lrc hence the reason to
147  * partition the guc_id space. We believe the number of multi-lrc contexts in
148  * use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for
149  * multi-lrc.
150  */
151 #define NUMBER_MULTI_LRC_GUC_ID(guc)	\
152 	((guc)->submission_state.num_guc_ids / 16)
153 
154 /*
155  * Below is a set of functions which control the GuC scheduling state which
156  * require a lock.
157  */
158 #define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER	BIT(0)
159 #define SCHED_STATE_DESTROYED				BIT(1)
160 #define SCHED_STATE_PENDING_DISABLE			BIT(2)
161 #define SCHED_STATE_BANNED				BIT(3)
162 #define SCHED_STATE_ENABLED				BIT(4)
163 #define SCHED_STATE_PENDING_ENABLE			BIT(5)
164 #define SCHED_STATE_REGISTERED				BIT(6)
165 #define SCHED_STATE_POLICY_REQUIRED			BIT(7)
166 #define SCHED_STATE_BLOCKED_SHIFT			8
167 #define SCHED_STATE_BLOCKED		BIT(SCHED_STATE_BLOCKED_SHIFT)
168 #define SCHED_STATE_BLOCKED_MASK	(0xfff << SCHED_STATE_BLOCKED_SHIFT)
169 
170 static inline void init_sched_state(struct intel_context *ce)
171 {
172 	lockdep_assert_held(&ce->guc_state.lock);
173 	ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
174 }
175 
176 __maybe_unused
177 static bool sched_state_is_init(struct intel_context *ce)
178 {
179 	/* Kernel contexts can have SCHED_STATE_REGISTERED after suspend. */
180 	return !(ce->guc_state.sched_state &
181 		 ~(SCHED_STATE_BLOCKED_MASK | SCHED_STATE_REGISTERED));
182 }
183 
184 static inline bool
185 context_wait_for_deregister_to_register(struct intel_context *ce)
186 {
187 	return ce->guc_state.sched_state &
188 		SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
189 }
190 
191 static inline void
192 set_context_wait_for_deregister_to_register(struct intel_context *ce)
193 {
194 	lockdep_assert_held(&ce->guc_state.lock);
195 	ce->guc_state.sched_state |=
196 		SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
197 }
198 
199 static inline void
200 clr_context_wait_for_deregister_to_register(struct intel_context *ce)
201 {
202 	lockdep_assert_held(&ce->guc_state.lock);
203 	ce->guc_state.sched_state &=
204 		~SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
205 }
206 
207 static inline bool
208 context_destroyed(struct intel_context *ce)
209 {
210 	return ce->guc_state.sched_state & SCHED_STATE_DESTROYED;
211 }
212 
213 static inline void
214 set_context_destroyed(struct intel_context *ce)
215 {
216 	lockdep_assert_held(&ce->guc_state.lock);
217 	ce->guc_state.sched_state |= SCHED_STATE_DESTROYED;
218 }
219 
220 static inline bool context_pending_disable(struct intel_context *ce)
221 {
222 	return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE;
223 }
224 
225 static inline void set_context_pending_disable(struct intel_context *ce)
226 {
227 	lockdep_assert_held(&ce->guc_state.lock);
228 	ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE;
229 }
230 
231 static inline void clr_context_pending_disable(struct intel_context *ce)
232 {
233 	lockdep_assert_held(&ce->guc_state.lock);
234 	ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE;
235 }
236 
237 static inline bool context_banned(struct intel_context *ce)
238 {
239 	return ce->guc_state.sched_state & SCHED_STATE_BANNED;
240 }
241 
242 static inline void set_context_banned(struct intel_context *ce)
243 {
244 	lockdep_assert_held(&ce->guc_state.lock);
245 	ce->guc_state.sched_state |= SCHED_STATE_BANNED;
246 }
247 
248 static inline void clr_context_banned(struct intel_context *ce)
249 {
250 	lockdep_assert_held(&ce->guc_state.lock);
251 	ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
252 }
253 
254 static inline bool context_enabled(struct intel_context *ce)
255 {
256 	return ce->guc_state.sched_state & SCHED_STATE_ENABLED;
257 }
258 
259 static inline void set_context_enabled(struct intel_context *ce)
260 {
261 	lockdep_assert_held(&ce->guc_state.lock);
262 	ce->guc_state.sched_state |= SCHED_STATE_ENABLED;
263 }
264 
265 static inline void clr_context_enabled(struct intel_context *ce)
266 {
267 	lockdep_assert_held(&ce->guc_state.lock);
268 	ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED;
269 }
270 
271 static inline bool context_pending_enable(struct intel_context *ce)
272 {
273 	return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE;
274 }
275 
276 static inline void set_context_pending_enable(struct intel_context *ce)
277 {
278 	lockdep_assert_held(&ce->guc_state.lock);
279 	ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE;
280 }
281 
282 static inline void clr_context_pending_enable(struct intel_context *ce)
283 {
284 	lockdep_assert_held(&ce->guc_state.lock);
285 	ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE;
286 }
287 
288 static inline bool context_registered(struct intel_context *ce)
289 {
290 	return ce->guc_state.sched_state & SCHED_STATE_REGISTERED;
291 }
292 
293 static inline void set_context_registered(struct intel_context *ce)
294 {
295 	lockdep_assert_held(&ce->guc_state.lock);
296 	ce->guc_state.sched_state |= SCHED_STATE_REGISTERED;
297 }
298 
299 static inline void clr_context_registered(struct intel_context *ce)
300 {
301 	lockdep_assert_held(&ce->guc_state.lock);
302 	ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
303 }
304 
305 static inline bool context_policy_required(struct intel_context *ce)
306 {
307 	return ce->guc_state.sched_state & SCHED_STATE_POLICY_REQUIRED;
308 }
309 
310 static inline void set_context_policy_required(struct intel_context *ce)
311 {
312 	lockdep_assert_held(&ce->guc_state.lock);
313 	ce->guc_state.sched_state |= SCHED_STATE_POLICY_REQUIRED;
314 }
315 
316 static inline void clr_context_policy_required(struct intel_context *ce)
317 {
318 	lockdep_assert_held(&ce->guc_state.lock);
319 	ce->guc_state.sched_state &= ~SCHED_STATE_POLICY_REQUIRED;
320 }
321 
322 static inline u32 context_blocked(struct intel_context *ce)
323 {
324 	return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
325 		SCHED_STATE_BLOCKED_SHIFT;
326 }
327 
328 static inline void incr_context_blocked(struct intel_context *ce)
329 {
330 	lockdep_assert_held(&ce->guc_state.lock);
331 
332 	ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
333 
334 	GEM_BUG_ON(!context_blocked(ce));	/* Overflow check */
335 }
336 
337 static inline void decr_context_blocked(struct intel_context *ce)
338 {
339 	lockdep_assert_held(&ce->guc_state.lock);
340 
341 	GEM_BUG_ON(!context_blocked(ce));	/* Underflow check */
342 
343 	ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
344 }
345 
346 static inline bool context_has_committed_requests(struct intel_context *ce)
347 {
348 	return !!ce->guc_state.number_committed_requests;
349 }
350 
351 static inline void incr_context_committed_requests(struct intel_context *ce)
352 {
353 	lockdep_assert_held(&ce->guc_state.lock);
354 	++ce->guc_state.number_committed_requests;
355 	GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
356 }
357 
358 static inline void decr_context_committed_requests(struct intel_context *ce)
359 {
360 	lockdep_assert_held(&ce->guc_state.lock);
361 	--ce->guc_state.number_committed_requests;
362 	GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
363 }
364 
365 static struct intel_context *
366 request_to_scheduling_context(struct i915_request *rq)
367 {
368 	return intel_context_to_parent(rq->context);
369 }
370 
371 static inline bool context_guc_id_invalid(struct intel_context *ce)
372 {
373 	return ce->guc_id.id == GUC_INVALID_CONTEXT_ID;
374 }
375 
376 static inline void set_context_guc_id_invalid(struct intel_context *ce)
377 {
378 	ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
379 }
380 
381 static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
382 {
383 	return &ce->engine->gt->uc.guc;
384 }
385 
386 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
387 {
388 	return rb_entry(rb, struct i915_priolist, node);
389 }
390 
391 /*
392  * When using multi-lrc submission a scratch memory area is reserved in the
393  * parent's context state for the process descriptor, work queue, and handshake
394  * between the parent + children contexts to insert safe preemption points
395  * between each of the BBs. Currently the scratch area is sized to a page.
396  *
397  * The layout of this scratch area is below:
398  * 0						guc_process_desc
399  * + sizeof(struct guc_process_desc)		child go
400  * + CACHELINE_BYTES				child join[0]
401  * ...
402  * + CACHELINE_BYTES				child join[n - 1]
403  * ...						unused
404  * PARENT_SCRATCH_SIZE / 2			work queue start
405  * ...						work queue
406  * PARENT_SCRATCH_SIZE - 1			work queue end
407  */
408 #define WQ_SIZE			(PARENT_SCRATCH_SIZE / 2)
409 #define WQ_OFFSET		(PARENT_SCRATCH_SIZE - WQ_SIZE)
410 
411 struct sync_semaphore {
412 	u32 semaphore;
413 	u8 unused[CACHELINE_BYTES - sizeof(u32)];
414 };
415 
416 struct parent_scratch {
417 	union guc_descs {
418 		struct guc_sched_wq_desc wq_desc;
419 		struct guc_process_desc_v69 pdesc;
420 	} descs;
421 
422 	struct sync_semaphore go;
423 	struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
424 
425 	u8 unused[WQ_OFFSET - sizeof(union guc_descs) -
426 		sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
427 
428 	u32 wq[WQ_SIZE / sizeof(u32)];
429 };
430 
431 static u32 __get_parent_scratch_offset(struct intel_context *ce)
432 {
433 	GEM_BUG_ON(!ce->parallel.guc.parent_page);
434 
435 	return ce->parallel.guc.parent_page * PAGE_SIZE;
436 }
437 
438 static u32 __get_wq_offset(struct intel_context *ce)
439 {
440 	BUILD_BUG_ON(offsetof(struct parent_scratch, wq) != WQ_OFFSET);
441 
442 	return __get_parent_scratch_offset(ce) + WQ_OFFSET;
443 }
444 
445 static struct parent_scratch *
446 __get_parent_scratch(struct intel_context *ce)
447 {
448 	BUILD_BUG_ON(sizeof(struct parent_scratch) != PARENT_SCRATCH_SIZE);
449 	BUILD_BUG_ON(sizeof(struct sync_semaphore) != CACHELINE_BYTES);
450 
451 	/*
452 	 * Need to subtract LRC_STATE_OFFSET here as the
453 	 * parallel.guc.parent_page is the offset into ce->state while
454 	 * ce->lrc_reg_reg is ce->state + LRC_STATE_OFFSET.
455 	 */
456 	return (struct parent_scratch *)
457 		(ce->lrc_reg_state +
458 		 ((__get_parent_scratch_offset(ce) -
459 		   LRC_STATE_OFFSET) / sizeof(u32)));
460 }
461 
462 static struct guc_process_desc_v69 *
463 __get_process_desc_v69(struct intel_context *ce)
464 {
465 	struct parent_scratch *ps = __get_parent_scratch(ce);
466 
467 	return &ps->descs.pdesc;
468 }
469 
470 static struct guc_sched_wq_desc *
471 __get_wq_desc_v70(struct intel_context *ce)
472 {
473 	struct parent_scratch *ps = __get_parent_scratch(ce);
474 
475 	return &ps->descs.wq_desc;
476 }
477 
478 static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
479 {
480 	/*
481 	 * Check for space in work queue. Caching a value of head pointer in
482 	 * intel_context structure in order reduce the number accesses to shared
483 	 * GPU memory which may be across a PCIe bus.
484 	 */
485 #define AVAILABLE_SPACE	\
486 	CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
487 	if (wqi_size > AVAILABLE_SPACE) {
488 		ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);
489 
490 		if (wqi_size > AVAILABLE_SPACE)
491 			return NULL;
492 	}
493 #undef AVAILABLE_SPACE
494 
495 	return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)];
496 }
497 
498 static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
499 {
500 	struct intel_context *ce = xa_load(&guc->context_lookup, id);
501 
502 	GEM_BUG_ON(id >= GUC_MAX_CONTEXT_ID);
503 
504 	return ce;
505 }
506 
507 static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index)
508 {
509 	struct guc_lrc_desc_v69 *base = guc->lrc_desc_pool_vaddr_v69;
510 
511 	if (!base)
512 		return NULL;
513 
514 	GEM_BUG_ON(index >= GUC_MAX_CONTEXT_ID);
515 
516 	return &base[index];
517 }
518 
519 static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc)
520 {
521 	u32 size;
522 	int ret;
523 
524 	size = PAGE_ALIGN(sizeof(struct guc_lrc_desc_v69) *
525 			  GUC_MAX_CONTEXT_ID);
526 	ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool_v69,
527 					     (void **)&guc->lrc_desc_pool_vaddr_v69);
528 	if (ret)
529 		return ret;
530 
531 	return 0;
532 }
533 
534 static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc)
535 {
536 	if (!guc->lrc_desc_pool_vaddr_v69)
537 		return;
538 
539 	guc->lrc_desc_pool_vaddr_v69 = NULL;
540 	i915_vma_unpin_and_release(&guc->lrc_desc_pool_v69, I915_VMA_RELEASE_MAP);
541 }
542 
543 static inline bool guc_submission_initialized(struct intel_guc *guc)
544 {
545 	return guc->submission_initialized;
546 }
547 
548 static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id)
549 {
550 	struct guc_lrc_desc_v69 *desc = __get_lrc_desc_v69(guc, id);
551 
552 	if (desc)
553 		memset(desc, 0, sizeof(*desc));
554 }
555 
556 static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
557 {
558 	return __get_context(guc, id);
559 }
560 
561 static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id,
562 				      struct intel_context *ce)
563 {
564 	unsigned long flags;
565 
566 	/*
567 	 * xarray API doesn't have xa_save_irqsave wrapper, so calling the
568 	 * lower level functions directly.
569 	 */
570 	xa_lock_irqsave(&guc->context_lookup, flags);
571 	__xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC);
572 	xa_unlock_irqrestore(&guc->context_lookup, flags);
573 }
574 
575 static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
576 {
577 	unsigned long flags;
578 
579 	if (unlikely(!guc_submission_initialized(guc)))
580 		return;
581 
582 	_reset_lrc_desc_v69(guc, id);
583 
584 	/*
585 	 * xarray API doesn't have xa_erase_irqsave wrapper, so calling
586 	 * the lower level functions directly.
587 	 */
588 	xa_lock_irqsave(&guc->context_lookup, flags);
589 	__xa_erase(&guc->context_lookup, id);
590 	xa_unlock_irqrestore(&guc->context_lookup, flags);
591 }
592 
593 static void decr_outstanding_submission_g2h(struct intel_guc *guc)
594 {
595 	if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
596 		wake_up_all(&guc->ct.wq);
597 }
598 
599 static int guc_submission_send_busy_loop(struct intel_guc *guc,
600 					 const u32 *action,
601 					 u32 len,
602 					 u32 g2h_len_dw,
603 					 bool loop)
604 {
605 	/*
606 	 * We always loop when a send requires a reply (i.e. g2h_len_dw > 0),
607 	 * so we don't handle the case where we don't get a reply because we
608 	 * aborted the send due to the channel being busy.
609 	 */
610 	GEM_BUG_ON(g2h_len_dw && !loop);
611 
612 	if (g2h_len_dw)
613 		atomic_inc(&guc->outstanding_submission_g2h);
614 
615 	return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
616 }
617 
618 int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
619 				   atomic_t *wait_var,
620 				   bool interruptible,
621 				   long timeout)
622 {
623 	const int state = interruptible ?
624 		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
625 	DEFINE_WAIT(wait);
626 
627 	might_sleep();
628 	GEM_BUG_ON(timeout < 0);
629 
630 	if (!atomic_read(wait_var))
631 		return 0;
632 
633 	if (!timeout)
634 		return -ETIME;
635 
636 	for (;;) {
637 		prepare_to_wait(&guc->ct.wq, &wait, state);
638 
639 		if (!atomic_read(wait_var))
640 			break;
641 
642 		if (signal_pending_state(state, current)) {
643 			timeout = -EINTR;
644 			break;
645 		}
646 
647 		if (!timeout) {
648 			timeout = -ETIME;
649 			break;
650 		}
651 
652 		timeout = io_schedule_timeout(timeout);
653 	}
654 	finish_wait(&guc->ct.wq, &wait);
655 
656 	return (timeout < 0) ? timeout : 0;
657 }
658 
659 int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
660 {
661 	if (!intel_uc_uses_guc_submission(&guc_to_gt(guc)->uc))
662 		return 0;
663 
664 	return intel_guc_wait_for_pending_msg(guc,
665 					      &guc->outstanding_submission_g2h,
666 					      true, timeout);
667 }
668 
669 static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
670 static int try_context_registration(struct intel_context *ce, bool loop);
671 
672 static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
673 {
674 	int err = 0;
675 	struct intel_context *ce = request_to_scheduling_context(rq);
676 	u32 action[3];
677 	int len = 0;
678 	u32 g2h_len_dw = 0;
679 	bool enabled;
680 
681 	lockdep_assert_held(&rq->engine->sched_engine->lock);
682 
683 	/*
684 	 * Corner case where requests were sitting in the priority list or a
685 	 * request resubmitted after the context was banned.
686 	 */
687 	if (unlikely(intel_context_is_banned(ce))) {
688 		i915_request_put(i915_request_mark_eio(rq));
689 		intel_engine_signal_breadcrumbs(ce->engine);
690 		return 0;
691 	}
692 
693 	GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
694 	GEM_BUG_ON(context_guc_id_invalid(ce));
695 
696 	if (context_policy_required(ce)) {
697 		err = guc_context_policy_init_v70(ce, false);
698 		if (err)
699 			return err;
700 	}
701 
702 	spin_lock(&ce->guc_state.lock);
703 
704 	/*
705 	 * The request / context will be run on the hardware when scheduling
706 	 * gets enabled in the unblock. For multi-lrc we still submit the
707 	 * context to move the LRC tails.
708 	 */
709 	if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce)))
710 		goto out;
711 
712 	enabled = context_enabled(ce) || context_blocked(ce);
713 
714 	if (!enabled) {
715 		action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
716 		action[len++] = ce->guc_id.id;
717 		action[len++] = GUC_CONTEXT_ENABLE;
718 		set_context_pending_enable(ce);
719 		intel_context_get(ce);
720 		g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
721 	} else {
722 		action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT;
723 		action[len++] = ce->guc_id.id;
724 	}
725 
726 	err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
727 	if (!enabled && !err) {
728 		trace_intel_context_sched_enable(ce);
729 		atomic_inc(&guc->outstanding_submission_g2h);
730 		set_context_enabled(ce);
731 
732 		/*
733 		 * Without multi-lrc KMD does the submission step (moving the
734 		 * lrc tail) so enabling scheduling is sufficient to submit the
735 		 * context. This isn't the case in multi-lrc submission as the
736 		 * GuC needs to move the tails, hence the need for another H2G
737 		 * to submit a multi-lrc context after enabling scheduling.
738 		 */
739 		if (intel_context_is_parent(ce)) {
740 			action[0] = INTEL_GUC_ACTION_SCHED_CONTEXT;
741 			err = intel_guc_send_nb(guc, action, len - 1, 0);
742 		}
743 	} else if (!enabled) {
744 		clr_context_pending_enable(ce);
745 		intel_context_put(ce);
746 	}
747 	if (likely(!err))
748 		trace_i915_request_guc_submit(rq);
749 
750 out:
751 	spin_unlock(&ce->guc_state.lock);
752 	return err;
753 }
754 
755 static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
756 {
757 	int ret = __guc_add_request(guc, rq);
758 
759 	if (unlikely(ret == -EBUSY)) {
760 		guc->stalled_request = rq;
761 		guc->submission_stall_reason = STALL_ADD_REQUEST;
762 	}
763 
764 	return ret;
765 }
766 
767 static inline void guc_set_lrc_tail(struct i915_request *rq)
768 {
769 	rq->context->lrc_reg_state[CTX_RING_TAIL] =
770 		intel_ring_set_tail(rq->ring, rq->tail);
771 }
772 
773 static inline int rq_prio(const struct i915_request *rq)
774 {
775 	return rq->sched.attr.priority;
776 }
777 
778 static bool is_multi_lrc_rq(struct i915_request *rq)
779 {
780 	return intel_context_is_parallel(rq->context);
781 }
782 
783 static bool can_merge_rq(struct i915_request *rq,
784 			 struct i915_request *last)
785 {
786 	return request_to_scheduling_context(rq) ==
787 		request_to_scheduling_context(last);
788 }
789 
790 static u32 wq_space_until_wrap(struct intel_context *ce)
791 {
792 	return (WQ_SIZE - ce->parallel.guc.wqi_tail);
793 }
794 
795 static void write_wqi(struct intel_context *ce, u32 wqi_size)
796 {
797 	BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
798 
799 	/*
800 	 * Ensure WQI are visible before updating tail
801 	 */
802 	intel_guc_write_barrier(ce_to_guc(ce));
803 
804 	ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
805 		(WQ_SIZE - 1);
806 	WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail);
807 }
808 
809 static int guc_wq_noop_append(struct intel_context *ce)
810 {
811 	u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce));
812 	u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
813 
814 	if (!wqi)
815 		return -EBUSY;
816 
817 	GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
818 
819 	*wqi = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
820 		FIELD_PREP(WQ_LEN_MASK, len_dw);
821 	ce->parallel.guc.wqi_tail = 0;
822 
823 	return 0;
824 }
825 
826 static int __guc_wq_item_append(struct i915_request *rq)
827 {
828 	struct intel_context *ce = request_to_scheduling_context(rq);
829 	struct intel_context *child;
830 	unsigned int wqi_size = (ce->parallel.number_children + 4) *
831 		sizeof(u32);
832 	u32 *wqi;
833 	u32 len_dw = (wqi_size / sizeof(u32)) - 1;
834 	int ret;
835 
836 	/* Ensure context is in correct state updating work queue */
837 	GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
838 	GEM_BUG_ON(context_guc_id_invalid(ce));
839 	GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
840 	GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
841 
842 	/* Insert NOOP if this work queue item will wrap the tail pointer. */
843 	if (wqi_size > wq_space_until_wrap(ce)) {
844 		ret = guc_wq_noop_append(ce);
845 		if (ret)
846 			return ret;
847 	}
848 
849 	wqi = get_wq_pointer(ce, wqi_size);
850 	if (!wqi)
851 		return -EBUSY;
852 
853 	GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
854 
855 	*wqi++ = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
856 		FIELD_PREP(WQ_LEN_MASK, len_dw);
857 	*wqi++ = ce->lrc.lrca;
858 	*wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) |
859 	       FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64));
860 	*wqi++ = 0;	/* fence_id */
861 	for_each_child(ce, child)
862 		*wqi++ = child->ring->tail / sizeof(u64);
863 
864 	write_wqi(ce, wqi_size);
865 
866 	return 0;
867 }
868 
869 static int guc_wq_item_append(struct intel_guc *guc,
870 			      struct i915_request *rq)
871 {
872 	struct intel_context *ce = request_to_scheduling_context(rq);
873 	int ret = 0;
874 
875 	if (likely(!intel_context_is_banned(ce))) {
876 		ret = __guc_wq_item_append(rq);
877 
878 		if (unlikely(ret == -EBUSY)) {
879 			guc->stalled_request = rq;
880 			guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
881 		}
882 	}
883 
884 	return ret;
885 }
886 
887 static bool multi_lrc_submit(struct i915_request *rq)
888 {
889 	struct intel_context *ce = request_to_scheduling_context(rq);
890 
891 	intel_ring_set_tail(rq->ring, rq->tail);
892 
893 	/*
894 	 * We expect the front end (execbuf IOCTL) to set this flag on the last
895 	 * request generated from a multi-BB submission. This indicates to the
896 	 * backend (GuC interface) that we should submit this context thus
897 	 * submitting all the requests generated in parallel.
898 	 */
899 	return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
900 		intel_context_is_banned(ce);
901 }
902 
903 static int guc_dequeue_one_context(struct intel_guc *guc)
904 {
905 	struct i915_sched_engine * const sched_engine = guc->sched_engine;
906 	struct i915_request *last = NULL;
907 	bool submit = false;
908 	struct rb_node *rb;
909 	int ret;
910 
911 	lockdep_assert_held(&sched_engine->lock);
912 
913 	if (guc->stalled_request) {
914 		submit = true;
915 		last = guc->stalled_request;
916 
917 		switch (guc->submission_stall_reason) {
918 		case STALL_REGISTER_CONTEXT:
919 			goto register_context;
920 		case STALL_MOVE_LRC_TAIL:
921 			goto move_lrc_tail;
922 		case STALL_ADD_REQUEST:
923 			goto add_request;
924 		default:
925 			MISSING_CASE(guc->submission_stall_reason);
926 		}
927 	}
928 
929 	while ((rb = rb_first_cached(&sched_engine->queue))) {
930 		struct i915_priolist *p = to_priolist(rb);
931 		struct i915_request *rq, *rn;
932 
933 		priolist_for_each_request_consume(rq, rn, p) {
934 			if (last && !can_merge_rq(rq, last))
935 				goto register_context;
936 
937 			list_del_init(&rq->sched.link);
938 
939 			__i915_request_submit(rq);
940 
941 			trace_i915_request_in(rq, 0);
942 			last = rq;
943 
944 			if (is_multi_lrc_rq(rq)) {
945 				/*
946 				 * We need to coalesce all multi-lrc requests in
947 				 * a relationship into a single H2G. We are
948 				 * guaranteed that all of these requests will be
949 				 * submitted sequentially.
950 				 */
951 				if (multi_lrc_submit(rq)) {
952 					submit = true;
953 					goto register_context;
954 				}
955 			} else {
956 				submit = true;
957 			}
958 		}
959 
960 		rb_erase_cached(&p->node, &sched_engine->queue);
961 		i915_priolist_free(p);
962 	}
963 
964 register_context:
965 	if (submit) {
966 		struct intel_context *ce = request_to_scheduling_context(last);
967 
968 		if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
969 			     !intel_context_is_banned(ce))) {
970 			ret = try_context_registration(ce, false);
971 			if (unlikely(ret == -EPIPE)) {
972 				goto deadlk;
973 			} else if (ret == -EBUSY) {
974 				guc->stalled_request = last;
975 				guc->submission_stall_reason =
976 					STALL_REGISTER_CONTEXT;
977 				goto schedule_tasklet;
978 			} else if (ret != 0) {
979 				GEM_WARN_ON(ret);	/* Unexpected */
980 				goto deadlk;
981 			}
982 		}
983 
984 move_lrc_tail:
985 		if (is_multi_lrc_rq(last)) {
986 			ret = guc_wq_item_append(guc, last);
987 			if (ret == -EBUSY) {
988 				goto schedule_tasklet;
989 			} else if (ret != 0) {
990 				GEM_WARN_ON(ret);	/* Unexpected */
991 				goto deadlk;
992 			}
993 		} else {
994 			guc_set_lrc_tail(last);
995 		}
996 
997 add_request:
998 		ret = guc_add_request(guc, last);
999 		if (unlikely(ret == -EPIPE)) {
1000 			goto deadlk;
1001 		} else if (ret == -EBUSY) {
1002 			goto schedule_tasklet;
1003 		} else if (ret != 0) {
1004 			GEM_WARN_ON(ret);	/* Unexpected */
1005 			goto deadlk;
1006 		}
1007 	}
1008 
1009 	guc->stalled_request = NULL;
1010 	guc->submission_stall_reason = STALL_NONE;
1011 	return submit;
1012 
1013 deadlk:
1014 	sched_engine->tasklet.callback = NULL;
1015 	tasklet_disable_nosync(&sched_engine->tasklet);
1016 	return false;
1017 
1018 schedule_tasklet:
1019 	tasklet_schedule(&sched_engine->tasklet);
1020 	return false;
1021 }
1022 
1023 static void guc_submission_tasklet(struct tasklet_struct *t)
1024 {
1025 	struct i915_sched_engine *sched_engine =
1026 		from_tasklet(sched_engine, t, tasklet);
1027 	unsigned long flags;
1028 	bool loop;
1029 
1030 	spin_lock_irqsave(&sched_engine->lock, flags);
1031 
1032 	do {
1033 		loop = guc_dequeue_one_context(sched_engine->private_data);
1034 	} while (loop);
1035 
1036 	i915_sched_engine_reset_on_empty(sched_engine);
1037 
1038 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1039 }
1040 
1041 static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
1042 {
1043 	if (iir & GT_RENDER_USER_INTERRUPT)
1044 		intel_engine_signal_breadcrumbs(engine);
1045 }
1046 
1047 static void __guc_context_destroy(struct intel_context *ce);
1048 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
1049 static void guc_signal_context_fence(struct intel_context *ce);
1050 static void guc_cancel_context_requests(struct intel_context *ce);
1051 static void guc_blocked_fence_complete(struct intel_context *ce);
1052 
1053 static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
1054 {
1055 	struct intel_context *ce;
1056 	unsigned long index, flags;
1057 	bool pending_disable, pending_enable, deregister, destroyed, banned;
1058 
1059 	xa_lock_irqsave(&guc->context_lookup, flags);
1060 	xa_for_each(&guc->context_lookup, index, ce) {
1061 		/*
1062 		 * Corner case where the ref count on the object is zero but and
1063 		 * deregister G2H was lost. In this case we don't touch the ref
1064 		 * count and finish the destroy of the context.
1065 		 */
1066 		bool do_put = kref_get_unless_zero(&ce->ref);
1067 
1068 		xa_unlock(&guc->context_lookup);
1069 
1070 		spin_lock(&ce->guc_state.lock);
1071 
1072 		/*
1073 		 * Once we are at this point submission_disabled() is guaranteed
1074 		 * to be visible to all callers who set the below flags (see above
1075 		 * flush and flushes in reset_prepare). If submission_disabled()
1076 		 * is set, the caller shouldn't set these flags.
1077 		 */
1078 
1079 		destroyed = context_destroyed(ce);
1080 		pending_enable = context_pending_enable(ce);
1081 		pending_disable = context_pending_disable(ce);
1082 		deregister = context_wait_for_deregister_to_register(ce);
1083 		banned = context_banned(ce);
1084 		init_sched_state(ce);
1085 
1086 		spin_unlock(&ce->guc_state.lock);
1087 
1088 		if (pending_enable || destroyed || deregister) {
1089 			decr_outstanding_submission_g2h(guc);
1090 			if (deregister)
1091 				guc_signal_context_fence(ce);
1092 			if (destroyed) {
1093 				intel_gt_pm_put_async(guc_to_gt(guc));
1094 				release_guc_id(guc, ce);
1095 				__guc_context_destroy(ce);
1096 			}
1097 			if (pending_enable || deregister)
1098 				intel_context_put(ce);
1099 		}
1100 
1101 		/* Not mutualy exclusive with above if statement. */
1102 		if (pending_disable) {
1103 			guc_signal_context_fence(ce);
1104 			if (banned) {
1105 				guc_cancel_context_requests(ce);
1106 				intel_engine_signal_breadcrumbs(ce->engine);
1107 			}
1108 			intel_context_sched_disable_unpin(ce);
1109 			decr_outstanding_submission_g2h(guc);
1110 
1111 			spin_lock(&ce->guc_state.lock);
1112 			guc_blocked_fence_complete(ce);
1113 			spin_unlock(&ce->guc_state.lock);
1114 
1115 			intel_context_put(ce);
1116 		}
1117 
1118 		if (do_put)
1119 			intel_context_put(ce);
1120 		xa_lock(&guc->context_lookup);
1121 	}
1122 	xa_unlock_irqrestore(&guc->context_lookup, flags);
1123 }
1124 
1125 /*
1126  * GuC stores busyness stats for each engine at context in/out boundaries. A
1127  * context 'in' logs execution start time, 'out' adds in -> out delta to total.
1128  * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
1129  * GuC.
1130  *
1131  * __i915_pmu_event_read samples engine busyness. When sampling, if context id
1132  * is valid (!= ~0) and start is non-zero, the engine is considered to be
1133  * active. For an active engine total busyness = total + (now - start), where
1134  * 'now' is the time at which the busyness is sampled. For inactive engine,
1135  * total busyness = total.
1136  *
1137  * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
1138  *
1139  * The start and total values provided by GuC are 32 bits and wrap around in a
1140  * few minutes. Since perf pmu provides busyness as 64 bit monotonically
1141  * increasing ns values, there is a need for this implementation to account for
1142  * overflows and extend the GuC provided values to 64 bits before returning
1143  * busyness to the user. In order to do that, a worker runs periodically at
1144  * frequency = 1/8th the time it takes for the timestamp to wrap (i.e. once in
1145  * 27 seconds for a gt clock frequency of 19.2 MHz).
1146  */
1147 
1148 #define WRAP_TIME_CLKS U32_MAX
1149 #define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3)
1150 
1151 static void
1152 __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
1153 {
1154 	u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
1155 	u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp);
1156 
1157 	if (new_start == lower_32_bits(*prev_start))
1158 		return;
1159 
1160 	/*
1161 	 * When gt is unparked, we update the gt timestamp and start the ping
1162 	 * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
1163 	 * is unparked, all switched in contexts will have a start time that is
1164 	 * within +/- POLL_TIME_CLKS of the most recent gt_stamp.
1165 	 *
1166 	 * If neither gt_stamp nor new_start has rolled over, then the
1167 	 * gt_stamp_hi does not need to be adjusted, however if one of them has
1168 	 * rolled over, we need to adjust gt_stamp_hi accordingly.
1169 	 *
1170 	 * The below conditions address the cases of new_start rollover and
1171 	 * gt_stamp_last rollover respectively.
1172 	 */
1173 	if (new_start < gt_stamp_last &&
1174 	    (new_start - gt_stamp_last) <= POLL_TIME_CLKS)
1175 		gt_stamp_hi++;
1176 
1177 	if (new_start > gt_stamp_last &&
1178 	    (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi)
1179 		gt_stamp_hi--;
1180 
1181 	*prev_start = ((u64)gt_stamp_hi << 32) | new_start;
1182 }
1183 
1184 #define record_read(map_, field_) \
1185 	iosys_map_rd_field(map_, 0, struct guc_engine_usage_record, field_)
1186 
1187 /*
1188  * GuC updates shared memory and KMD reads it. Since this is not synchronized,
1189  * we run into a race where the value read is inconsistent. Sometimes the
1190  * inconsistency is in reading the upper MSB bytes of the last_in value when
1191  * this race occurs. 2 types of cases are seen - upper 8 bits are zero and upper
1192  * 24 bits are zero. Since these are non-zero values, it is non-trivial to
1193  * determine validity of these values. Instead we read the values multiple times
1194  * until they are consistent. In test runs, 3 attempts results in consistent
1195  * values. The upper bound is set to 6 attempts and may need to be tuned as per
1196  * any new occurences.
1197  */
1198 static void __get_engine_usage_record(struct intel_engine_cs *engine,
1199 				      u32 *last_in, u32 *id, u32 *total)
1200 {
1201 	struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine);
1202 	int i = 0;
1203 
1204 	do {
1205 		*last_in = record_read(&rec_map, last_switch_in_stamp);
1206 		*id = record_read(&rec_map, current_context_index);
1207 		*total = record_read(&rec_map, total_runtime);
1208 
1209 		if (record_read(&rec_map, last_switch_in_stamp) == *last_in &&
1210 		    record_read(&rec_map, current_context_index) == *id &&
1211 		    record_read(&rec_map, total_runtime) == *total)
1212 			break;
1213 	} while (++i < 6);
1214 }
1215 
1216 static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
1217 {
1218 	struct intel_engine_guc_stats *stats = &engine->stats.guc;
1219 	struct intel_guc *guc = &engine->gt->uc.guc;
1220 	u32 last_switch, ctx_id, total;
1221 
1222 	lockdep_assert_held(&guc->timestamp.lock);
1223 
1224 	__get_engine_usage_record(engine, &last_switch, &ctx_id, &total);
1225 
1226 	stats->running = ctx_id != ~0U && last_switch;
1227 	if (stats->running)
1228 		__extend_last_switch(guc, &stats->start_gt_clk, last_switch);
1229 
1230 	/*
1231 	 * Instead of adjusting the total for overflow, just add the
1232 	 * difference from previous sample stats->total_gt_clks
1233 	 */
1234 	if (total && total != ~0U) {
1235 		stats->total_gt_clks += (u32)(total - stats->prev_total);
1236 		stats->prev_total = total;
1237 	}
1238 }
1239 
1240 static u32 gpm_timestamp_shift(struct intel_gt *gt)
1241 {
1242 	intel_wakeref_t wakeref;
1243 	u32 reg, shift;
1244 
1245 	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
1246 		reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
1247 
1248 	shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
1249 		GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
1250 
1251 	return 3 - shift;
1252 }
1253 
1254 static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
1255 {
1256 	struct intel_gt *gt = guc_to_gt(guc);
1257 	u32 gt_stamp_lo, gt_stamp_hi;
1258 	u64 gpm_ts;
1259 
1260 	lockdep_assert_held(&guc->timestamp.lock);
1261 
1262 	gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
1263 	gpm_ts = intel_uncore_read64_2x32(gt->uncore, MISC_STATUS0,
1264 					  MISC_STATUS1) >> guc->timestamp.shift;
1265 	gt_stamp_lo = lower_32_bits(gpm_ts);
1266 	*now = ktime_get();
1267 
1268 	if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp))
1269 		gt_stamp_hi++;
1270 
1271 	guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo;
1272 }
1273 
1274 /*
1275  * Unlike the execlist mode of submission total and active times are in terms of
1276  * gt clocks. The *now parameter is retained to return the cpu time at which the
1277  * busyness was sampled.
1278  */
1279 static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
1280 {
1281 	struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
1282 	struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
1283 	struct intel_gt *gt = engine->gt;
1284 	struct intel_guc *guc = &gt->uc.guc;
1285 	u64 total, gt_stamp_saved;
1286 	unsigned long flags;
1287 	u32 reset_count;
1288 	bool in_reset;
1289 
1290 	spin_lock_irqsave(&guc->timestamp.lock, flags);
1291 
1292 	/*
1293 	 * If a reset happened, we risk reading partially updated engine
1294 	 * busyness from GuC, so we just use the driver stored copy of busyness.
1295 	 * Synchronize with gt reset using reset_count and the
1296 	 * I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count
1297 	 * after I915_RESET_BACKOFF flag, so ensure that the reset_count is
1298 	 * usable by checking the flag afterwards.
1299 	 */
1300 	reset_count = i915_reset_count(gpu_error);
1301 	in_reset = test_bit(I915_RESET_BACKOFF, &gt->reset.flags);
1302 
1303 	*now = ktime_get();
1304 
1305 	/*
1306 	 * The active busyness depends on start_gt_clk and gt_stamp.
1307 	 * gt_stamp is updated by i915 only when gt is awake and the
1308 	 * start_gt_clk is derived from GuC state. To get a consistent
1309 	 * view of activity, we query the GuC state only if gt is awake.
1310 	 */
1311 	if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
1312 		stats_saved = *stats;
1313 		gt_stamp_saved = guc->timestamp.gt_stamp;
1314 		/*
1315 		 * Update gt_clks, then gt timestamp to simplify the 'gt_stamp -
1316 		 * start_gt_clk' calculation below for active engines.
1317 		 */
1318 		guc_update_engine_gt_clks(engine);
1319 		guc_update_pm_timestamp(guc, now);
1320 		intel_gt_pm_put_async(gt);
1321 		if (i915_reset_count(gpu_error) != reset_count) {
1322 			*stats = stats_saved;
1323 			guc->timestamp.gt_stamp = gt_stamp_saved;
1324 		}
1325 	}
1326 
1327 	total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
1328 	if (stats->running) {
1329 		u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
1330 
1331 		total += intel_gt_clock_interval_to_ns(gt, clk);
1332 	}
1333 
1334 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1335 
1336 	return ns_to_ktime(total);
1337 }
1338 
1339 static void __reset_guc_busyness_stats(struct intel_guc *guc)
1340 {
1341 	struct intel_gt *gt = guc_to_gt(guc);
1342 	struct intel_engine_cs *engine;
1343 	enum intel_engine_id id;
1344 	unsigned long flags;
1345 	ktime_t unused;
1346 
1347 	cancel_delayed_work_sync(&guc->timestamp.work);
1348 
1349 	spin_lock_irqsave(&guc->timestamp.lock, flags);
1350 
1351 	guc_update_pm_timestamp(guc, &unused);
1352 	for_each_engine(engine, gt, id) {
1353 		guc_update_engine_gt_clks(engine);
1354 		engine->stats.guc.prev_total = 0;
1355 	}
1356 
1357 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1358 }
1359 
1360 static void __update_guc_busyness_stats(struct intel_guc *guc)
1361 {
1362 	struct intel_gt *gt = guc_to_gt(guc);
1363 	struct intel_engine_cs *engine;
1364 	enum intel_engine_id id;
1365 	unsigned long flags;
1366 	ktime_t unused;
1367 
1368 	guc->timestamp.last_stat_jiffies = jiffies;
1369 
1370 	spin_lock_irqsave(&guc->timestamp.lock, flags);
1371 
1372 	guc_update_pm_timestamp(guc, &unused);
1373 	for_each_engine(engine, gt, id)
1374 		guc_update_engine_gt_clks(engine);
1375 
1376 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1377 }
1378 
1379 static void guc_timestamp_ping(struct work_struct *wrk)
1380 {
1381 	struct intel_guc *guc = container_of(wrk, typeof(*guc),
1382 					     timestamp.work.work);
1383 	struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
1384 	struct intel_gt *gt = guc_to_gt(guc);
1385 	intel_wakeref_t wakeref;
1386 	int srcu, ret;
1387 
1388 	/*
1389 	 * Synchronize with gt reset to make sure the worker does not
1390 	 * corrupt the engine/guc stats.
1391 	 */
1392 	ret = intel_gt_reset_trylock(gt, &srcu);
1393 	if (ret)
1394 		return;
1395 
1396 	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
1397 		__update_guc_busyness_stats(guc);
1398 
1399 	intel_gt_reset_unlock(gt, srcu);
1400 
1401 	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
1402 			 guc->timestamp.ping_delay);
1403 }
1404 
1405 static int guc_action_enable_usage_stats(struct intel_guc *guc)
1406 {
1407 	u32 offset = intel_guc_engine_usage_offset(guc);
1408 	u32 action[] = {
1409 		INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
1410 		offset,
1411 		0,
1412 	};
1413 
1414 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
1415 }
1416 
1417 static void guc_init_engine_stats(struct intel_guc *guc)
1418 {
1419 	struct intel_gt *gt = guc_to_gt(guc);
1420 	intel_wakeref_t wakeref;
1421 
1422 	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
1423 			 guc->timestamp.ping_delay);
1424 
1425 	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
1426 		int ret = guc_action_enable_usage_stats(guc);
1427 
1428 		if (ret)
1429 			drm_err(&gt->i915->drm,
1430 				"Failed to enable usage stats: %d!\n", ret);
1431 	}
1432 }
1433 
1434 void intel_guc_busyness_park(struct intel_gt *gt)
1435 {
1436 	struct intel_guc *guc = &gt->uc.guc;
1437 
1438 	if (!guc_submission_initialized(guc))
1439 		return;
1440 
1441 	cancel_delayed_work(&guc->timestamp.work);
1442 
1443 	/*
1444 	 * Before parking, we should sample engine busyness stats if we need to.
1445 	 * We can skip it if we are less than half a ping from the last time we
1446 	 * sampled the busyness stats.
1447 	 */
1448 	if (guc->timestamp.last_stat_jiffies &&
1449 	    !time_after(jiffies, guc->timestamp.last_stat_jiffies +
1450 			(guc->timestamp.ping_delay / 2)))
1451 		return;
1452 
1453 	__update_guc_busyness_stats(guc);
1454 }
1455 
1456 void intel_guc_busyness_unpark(struct intel_gt *gt)
1457 {
1458 	struct intel_guc *guc = &gt->uc.guc;
1459 	unsigned long flags;
1460 	ktime_t unused;
1461 
1462 	if (!guc_submission_initialized(guc))
1463 		return;
1464 
1465 	spin_lock_irqsave(&guc->timestamp.lock, flags);
1466 	guc_update_pm_timestamp(guc, &unused);
1467 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1468 	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
1469 			 guc->timestamp.ping_delay);
1470 }
1471 
1472 static inline bool
1473 submission_disabled(struct intel_guc *guc)
1474 {
1475 	struct i915_sched_engine * const sched_engine = guc->sched_engine;
1476 
1477 	return unlikely(!sched_engine ||
1478 			!__tasklet_is_enabled(&sched_engine->tasklet) ||
1479 			intel_gt_is_wedged(guc_to_gt(guc)));
1480 }
1481 
1482 static void disable_submission(struct intel_guc *guc)
1483 {
1484 	struct i915_sched_engine * const sched_engine = guc->sched_engine;
1485 
1486 	if (__tasklet_is_enabled(&sched_engine->tasklet)) {
1487 		GEM_BUG_ON(!guc->ct.enabled);
1488 		__tasklet_disable_sync_once(&sched_engine->tasklet);
1489 		sched_engine->tasklet.callback = NULL;
1490 	}
1491 }
1492 
1493 static void enable_submission(struct intel_guc *guc)
1494 {
1495 	struct i915_sched_engine * const sched_engine = guc->sched_engine;
1496 	unsigned long flags;
1497 
1498 	spin_lock_irqsave(&guc->sched_engine->lock, flags);
1499 	sched_engine->tasklet.callback = guc_submission_tasklet;
1500 	wmb();	/* Make sure callback visible */
1501 	if (!__tasklet_is_enabled(&sched_engine->tasklet) &&
1502 	    __tasklet_enable(&sched_engine->tasklet)) {
1503 		GEM_BUG_ON(!guc->ct.enabled);
1504 
1505 		/* And kick in case we missed a new request submission. */
1506 		tasklet_hi_schedule(&sched_engine->tasklet);
1507 	}
1508 	spin_unlock_irqrestore(&guc->sched_engine->lock, flags);
1509 }
1510 
1511 static void guc_flush_submissions(struct intel_guc *guc)
1512 {
1513 	struct i915_sched_engine * const sched_engine = guc->sched_engine;
1514 	unsigned long flags;
1515 
1516 	spin_lock_irqsave(&sched_engine->lock, flags);
1517 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1518 }
1519 
1520 static void guc_flush_destroyed_contexts(struct intel_guc *guc);
1521 
1522 void intel_guc_submission_reset_prepare(struct intel_guc *guc)
1523 {
1524 	if (unlikely(!guc_submission_initialized(guc))) {
1525 		/* Reset called during driver load? GuC not yet initialised! */
1526 		return;
1527 	}
1528 
1529 	intel_gt_park_heartbeats(guc_to_gt(guc));
1530 	disable_submission(guc);
1531 	guc->interrupts.disable(guc);
1532 	__reset_guc_busyness_stats(guc);
1533 
1534 	/* Flush IRQ handler */
1535 	spin_lock_irq(&guc_to_gt(guc)->irq_lock);
1536 	spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
1537 
1538 	guc_flush_submissions(guc);
1539 	guc_flush_destroyed_contexts(guc);
1540 	flush_work(&guc->ct.requests.worker);
1541 
1542 	scrub_guc_desc_for_outstanding_g2h(guc);
1543 }
1544 
1545 static struct intel_engine_cs *
1546 guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling)
1547 {
1548 	struct intel_engine_cs *engine;
1549 	intel_engine_mask_t tmp, mask = ve->mask;
1550 	unsigned int num_siblings = 0;
1551 
1552 	for_each_engine_masked(engine, ve->gt, mask, tmp)
1553 		if (num_siblings++ == sibling)
1554 			return engine;
1555 
1556 	return NULL;
1557 }
1558 
1559 static inline struct intel_engine_cs *
1560 __context_to_physical_engine(struct intel_context *ce)
1561 {
1562 	struct intel_engine_cs *engine = ce->engine;
1563 
1564 	if (intel_engine_is_virtual(engine))
1565 		engine = guc_virtual_get_sibling(engine, 0);
1566 
1567 	return engine;
1568 }
1569 
1570 static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
1571 {
1572 	struct intel_engine_cs *engine = __context_to_physical_engine(ce);
1573 
1574 	if (intel_context_is_banned(ce))
1575 		return;
1576 
1577 	GEM_BUG_ON(!intel_context_is_pinned(ce));
1578 
1579 	/*
1580 	 * We want a simple context + ring to execute the breadcrumb update.
1581 	 * We cannot rely on the context being intact across the GPU hang,
1582 	 * so clear it and rebuild just what we need for the breadcrumb.
1583 	 * All pending requests for this context will be zapped, and any
1584 	 * future request will be after userspace has had the opportunity
1585 	 * to recreate its own state.
1586 	 */
1587 	if (scrub)
1588 		lrc_init_regs(ce, engine, true);
1589 
1590 	/* Rerun the request; its payload has been neutered (if guilty). */
1591 	lrc_update_regs(ce, engine, head);
1592 }
1593 
1594 static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
1595 {
1596 	if (!IS_GRAPHICS_VER(engine->i915, 11, 12))
1597 		return;
1598 
1599 	intel_engine_stop_cs(engine);
1600 
1601 	/*
1602 	 * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need
1603 	 * to wait for any pending mi force wakeups
1604 	 */
1605 	intel_engine_wait_for_pending_mi_fw(engine);
1606 }
1607 
1608 static void guc_reset_nop(struct intel_engine_cs *engine)
1609 {
1610 }
1611 
1612 static void guc_rewind_nop(struct intel_engine_cs *engine, bool stalled)
1613 {
1614 }
1615 
1616 static void
1617 __unwind_incomplete_requests(struct intel_context *ce)
1618 {
1619 	struct i915_request *rq, *rn;
1620 	struct list_head *pl;
1621 	int prio = I915_PRIORITY_INVALID;
1622 	struct i915_sched_engine * const sched_engine =
1623 		ce->engine->sched_engine;
1624 	unsigned long flags;
1625 
1626 	spin_lock_irqsave(&sched_engine->lock, flags);
1627 	spin_lock(&ce->guc_state.lock);
1628 	list_for_each_entry_safe_reverse(rq, rn,
1629 					 &ce->guc_state.requests,
1630 					 sched.link) {
1631 		if (i915_request_completed(rq))
1632 			continue;
1633 
1634 		list_del_init(&rq->sched.link);
1635 		__i915_request_unsubmit(rq);
1636 
1637 		/* Push the request back into the queue for later resubmission. */
1638 		GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
1639 		if (rq_prio(rq) != prio) {
1640 			prio = rq_prio(rq);
1641 			pl = i915_sched_lookup_priolist(sched_engine, prio);
1642 		}
1643 		GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
1644 
1645 		list_add(&rq->sched.link, pl);
1646 		set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
1647 	}
1648 	spin_unlock(&ce->guc_state.lock);
1649 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1650 }
1651 
1652 static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled)
1653 {
1654 	bool guilty;
1655 	struct i915_request *rq;
1656 	unsigned long flags;
1657 	u32 head;
1658 	int i, number_children = ce->parallel.number_children;
1659 	struct intel_context *parent = ce;
1660 
1661 	GEM_BUG_ON(intel_context_is_child(ce));
1662 
1663 	intel_context_get(ce);
1664 
1665 	/*
1666 	 * GuC will implicitly mark the context as non-schedulable when it sends
1667 	 * the reset notification. Make sure our state reflects this change. The
1668 	 * context will be marked enabled on resubmission.
1669 	 */
1670 	spin_lock_irqsave(&ce->guc_state.lock, flags);
1671 	clr_context_enabled(ce);
1672 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1673 
1674 	/*
1675 	 * For each context in the relationship find the hanging request
1676 	 * resetting each context / request as needed
1677 	 */
1678 	for (i = 0; i < number_children + 1; ++i) {
1679 		if (!intel_context_is_pinned(ce))
1680 			goto next_context;
1681 
1682 		guilty = false;
1683 		rq = intel_context_find_active_request(ce);
1684 		if (!rq) {
1685 			head = ce->ring->tail;
1686 			goto out_replay;
1687 		}
1688 
1689 		if (i915_request_started(rq))
1690 			guilty = stalled & ce->engine->mask;
1691 
1692 		GEM_BUG_ON(i915_active_is_idle(&ce->active));
1693 		head = intel_ring_wrap(ce->ring, rq->head);
1694 
1695 		__i915_request_reset(rq, guilty);
1696 out_replay:
1697 		guc_reset_state(ce, head, guilty);
1698 next_context:
1699 		if (i != number_children)
1700 			ce = list_next_entry(ce, parallel.child_link);
1701 	}
1702 
1703 	__unwind_incomplete_requests(parent);
1704 	intel_context_put(parent);
1705 }
1706 
1707 void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
1708 {
1709 	struct intel_context *ce;
1710 	unsigned long index;
1711 	unsigned long flags;
1712 
1713 	if (unlikely(!guc_submission_initialized(guc))) {
1714 		/* Reset called during driver load? GuC not yet initialised! */
1715 		return;
1716 	}
1717 
1718 	xa_lock_irqsave(&guc->context_lookup, flags);
1719 	xa_for_each(&guc->context_lookup, index, ce) {
1720 		if (!kref_get_unless_zero(&ce->ref))
1721 			continue;
1722 
1723 		xa_unlock(&guc->context_lookup);
1724 
1725 		if (intel_context_is_pinned(ce) &&
1726 		    !intel_context_is_child(ce))
1727 			__guc_reset_context(ce, stalled);
1728 
1729 		intel_context_put(ce);
1730 
1731 		xa_lock(&guc->context_lookup);
1732 	}
1733 	xa_unlock_irqrestore(&guc->context_lookup, flags);
1734 
1735 	/* GuC is blown away, drop all references to contexts */
1736 	xa_destroy(&guc->context_lookup);
1737 }
1738 
1739 static void guc_cancel_context_requests(struct intel_context *ce)
1740 {
1741 	struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine;
1742 	struct i915_request *rq;
1743 	unsigned long flags;
1744 
1745 	/* Mark all executing requests as skipped. */
1746 	spin_lock_irqsave(&sched_engine->lock, flags);
1747 	spin_lock(&ce->guc_state.lock);
1748 	list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
1749 		i915_request_put(i915_request_mark_eio(rq));
1750 	spin_unlock(&ce->guc_state.lock);
1751 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1752 }
1753 
1754 static void
1755 guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine)
1756 {
1757 	struct i915_request *rq, *rn;
1758 	struct rb_node *rb;
1759 	unsigned long flags;
1760 
1761 	/* Can be called during boot if GuC fails to load */
1762 	if (!sched_engine)
1763 		return;
1764 
1765 	/*
1766 	 * Before we call engine->cancel_requests(), we should have exclusive
1767 	 * access to the submission state. This is arranged for us by the
1768 	 * caller disabling the interrupt generation, the tasklet and other
1769 	 * threads that may then access the same state, giving us a free hand
1770 	 * to reset state. However, we still need to let lockdep be aware that
1771 	 * we know this state may be accessed in hardirq context, so we
1772 	 * disable the irq around this manipulation and we want to keep
1773 	 * the spinlock focused on its duties and not accidentally conflate
1774 	 * coverage to the submission's irq state. (Similarly, although we
1775 	 * shouldn't need to disable irq around the manipulation of the
1776 	 * submission's irq state, we also wish to remind ourselves that
1777 	 * it is irq state.)
1778 	 */
1779 	spin_lock_irqsave(&sched_engine->lock, flags);
1780 
1781 	/* Flush the queued requests to the timeline list (for retiring). */
1782 	while ((rb = rb_first_cached(&sched_engine->queue))) {
1783 		struct i915_priolist *p = to_priolist(rb);
1784 
1785 		priolist_for_each_request_consume(rq, rn, p) {
1786 			list_del_init(&rq->sched.link);
1787 
1788 			__i915_request_submit(rq);
1789 
1790 			i915_request_put(i915_request_mark_eio(rq));
1791 		}
1792 
1793 		rb_erase_cached(&p->node, &sched_engine->queue);
1794 		i915_priolist_free(p);
1795 	}
1796 
1797 	/* Remaining _unready_ requests will be nop'ed when submitted */
1798 
1799 	sched_engine->queue_priority_hint = INT_MIN;
1800 	sched_engine->queue = RB_ROOT_CACHED;
1801 
1802 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1803 }
1804 
1805 void intel_guc_submission_cancel_requests(struct intel_guc *guc)
1806 {
1807 	struct intel_context *ce;
1808 	unsigned long index;
1809 	unsigned long flags;
1810 
1811 	xa_lock_irqsave(&guc->context_lookup, flags);
1812 	xa_for_each(&guc->context_lookup, index, ce) {
1813 		if (!kref_get_unless_zero(&ce->ref))
1814 			continue;
1815 
1816 		xa_unlock(&guc->context_lookup);
1817 
1818 		if (intel_context_is_pinned(ce) &&
1819 		    !intel_context_is_child(ce))
1820 			guc_cancel_context_requests(ce);
1821 
1822 		intel_context_put(ce);
1823 
1824 		xa_lock(&guc->context_lookup);
1825 	}
1826 	xa_unlock_irqrestore(&guc->context_lookup, flags);
1827 
1828 	guc_cancel_sched_engine_requests(guc->sched_engine);
1829 
1830 	/* GuC is blown away, drop all references to contexts */
1831 	xa_destroy(&guc->context_lookup);
1832 }
1833 
1834 void intel_guc_submission_reset_finish(struct intel_guc *guc)
1835 {
1836 	/* Reset called during driver load or during wedge? */
1837 	if (unlikely(!guc_submission_initialized(guc) ||
1838 		     intel_gt_is_wedged(guc_to_gt(guc)))) {
1839 		return;
1840 	}
1841 
1842 	/*
1843 	 * Technically possible for either of these values to be non-zero here,
1844 	 * but very unlikely + harmless. Regardless let's add a warn so we can
1845 	 * see in CI if this happens frequently / a precursor to taking down the
1846 	 * machine.
1847 	 */
1848 	GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
1849 	atomic_set(&guc->outstanding_submission_g2h, 0);
1850 
1851 	intel_guc_global_policies_update(guc);
1852 	enable_submission(guc);
1853 	intel_gt_unpark_heartbeats(guc_to_gt(guc));
1854 }
1855 
1856 static void destroyed_worker_func(struct work_struct *w);
1857 static void reset_fail_worker_func(struct work_struct *w);
1858 
1859 /*
1860  * Set up the memory resources to be shared with the GuC (via the GGTT)
1861  * at firmware loading time.
1862  */
1863 int intel_guc_submission_init(struct intel_guc *guc)
1864 {
1865 	struct intel_gt *gt = guc_to_gt(guc);
1866 	int ret;
1867 
1868 	if (guc->submission_initialized)
1869 		return 0;
1870 
1871 	if (GET_UC_VER(guc) < MAKE_UC_VER(70, 0, 0)) {
1872 		ret = guc_lrc_desc_pool_create_v69(guc);
1873 		if (ret)
1874 			return ret;
1875 	}
1876 
1877 	guc->submission_state.guc_ids_bitmap =
1878 		bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
1879 	if (!guc->submission_state.guc_ids_bitmap) {
1880 		ret = -ENOMEM;
1881 		goto destroy_pool;
1882 	}
1883 
1884 	guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
1885 	guc->timestamp.shift = gpm_timestamp_shift(gt);
1886 	guc->submission_initialized = true;
1887 
1888 	return 0;
1889 
1890 destroy_pool:
1891 	guc_lrc_desc_pool_destroy_v69(guc);
1892 
1893 	return ret;
1894 }
1895 
1896 void intel_guc_submission_fini(struct intel_guc *guc)
1897 {
1898 	if (!guc->submission_initialized)
1899 		return;
1900 
1901 	guc_flush_destroyed_contexts(guc);
1902 	guc_lrc_desc_pool_destroy_v69(guc);
1903 	i915_sched_engine_put(guc->sched_engine);
1904 	bitmap_free(guc->submission_state.guc_ids_bitmap);
1905 	guc->submission_initialized = false;
1906 }
1907 
1908 static inline void queue_request(struct i915_sched_engine *sched_engine,
1909 				 struct i915_request *rq,
1910 				 int prio)
1911 {
1912 	GEM_BUG_ON(!list_empty(&rq->sched.link));
1913 	list_add_tail(&rq->sched.link,
1914 		      i915_sched_lookup_priolist(sched_engine, prio));
1915 	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
1916 	tasklet_hi_schedule(&sched_engine->tasklet);
1917 }
1918 
1919 static int guc_bypass_tasklet_submit(struct intel_guc *guc,
1920 				     struct i915_request *rq)
1921 {
1922 	int ret = 0;
1923 
1924 	__i915_request_submit(rq);
1925 
1926 	trace_i915_request_in(rq, 0);
1927 
1928 	if (is_multi_lrc_rq(rq)) {
1929 		if (multi_lrc_submit(rq)) {
1930 			ret = guc_wq_item_append(guc, rq);
1931 			if (!ret)
1932 				ret = guc_add_request(guc, rq);
1933 		}
1934 	} else {
1935 		guc_set_lrc_tail(rq);
1936 		ret = guc_add_request(guc, rq);
1937 	}
1938 
1939 	if (unlikely(ret == -EPIPE))
1940 		disable_submission(guc);
1941 
1942 	return ret;
1943 }
1944 
1945 static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
1946 {
1947 	struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
1948 	struct intel_context *ce = request_to_scheduling_context(rq);
1949 
1950 	return submission_disabled(guc) || guc->stalled_request ||
1951 		!i915_sched_engine_is_empty(sched_engine) ||
1952 		!ctx_id_mapped(guc, ce->guc_id.id);
1953 }
1954 
1955 static void guc_submit_request(struct i915_request *rq)
1956 {
1957 	struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
1958 	struct intel_guc *guc = &rq->engine->gt->uc.guc;
1959 	unsigned long flags;
1960 
1961 	/* Will be called from irq-context when using foreign fences. */
1962 	spin_lock_irqsave(&sched_engine->lock, flags);
1963 
1964 	if (need_tasklet(guc, rq))
1965 		queue_request(sched_engine, rq, rq_prio(rq));
1966 	else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY)
1967 		tasklet_hi_schedule(&sched_engine->tasklet);
1968 
1969 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1970 }
1971 
1972 static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
1973 {
1974 	int ret;
1975 
1976 	GEM_BUG_ON(intel_context_is_child(ce));
1977 
1978 	if (intel_context_is_parent(ce))
1979 		ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
1980 					      NUMBER_MULTI_LRC_GUC_ID(guc),
1981 					      order_base_2(ce->parallel.number_children
1982 							   + 1));
1983 	else
1984 		ret = ida_simple_get(&guc->submission_state.guc_ids,
1985 				     NUMBER_MULTI_LRC_GUC_ID(guc),
1986 				     guc->submission_state.num_guc_ids,
1987 				     GFP_KERNEL | __GFP_RETRY_MAYFAIL |
1988 				     __GFP_NOWARN);
1989 	if (unlikely(ret < 0))
1990 		return ret;
1991 
1992 	ce->guc_id.id = ret;
1993 	return 0;
1994 }
1995 
1996 static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
1997 {
1998 	GEM_BUG_ON(intel_context_is_child(ce));
1999 
2000 	if (!context_guc_id_invalid(ce)) {
2001 		if (intel_context_is_parent(ce))
2002 			bitmap_release_region(guc->submission_state.guc_ids_bitmap,
2003 					      ce->guc_id.id,
2004 					      order_base_2(ce->parallel.number_children
2005 							   + 1));
2006 		else
2007 			ida_simple_remove(&guc->submission_state.guc_ids,
2008 					  ce->guc_id.id);
2009 		clr_ctx_id_mapping(guc, ce->guc_id.id);
2010 		set_context_guc_id_invalid(ce);
2011 	}
2012 	if (!list_empty(&ce->guc_id.link))
2013 		list_del_init(&ce->guc_id.link);
2014 }
2015 
2016 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
2017 {
2018 	unsigned long flags;
2019 
2020 	spin_lock_irqsave(&guc->submission_state.lock, flags);
2021 	__release_guc_id(guc, ce);
2022 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2023 }
2024 
2025 static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
2026 {
2027 	struct intel_context *cn;
2028 
2029 	lockdep_assert_held(&guc->submission_state.lock);
2030 	GEM_BUG_ON(intel_context_is_child(ce));
2031 	GEM_BUG_ON(intel_context_is_parent(ce));
2032 
2033 	if (!list_empty(&guc->submission_state.guc_id_list)) {
2034 		cn = list_first_entry(&guc->submission_state.guc_id_list,
2035 				      struct intel_context,
2036 				      guc_id.link);
2037 
2038 		GEM_BUG_ON(atomic_read(&cn->guc_id.ref));
2039 		GEM_BUG_ON(context_guc_id_invalid(cn));
2040 		GEM_BUG_ON(intel_context_is_child(cn));
2041 		GEM_BUG_ON(intel_context_is_parent(cn));
2042 
2043 		list_del_init(&cn->guc_id.link);
2044 		ce->guc_id.id = cn->guc_id.id;
2045 
2046 		spin_lock(&cn->guc_state.lock);
2047 		clr_context_registered(cn);
2048 		spin_unlock(&cn->guc_state.lock);
2049 
2050 		set_context_guc_id_invalid(cn);
2051 
2052 #ifdef CONFIG_DRM_I915_SELFTEST
2053 		guc->number_guc_id_stolen++;
2054 #endif
2055 
2056 		return 0;
2057 	} else {
2058 		return -EAGAIN;
2059 	}
2060 }
2061 
2062 static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce)
2063 {
2064 	int ret;
2065 
2066 	lockdep_assert_held(&guc->submission_state.lock);
2067 	GEM_BUG_ON(intel_context_is_child(ce));
2068 
2069 	ret = new_guc_id(guc, ce);
2070 	if (unlikely(ret < 0)) {
2071 		if (intel_context_is_parent(ce))
2072 			return -ENOSPC;
2073 
2074 		ret = steal_guc_id(guc, ce);
2075 		if (ret < 0)
2076 			return ret;
2077 	}
2078 
2079 	if (intel_context_is_parent(ce)) {
2080 		struct intel_context *child;
2081 		int i = 1;
2082 
2083 		for_each_child(ce, child)
2084 			child->guc_id.id = ce->guc_id.id + i++;
2085 	}
2086 
2087 	return 0;
2088 }
2089 
2090 #define PIN_GUC_ID_TRIES	4
2091 static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
2092 {
2093 	int ret = 0;
2094 	unsigned long flags, tries = PIN_GUC_ID_TRIES;
2095 
2096 	GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
2097 
2098 try_again:
2099 	spin_lock_irqsave(&guc->submission_state.lock, flags);
2100 
2101 	might_lock(&ce->guc_state.lock);
2102 
2103 	if (context_guc_id_invalid(ce)) {
2104 		ret = assign_guc_id(guc, ce);
2105 		if (ret)
2106 			goto out_unlock;
2107 		ret = 1;	/* Indidcates newly assigned guc_id */
2108 	}
2109 	if (!list_empty(&ce->guc_id.link))
2110 		list_del_init(&ce->guc_id.link);
2111 	atomic_inc(&ce->guc_id.ref);
2112 
2113 out_unlock:
2114 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2115 
2116 	/*
2117 	 * -EAGAIN indicates no guc_id are available, let's retire any
2118 	 * outstanding requests to see if that frees up a guc_id. If the first
2119 	 * retire didn't help, insert a sleep with the timeslice duration before
2120 	 * attempting to retire more requests. Double the sleep period each
2121 	 * subsequent pass before finally giving up. The sleep period has max of
2122 	 * 100ms and minimum of 1ms.
2123 	 */
2124 	if (ret == -EAGAIN && --tries) {
2125 		if (PIN_GUC_ID_TRIES - tries > 1) {
2126 			unsigned int timeslice_shifted =
2127 				ce->engine->props.timeslice_duration_ms <<
2128 				(PIN_GUC_ID_TRIES - tries - 2);
2129 			unsigned int max = min_t(unsigned int, 100,
2130 						 timeslice_shifted);
2131 
2132 			msleep(max_t(unsigned int, max, 1));
2133 		}
2134 		intel_gt_retire_requests(guc_to_gt(guc));
2135 		goto try_again;
2136 	}
2137 
2138 	return ret;
2139 }
2140 
2141 static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
2142 {
2143 	unsigned long flags;
2144 
2145 	GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
2146 	GEM_BUG_ON(intel_context_is_child(ce));
2147 
2148 	if (unlikely(context_guc_id_invalid(ce) ||
2149 		     intel_context_is_parent(ce)))
2150 		return;
2151 
2152 	spin_lock_irqsave(&guc->submission_state.lock, flags);
2153 	if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
2154 	    !atomic_read(&ce->guc_id.ref))
2155 		list_add_tail(&ce->guc_id.link,
2156 			      &guc->submission_state.guc_id_list);
2157 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2158 }
2159 
2160 static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc,
2161 					       struct intel_context *ce,
2162 					       u32 guc_id,
2163 					       u32 offset,
2164 					       bool loop)
2165 {
2166 	struct intel_context *child;
2167 	u32 action[4 + MAX_ENGINE_INSTANCE];
2168 	int len = 0;
2169 
2170 	GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
2171 
2172 	action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
2173 	action[len++] = guc_id;
2174 	action[len++] = ce->parallel.number_children + 1;
2175 	action[len++] = offset;
2176 	for_each_child(ce, child) {
2177 		offset += sizeof(struct guc_lrc_desc_v69);
2178 		action[len++] = offset;
2179 	}
2180 
2181 	return guc_submission_send_busy_loop(guc, action, len, 0, loop);
2182 }
2183 
2184 static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc,
2185 					       struct intel_context *ce,
2186 					       struct guc_ctxt_registration_info *info,
2187 					       bool loop)
2188 {
2189 	struct intel_context *child;
2190 	u32 action[13 + (MAX_ENGINE_INSTANCE * 2)];
2191 	int len = 0;
2192 	u32 next_id;
2193 
2194 	GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
2195 
2196 	action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
2197 	action[len++] = info->flags;
2198 	action[len++] = info->context_idx;
2199 	action[len++] = info->engine_class;
2200 	action[len++] = info->engine_submit_mask;
2201 	action[len++] = info->wq_desc_lo;
2202 	action[len++] = info->wq_desc_hi;
2203 	action[len++] = info->wq_base_lo;
2204 	action[len++] = info->wq_base_hi;
2205 	action[len++] = info->wq_size;
2206 	action[len++] = ce->parallel.number_children + 1;
2207 	action[len++] = info->hwlrca_lo;
2208 	action[len++] = info->hwlrca_hi;
2209 
2210 	next_id = info->context_idx + 1;
2211 	for_each_child(ce, child) {
2212 		GEM_BUG_ON(next_id++ != child->guc_id.id);
2213 
2214 		/*
2215 		 * NB: GuC interface supports 64 bit LRCA even though i915/HW
2216 		 * only supports 32 bit currently.
2217 		 */
2218 		action[len++] = lower_32_bits(child->lrc.lrca);
2219 		action[len++] = upper_32_bits(child->lrc.lrca);
2220 	}
2221 
2222 	GEM_BUG_ON(len > ARRAY_SIZE(action));
2223 
2224 	return guc_submission_send_busy_loop(guc, action, len, 0, loop);
2225 }
2226 
2227 static int __guc_action_register_context_v69(struct intel_guc *guc,
2228 					     u32 guc_id,
2229 					     u32 offset,
2230 					     bool loop)
2231 {
2232 	u32 action[] = {
2233 		INTEL_GUC_ACTION_REGISTER_CONTEXT,
2234 		guc_id,
2235 		offset,
2236 	};
2237 
2238 	return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2239 					     0, loop);
2240 }
2241 
2242 static int __guc_action_register_context_v70(struct intel_guc *guc,
2243 					     struct guc_ctxt_registration_info *info,
2244 					     bool loop)
2245 {
2246 	u32 action[] = {
2247 		INTEL_GUC_ACTION_REGISTER_CONTEXT,
2248 		info->flags,
2249 		info->context_idx,
2250 		info->engine_class,
2251 		info->engine_submit_mask,
2252 		info->wq_desc_lo,
2253 		info->wq_desc_hi,
2254 		info->wq_base_lo,
2255 		info->wq_base_hi,
2256 		info->wq_size,
2257 		info->hwlrca_lo,
2258 		info->hwlrca_hi,
2259 	};
2260 
2261 	return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2262 					     0, loop);
2263 }
2264 
2265 static void prepare_context_registration_info_v69(struct intel_context *ce);
2266 static void prepare_context_registration_info_v70(struct intel_context *ce,
2267 						  struct guc_ctxt_registration_info *info);
2268 
2269 static int
2270 register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop)
2271 {
2272 	u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool_v69) +
2273 		ce->guc_id.id * sizeof(struct guc_lrc_desc_v69);
2274 
2275 	prepare_context_registration_info_v69(ce);
2276 
2277 	if (intel_context_is_parent(ce))
2278 		return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id,
2279 							   offset, loop);
2280 	else
2281 		return __guc_action_register_context_v69(guc, ce->guc_id.id,
2282 							 offset, loop);
2283 }
2284 
2285 static int
2286 register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop)
2287 {
2288 	struct guc_ctxt_registration_info info;
2289 
2290 	prepare_context_registration_info_v70(ce, &info);
2291 
2292 	if (intel_context_is_parent(ce))
2293 		return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop);
2294 	else
2295 		return __guc_action_register_context_v70(guc, &info, loop);
2296 }
2297 
2298 static int register_context(struct intel_context *ce, bool loop)
2299 {
2300 	struct intel_guc *guc = ce_to_guc(ce);
2301 	int ret;
2302 
2303 	GEM_BUG_ON(intel_context_is_child(ce));
2304 	trace_intel_context_register(ce);
2305 
2306 	if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0))
2307 		ret = register_context_v70(guc, ce, loop);
2308 	else
2309 		ret = register_context_v69(guc, ce, loop);
2310 
2311 	if (likely(!ret)) {
2312 		unsigned long flags;
2313 
2314 		spin_lock_irqsave(&ce->guc_state.lock, flags);
2315 		set_context_registered(ce);
2316 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2317 
2318 		if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0))
2319 			guc_context_policy_init_v70(ce, loop);
2320 	}
2321 
2322 	return ret;
2323 }
2324 
2325 static int __guc_action_deregister_context(struct intel_guc *guc,
2326 					   u32 guc_id)
2327 {
2328 	u32 action[] = {
2329 		INTEL_GUC_ACTION_DEREGISTER_CONTEXT,
2330 		guc_id,
2331 	};
2332 
2333 	return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2334 					     G2H_LEN_DW_DEREGISTER_CONTEXT,
2335 					     true);
2336 }
2337 
2338 static int deregister_context(struct intel_context *ce, u32 guc_id)
2339 {
2340 	struct intel_guc *guc = ce_to_guc(ce);
2341 
2342 	GEM_BUG_ON(intel_context_is_child(ce));
2343 	trace_intel_context_deregister(ce);
2344 
2345 	return __guc_action_deregister_context(guc, guc_id);
2346 }
2347 
2348 static inline void clear_children_join_go_memory(struct intel_context *ce)
2349 {
2350 	struct parent_scratch *ps = __get_parent_scratch(ce);
2351 	int i;
2352 
2353 	ps->go.semaphore = 0;
2354 	for (i = 0; i < ce->parallel.number_children + 1; ++i)
2355 		ps->join[i].semaphore = 0;
2356 }
2357 
2358 static inline u32 get_children_go_value(struct intel_context *ce)
2359 {
2360 	return __get_parent_scratch(ce)->go.semaphore;
2361 }
2362 
2363 static inline u32 get_children_join_value(struct intel_context *ce,
2364 					  u8 child_index)
2365 {
2366 	return __get_parent_scratch(ce)->join[child_index].semaphore;
2367 }
2368 
2369 struct context_policy {
2370 	u32 count;
2371 	struct guc_update_context_policy h2g;
2372 };
2373 
2374 static u32 __guc_context_policy_action_size(struct context_policy *policy)
2375 {
2376 	size_t bytes = sizeof(policy->h2g.header) +
2377 		       (sizeof(policy->h2g.klv[0]) * policy->count);
2378 
2379 	return bytes / sizeof(u32);
2380 }
2381 
2382 static void __guc_context_policy_start_klv(struct context_policy *policy, u16 guc_id)
2383 {
2384 	policy->h2g.header.action = INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES;
2385 	policy->h2g.header.ctx_id = guc_id;
2386 	policy->count = 0;
2387 }
2388 
2389 #define MAKE_CONTEXT_POLICY_ADD(func, id) \
2390 static void __guc_context_policy_add_##func(struct context_policy *policy, u32 data) \
2391 { \
2392 	GEM_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
2393 	policy->h2g.klv[policy->count].kl = \
2394 		FIELD_PREP(GUC_KLV_0_KEY, GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
2395 		FIELD_PREP(GUC_KLV_0_LEN, 1); \
2396 	policy->h2g.klv[policy->count].value = data; \
2397 	policy->count++; \
2398 }
2399 
2400 MAKE_CONTEXT_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
2401 MAKE_CONTEXT_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
2402 MAKE_CONTEXT_POLICY_ADD(priority, SCHEDULING_PRIORITY)
2403 MAKE_CONTEXT_POLICY_ADD(preempt_to_idle, PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY)
2404 
2405 #undef MAKE_CONTEXT_POLICY_ADD
2406 
2407 static int __guc_context_set_context_policies(struct intel_guc *guc,
2408 					      struct context_policy *policy,
2409 					      bool loop)
2410 {
2411 	return guc_submission_send_busy_loop(guc, (u32 *)&policy->h2g,
2412 					__guc_context_policy_action_size(policy),
2413 					0, loop);
2414 }
2415 
2416 static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
2417 {
2418 	struct intel_engine_cs *engine = ce->engine;
2419 	struct intel_guc *guc = &engine->gt->uc.guc;
2420 	struct context_policy policy;
2421 	u32 execution_quantum;
2422 	u32 preemption_timeout;
2423 	unsigned long flags;
2424 	int ret;
2425 
2426 	/* NB: For both of these, zero means disabled. */
2427 	execution_quantum = engine->props.timeslice_duration_ms * 1000;
2428 	preemption_timeout = engine->props.preempt_timeout_ms * 1000;
2429 
2430 	__guc_context_policy_start_klv(&policy, ce->guc_id.id);
2431 
2432 	__guc_context_policy_add_priority(&policy, ce->guc_state.prio);
2433 	__guc_context_policy_add_execution_quantum(&policy, execution_quantum);
2434 	__guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
2435 
2436 	if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
2437 		__guc_context_policy_add_preempt_to_idle(&policy, 1);
2438 
2439 	ret = __guc_context_set_context_policies(guc, &policy, loop);
2440 
2441 	spin_lock_irqsave(&ce->guc_state.lock, flags);
2442 	if (ret != 0)
2443 		set_context_policy_required(ce);
2444 	else
2445 		clr_context_policy_required(ce);
2446 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2447 
2448 	return ret;
2449 }
2450 
2451 static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
2452 					struct guc_lrc_desc_v69 *desc)
2453 {
2454 	desc->policy_flags = 0;
2455 
2456 	if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
2457 		desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69;
2458 
2459 	/* NB: For both of these, zero means disabled. */
2460 	desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
2461 	desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
2462 }
2463 
2464 static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
2465 {
2466 	/*
2467 	 * this matches the mapping we do in map_i915_prio_to_guc_prio()
2468 	 * (e.g. prio < I915_PRIORITY_NORMAL maps to GUC_CLIENT_PRIORITY_NORMAL)
2469 	 */
2470 	switch (prio) {
2471 	default:
2472 		MISSING_CASE(prio);
2473 		fallthrough;
2474 	case GUC_CLIENT_PRIORITY_KMD_NORMAL:
2475 		return GEN12_CTX_PRIORITY_NORMAL;
2476 	case GUC_CLIENT_PRIORITY_NORMAL:
2477 		return GEN12_CTX_PRIORITY_LOW;
2478 	case GUC_CLIENT_PRIORITY_HIGH:
2479 	case GUC_CLIENT_PRIORITY_KMD_HIGH:
2480 		return GEN12_CTX_PRIORITY_HIGH;
2481 	}
2482 }
2483 
2484 static void prepare_context_registration_info_v69(struct intel_context *ce)
2485 {
2486 	struct intel_engine_cs *engine = ce->engine;
2487 	struct intel_guc *guc = &engine->gt->uc.guc;
2488 	u32 ctx_id = ce->guc_id.id;
2489 	struct guc_lrc_desc_v69 *desc;
2490 	struct intel_context *child;
2491 
2492 	GEM_BUG_ON(!engine->mask);
2493 
2494 	/*
2495 	 * Ensure LRC + CT vmas are is same region as write barrier is done
2496 	 * based on CT vma region.
2497 	 */
2498 	GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
2499 		   i915_gem_object_is_lmem(ce->ring->vma->obj));
2500 
2501 	desc = __get_lrc_desc_v69(guc, ctx_id);
2502 	desc->engine_class = engine_class_to_guc_class(engine->class);
2503 	desc->engine_submit_mask = engine->logical_mask;
2504 	desc->hw_context_desc = ce->lrc.lrca;
2505 	desc->priority = ce->guc_state.prio;
2506 	desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
2507 	guc_context_policy_init_v69(engine, desc);
2508 
2509 	/*
2510 	 * If context is a parent, we need to register a process descriptor
2511 	 * describing a work queue and register all child contexts.
2512 	 */
2513 	if (intel_context_is_parent(ce)) {
2514 		struct guc_process_desc_v69 *pdesc;
2515 
2516 		ce->parallel.guc.wqi_tail = 0;
2517 		ce->parallel.guc.wqi_head = 0;
2518 
2519 		desc->process_desc = i915_ggtt_offset(ce->state) +
2520 			__get_parent_scratch_offset(ce);
2521 		desc->wq_addr = i915_ggtt_offset(ce->state) +
2522 			__get_wq_offset(ce);
2523 		desc->wq_size = WQ_SIZE;
2524 
2525 		pdesc = __get_process_desc_v69(ce);
2526 		memset(pdesc, 0, sizeof(*(pdesc)));
2527 		pdesc->stage_id = ce->guc_id.id;
2528 		pdesc->wq_base_addr = desc->wq_addr;
2529 		pdesc->wq_size_bytes = desc->wq_size;
2530 		pdesc->wq_status = WQ_STATUS_ACTIVE;
2531 
2532 		ce->parallel.guc.wq_head = &pdesc->head;
2533 		ce->parallel.guc.wq_tail = &pdesc->tail;
2534 		ce->parallel.guc.wq_status = &pdesc->wq_status;
2535 
2536 		for_each_child(ce, child) {
2537 			desc = __get_lrc_desc_v69(guc, child->guc_id.id);
2538 
2539 			desc->engine_class =
2540 				engine_class_to_guc_class(engine->class);
2541 			desc->hw_context_desc = child->lrc.lrca;
2542 			desc->priority = ce->guc_state.prio;
2543 			desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
2544 			guc_context_policy_init_v69(engine, desc);
2545 		}
2546 
2547 		clear_children_join_go_memory(ce);
2548 	}
2549 }
2550 
2551 static void prepare_context_registration_info_v70(struct intel_context *ce,
2552 						  struct guc_ctxt_registration_info *info)
2553 {
2554 	struct intel_engine_cs *engine = ce->engine;
2555 	struct intel_guc *guc = &engine->gt->uc.guc;
2556 	u32 ctx_id = ce->guc_id.id;
2557 
2558 	GEM_BUG_ON(!engine->mask);
2559 
2560 	/*
2561 	 * Ensure LRC + CT vmas are is same region as write barrier is done
2562 	 * based on CT vma region.
2563 	 */
2564 	GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
2565 		   i915_gem_object_is_lmem(ce->ring->vma->obj));
2566 
2567 	memset(info, 0, sizeof(*info));
2568 	info->context_idx = ctx_id;
2569 	info->engine_class = engine_class_to_guc_class(engine->class);
2570 	info->engine_submit_mask = engine->logical_mask;
2571 	/*
2572 	 * NB: GuC interface supports 64 bit LRCA even though i915/HW
2573 	 * only supports 32 bit currently.
2574 	 */
2575 	info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
2576 	info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
2577 	if (engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
2578 		info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio);
2579 	info->flags = CONTEXT_REGISTRATION_FLAG_KMD;
2580 
2581 	/*
2582 	 * If context is a parent, we need to register a process descriptor
2583 	 * describing a work queue and register all child contexts.
2584 	 */
2585 	if (intel_context_is_parent(ce)) {
2586 		struct guc_sched_wq_desc *wq_desc;
2587 		u64 wq_desc_offset, wq_base_offset;
2588 
2589 		ce->parallel.guc.wqi_tail = 0;
2590 		ce->parallel.guc.wqi_head = 0;
2591 
2592 		wq_desc_offset = i915_ggtt_offset(ce->state) +
2593 				 __get_parent_scratch_offset(ce);
2594 		wq_base_offset = i915_ggtt_offset(ce->state) +
2595 				 __get_wq_offset(ce);
2596 		info->wq_desc_lo = lower_32_bits(wq_desc_offset);
2597 		info->wq_desc_hi = upper_32_bits(wq_desc_offset);
2598 		info->wq_base_lo = lower_32_bits(wq_base_offset);
2599 		info->wq_base_hi = upper_32_bits(wq_base_offset);
2600 		info->wq_size = WQ_SIZE;
2601 
2602 		wq_desc = __get_wq_desc_v70(ce);
2603 		memset(wq_desc, 0, sizeof(*wq_desc));
2604 		wq_desc->wq_status = WQ_STATUS_ACTIVE;
2605 
2606 		ce->parallel.guc.wq_head = &wq_desc->head;
2607 		ce->parallel.guc.wq_tail = &wq_desc->tail;
2608 		ce->parallel.guc.wq_status = &wq_desc->wq_status;
2609 
2610 		clear_children_join_go_memory(ce);
2611 	}
2612 }
2613 
2614 static int try_context_registration(struct intel_context *ce, bool loop)
2615 {
2616 	struct intel_engine_cs *engine = ce->engine;
2617 	struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
2618 	struct intel_guc *guc = &engine->gt->uc.guc;
2619 	intel_wakeref_t wakeref;
2620 	u32 ctx_id = ce->guc_id.id;
2621 	bool context_registered;
2622 	int ret = 0;
2623 
2624 	GEM_BUG_ON(!sched_state_is_init(ce));
2625 
2626 	context_registered = ctx_id_mapped(guc, ctx_id);
2627 
2628 	clr_ctx_id_mapping(guc, ctx_id);
2629 	set_ctx_id_mapping(guc, ctx_id, ce);
2630 
2631 	/*
2632 	 * The context_lookup xarray is used to determine if the hardware
2633 	 * context is currently registered. There are two cases in which it
2634 	 * could be registered either the guc_id has been stolen from another
2635 	 * context or the lrc descriptor address of this context has changed. In
2636 	 * either case the context needs to be deregistered with the GuC before
2637 	 * registering this context.
2638 	 */
2639 	if (context_registered) {
2640 		bool disabled;
2641 		unsigned long flags;
2642 
2643 		trace_intel_context_steal_guc_id(ce);
2644 		GEM_BUG_ON(!loop);
2645 
2646 		/* Seal race with Reset */
2647 		spin_lock_irqsave(&ce->guc_state.lock, flags);
2648 		disabled = submission_disabled(guc);
2649 		if (likely(!disabled)) {
2650 			set_context_wait_for_deregister_to_register(ce);
2651 			intel_context_get(ce);
2652 		}
2653 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2654 		if (unlikely(disabled)) {
2655 			clr_ctx_id_mapping(guc, ctx_id);
2656 			return 0;	/* Will get registered later */
2657 		}
2658 
2659 		/*
2660 		 * If stealing the guc_id, this ce has the same guc_id as the
2661 		 * context whose guc_id was stolen.
2662 		 */
2663 		with_intel_runtime_pm(runtime_pm, wakeref)
2664 			ret = deregister_context(ce, ce->guc_id.id);
2665 		if (unlikely(ret == -ENODEV))
2666 			ret = 0;	/* Will get registered later */
2667 	} else {
2668 		with_intel_runtime_pm(runtime_pm, wakeref)
2669 			ret = register_context(ce, loop);
2670 		if (unlikely(ret == -EBUSY)) {
2671 			clr_ctx_id_mapping(guc, ctx_id);
2672 		} else if (unlikely(ret == -ENODEV)) {
2673 			clr_ctx_id_mapping(guc, ctx_id);
2674 			ret = 0;	/* Will get registered later */
2675 		}
2676 	}
2677 
2678 	return ret;
2679 }
2680 
2681 static int __guc_context_pre_pin(struct intel_context *ce,
2682 				 struct intel_engine_cs *engine,
2683 				 struct i915_gem_ww_ctx *ww,
2684 				 void **vaddr)
2685 {
2686 	return lrc_pre_pin(ce, engine, ww, vaddr);
2687 }
2688 
2689 static int __guc_context_pin(struct intel_context *ce,
2690 			     struct intel_engine_cs *engine,
2691 			     void *vaddr)
2692 {
2693 	if (i915_ggtt_offset(ce->state) !=
2694 	    (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
2695 		set_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
2696 
2697 	/*
2698 	 * GuC context gets pinned in guc_request_alloc. See that function for
2699 	 * explaination of why.
2700 	 */
2701 
2702 	return lrc_pin(ce, engine, vaddr);
2703 }
2704 
2705 static int guc_context_pre_pin(struct intel_context *ce,
2706 			       struct i915_gem_ww_ctx *ww,
2707 			       void **vaddr)
2708 {
2709 	return __guc_context_pre_pin(ce, ce->engine, ww, vaddr);
2710 }
2711 
2712 static int guc_context_pin(struct intel_context *ce, void *vaddr)
2713 {
2714 	int ret = __guc_context_pin(ce, ce->engine, vaddr);
2715 
2716 	if (likely(!ret && !intel_context_is_barrier(ce)))
2717 		intel_engine_pm_get(ce->engine);
2718 
2719 	return ret;
2720 }
2721 
2722 static void guc_context_unpin(struct intel_context *ce)
2723 {
2724 	struct intel_guc *guc = ce_to_guc(ce);
2725 
2726 	unpin_guc_id(guc, ce);
2727 	lrc_unpin(ce);
2728 
2729 	if (likely(!intel_context_is_barrier(ce)))
2730 		intel_engine_pm_put_async(ce->engine);
2731 }
2732 
2733 static void guc_context_post_unpin(struct intel_context *ce)
2734 {
2735 	lrc_post_unpin(ce);
2736 }
2737 
2738 static void __guc_context_sched_enable(struct intel_guc *guc,
2739 				       struct intel_context *ce)
2740 {
2741 	u32 action[] = {
2742 		INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
2743 		ce->guc_id.id,
2744 		GUC_CONTEXT_ENABLE
2745 	};
2746 
2747 	trace_intel_context_sched_enable(ce);
2748 
2749 	guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2750 				      G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
2751 }
2752 
2753 static void __guc_context_sched_disable(struct intel_guc *guc,
2754 					struct intel_context *ce,
2755 					u16 guc_id)
2756 {
2757 	u32 action[] = {
2758 		INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
2759 		guc_id,	/* ce->guc_id.id not stable */
2760 		GUC_CONTEXT_DISABLE
2761 	};
2762 
2763 	GEM_BUG_ON(guc_id == GUC_INVALID_CONTEXT_ID);
2764 
2765 	GEM_BUG_ON(intel_context_is_child(ce));
2766 	trace_intel_context_sched_disable(ce);
2767 
2768 	guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2769 				      G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
2770 }
2771 
2772 static void guc_blocked_fence_complete(struct intel_context *ce)
2773 {
2774 	lockdep_assert_held(&ce->guc_state.lock);
2775 
2776 	if (!i915_sw_fence_done(&ce->guc_state.blocked))
2777 		i915_sw_fence_complete(&ce->guc_state.blocked);
2778 }
2779 
2780 static void guc_blocked_fence_reinit(struct intel_context *ce)
2781 {
2782 	lockdep_assert_held(&ce->guc_state.lock);
2783 	GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked));
2784 
2785 	/*
2786 	 * This fence is always complete unless a pending schedule disable is
2787 	 * outstanding. We arm the fence here and complete it when we receive
2788 	 * the pending schedule disable complete message.
2789 	 */
2790 	i915_sw_fence_fini(&ce->guc_state.blocked);
2791 	i915_sw_fence_reinit(&ce->guc_state.blocked);
2792 	i915_sw_fence_await(&ce->guc_state.blocked);
2793 	i915_sw_fence_commit(&ce->guc_state.blocked);
2794 }
2795 
2796 static u16 prep_context_pending_disable(struct intel_context *ce)
2797 {
2798 	lockdep_assert_held(&ce->guc_state.lock);
2799 
2800 	set_context_pending_disable(ce);
2801 	clr_context_enabled(ce);
2802 	guc_blocked_fence_reinit(ce);
2803 	intel_context_get(ce);
2804 
2805 	return ce->guc_id.id;
2806 }
2807 
2808 static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
2809 {
2810 	struct intel_guc *guc = ce_to_guc(ce);
2811 	unsigned long flags;
2812 	struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
2813 	intel_wakeref_t wakeref;
2814 	u16 guc_id;
2815 	bool enabled;
2816 
2817 	GEM_BUG_ON(intel_context_is_child(ce));
2818 
2819 	spin_lock_irqsave(&ce->guc_state.lock, flags);
2820 
2821 	incr_context_blocked(ce);
2822 
2823 	enabled = context_enabled(ce);
2824 	if (unlikely(!enabled || submission_disabled(guc))) {
2825 		if (enabled)
2826 			clr_context_enabled(ce);
2827 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2828 		return &ce->guc_state.blocked;
2829 	}
2830 
2831 	/*
2832 	 * We add +2 here as the schedule disable complete CTB handler calls
2833 	 * intel_context_sched_disable_unpin (-2 to pin_count).
2834 	 */
2835 	atomic_add(2, &ce->pin_count);
2836 
2837 	guc_id = prep_context_pending_disable(ce);
2838 
2839 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2840 
2841 	with_intel_runtime_pm(runtime_pm, wakeref)
2842 		__guc_context_sched_disable(guc, ce, guc_id);
2843 
2844 	return &ce->guc_state.blocked;
2845 }
2846 
2847 #define SCHED_STATE_MULTI_BLOCKED_MASK \
2848 	(SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED)
2849 #define SCHED_STATE_NO_UNBLOCK \
2850 	(SCHED_STATE_MULTI_BLOCKED_MASK | \
2851 	 SCHED_STATE_PENDING_DISABLE | \
2852 	 SCHED_STATE_BANNED)
2853 
2854 static bool context_cant_unblock(struct intel_context *ce)
2855 {
2856 	lockdep_assert_held(&ce->guc_state.lock);
2857 
2858 	return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
2859 		context_guc_id_invalid(ce) ||
2860 		!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) ||
2861 		!intel_context_is_pinned(ce);
2862 }
2863 
2864 static void guc_context_unblock(struct intel_context *ce)
2865 {
2866 	struct intel_guc *guc = ce_to_guc(ce);
2867 	unsigned long flags;
2868 	struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
2869 	intel_wakeref_t wakeref;
2870 	bool enable;
2871 
2872 	GEM_BUG_ON(context_enabled(ce));
2873 	GEM_BUG_ON(intel_context_is_child(ce));
2874 
2875 	spin_lock_irqsave(&ce->guc_state.lock, flags);
2876 
2877 	if (unlikely(submission_disabled(guc) ||
2878 		     context_cant_unblock(ce))) {
2879 		enable = false;
2880 	} else {
2881 		enable = true;
2882 		set_context_pending_enable(ce);
2883 		set_context_enabled(ce);
2884 		intel_context_get(ce);
2885 	}
2886 
2887 	decr_context_blocked(ce);
2888 
2889 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2890 
2891 	if (enable) {
2892 		with_intel_runtime_pm(runtime_pm, wakeref)
2893 			__guc_context_sched_enable(guc, ce);
2894 	}
2895 }
2896 
2897 static void guc_context_cancel_request(struct intel_context *ce,
2898 				       struct i915_request *rq)
2899 {
2900 	struct intel_context *block_context =
2901 		request_to_scheduling_context(rq);
2902 
2903 	if (i915_sw_fence_signaled(&rq->submit)) {
2904 		struct i915_sw_fence *fence;
2905 
2906 		intel_context_get(ce);
2907 		fence = guc_context_block(block_context);
2908 		i915_sw_fence_wait(fence);
2909 		if (!i915_request_completed(rq)) {
2910 			__i915_request_skip(rq);
2911 			guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
2912 					true);
2913 		}
2914 
2915 		guc_context_unblock(block_context);
2916 		intel_context_put(ce);
2917 	}
2918 }
2919 
2920 static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
2921 						 u16 guc_id,
2922 						 u32 preemption_timeout)
2923 {
2924 	if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0)) {
2925 		struct context_policy policy;
2926 
2927 		__guc_context_policy_start_klv(&policy, guc_id);
2928 		__guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
2929 		__guc_context_set_context_policies(guc, &policy, true);
2930 	} else {
2931 		u32 action[] = {
2932 			INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT,
2933 			guc_id,
2934 			preemption_timeout
2935 		};
2936 
2937 		intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
2938 	}
2939 }
2940 
2941 static void
2942 guc_context_revoke(struct intel_context *ce, struct i915_request *rq,
2943 		   unsigned int preempt_timeout_ms)
2944 {
2945 	struct intel_guc *guc = ce_to_guc(ce);
2946 	struct intel_runtime_pm *runtime_pm =
2947 		&ce->engine->gt->i915->runtime_pm;
2948 	intel_wakeref_t wakeref;
2949 	unsigned long flags;
2950 
2951 	GEM_BUG_ON(intel_context_is_child(ce));
2952 
2953 	guc_flush_submissions(guc);
2954 
2955 	spin_lock_irqsave(&ce->guc_state.lock, flags);
2956 	set_context_banned(ce);
2957 
2958 	if (submission_disabled(guc) ||
2959 	    (!context_enabled(ce) && !context_pending_disable(ce))) {
2960 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2961 
2962 		guc_cancel_context_requests(ce);
2963 		intel_engine_signal_breadcrumbs(ce->engine);
2964 	} else if (!context_pending_disable(ce)) {
2965 		u16 guc_id;
2966 
2967 		/*
2968 		 * We add +2 here as the schedule disable complete CTB handler
2969 		 * calls intel_context_sched_disable_unpin (-2 to pin_count).
2970 		 */
2971 		atomic_add(2, &ce->pin_count);
2972 
2973 		guc_id = prep_context_pending_disable(ce);
2974 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2975 
2976 		/*
2977 		 * In addition to disabling scheduling, set the preemption
2978 		 * timeout to the minimum value (1 us) so the banned context
2979 		 * gets kicked off the HW ASAP.
2980 		 */
2981 		with_intel_runtime_pm(runtime_pm, wakeref) {
2982 			__guc_context_set_preemption_timeout(guc, guc_id,
2983 							     preempt_timeout_ms);
2984 			__guc_context_sched_disable(guc, ce, guc_id);
2985 		}
2986 	} else {
2987 		if (!context_guc_id_invalid(ce))
2988 			with_intel_runtime_pm(runtime_pm, wakeref)
2989 				__guc_context_set_preemption_timeout(guc,
2990 								     ce->guc_id.id,
2991 								     preempt_timeout_ms);
2992 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2993 	}
2994 }
2995 
2996 static void guc_context_sched_disable(struct intel_context *ce)
2997 {
2998 	struct intel_guc *guc = ce_to_guc(ce);
2999 	unsigned long flags;
3000 	struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
3001 	intel_wakeref_t wakeref;
3002 	u16 guc_id;
3003 
3004 	GEM_BUG_ON(intel_context_is_child(ce));
3005 
3006 	spin_lock_irqsave(&ce->guc_state.lock, flags);
3007 
3008 	/*
3009 	 * We have to check if the context has been disabled by another thread,
3010 	 * check if submssion has been disabled to seal a race with reset and
3011 	 * finally check if any more requests have been committed to the
3012 	 * context ensursing that a request doesn't slip through the
3013 	 * 'context_pending_disable' fence.
3014 	 */
3015 	if (unlikely(!context_enabled(ce) || submission_disabled(guc) ||
3016 		     context_has_committed_requests(ce))) {
3017 		clr_context_enabled(ce);
3018 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3019 		goto unpin;
3020 	}
3021 	guc_id = prep_context_pending_disable(ce);
3022 
3023 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3024 
3025 	with_intel_runtime_pm(runtime_pm, wakeref)
3026 		__guc_context_sched_disable(guc, ce, guc_id);
3027 
3028 	return;
3029 unpin:
3030 	intel_context_sched_disable_unpin(ce);
3031 }
3032 
3033 static inline void guc_lrc_desc_unpin(struct intel_context *ce)
3034 {
3035 	struct intel_guc *guc = ce_to_guc(ce);
3036 	struct intel_gt *gt = guc_to_gt(guc);
3037 	unsigned long flags;
3038 	bool disabled;
3039 
3040 	GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
3041 	GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
3042 	GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
3043 	GEM_BUG_ON(context_enabled(ce));
3044 
3045 	/* Seal race with Reset */
3046 	spin_lock_irqsave(&ce->guc_state.lock, flags);
3047 	disabled = submission_disabled(guc);
3048 	if (likely(!disabled)) {
3049 		__intel_gt_pm_get(gt);
3050 		set_context_destroyed(ce);
3051 		clr_context_registered(ce);
3052 	}
3053 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3054 	if (unlikely(disabled)) {
3055 		release_guc_id(guc, ce);
3056 		__guc_context_destroy(ce);
3057 		return;
3058 	}
3059 
3060 	deregister_context(ce, ce->guc_id.id);
3061 }
3062 
3063 static void __guc_context_destroy(struct intel_context *ce)
3064 {
3065 	GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
3066 		   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
3067 		   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
3068 		   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
3069 	GEM_BUG_ON(ce->guc_state.number_committed_requests);
3070 
3071 	lrc_fini(ce);
3072 	intel_context_fini(ce);
3073 
3074 	if (intel_engine_is_virtual(ce->engine)) {
3075 		struct guc_virtual_engine *ve =
3076 			container_of(ce, typeof(*ve), context);
3077 
3078 		if (ve->base.breadcrumbs)
3079 			intel_breadcrumbs_put(ve->base.breadcrumbs);
3080 
3081 		kfree(ve);
3082 	} else {
3083 		intel_context_free(ce);
3084 	}
3085 }
3086 
3087 static void guc_flush_destroyed_contexts(struct intel_guc *guc)
3088 {
3089 	struct intel_context *ce;
3090 	unsigned long flags;
3091 
3092 	GEM_BUG_ON(!submission_disabled(guc) &&
3093 		   guc_submission_initialized(guc));
3094 
3095 	while (!list_empty(&guc->submission_state.destroyed_contexts)) {
3096 		spin_lock_irqsave(&guc->submission_state.lock, flags);
3097 		ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
3098 					      struct intel_context,
3099 					      destroyed_link);
3100 		if (ce)
3101 			list_del_init(&ce->destroyed_link);
3102 		spin_unlock_irqrestore(&guc->submission_state.lock, flags);
3103 
3104 		if (!ce)
3105 			break;
3106 
3107 		release_guc_id(guc, ce);
3108 		__guc_context_destroy(ce);
3109 	}
3110 }
3111 
3112 static void deregister_destroyed_contexts(struct intel_guc *guc)
3113 {
3114 	struct intel_context *ce;
3115 	unsigned long flags;
3116 
3117 	while (!list_empty(&guc->submission_state.destroyed_contexts)) {
3118 		spin_lock_irqsave(&guc->submission_state.lock, flags);
3119 		ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
3120 					      struct intel_context,
3121 					      destroyed_link);
3122 		if (ce)
3123 			list_del_init(&ce->destroyed_link);
3124 		spin_unlock_irqrestore(&guc->submission_state.lock, flags);
3125 
3126 		if (!ce)
3127 			break;
3128 
3129 		guc_lrc_desc_unpin(ce);
3130 	}
3131 }
3132 
3133 static void destroyed_worker_func(struct work_struct *w)
3134 {
3135 	struct intel_guc *guc = container_of(w, struct intel_guc,
3136 					     submission_state.destroyed_worker);
3137 	struct intel_gt *gt = guc_to_gt(guc);
3138 	int tmp;
3139 
3140 	with_intel_gt_pm(gt, tmp)
3141 		deregister_destroyed_contexts(guc);
3142 }
3143 
3144 static void guc_context_destroy(struct kref *kref)
3145 {
3146 	struct intel_context *ce = container_of(kref, typeof(*ce), ref);
3147 	struct intel_guc *guc = ce_to_guc(ce);
3148 	unsigned long flags;
3149 	bool destroy;
3150 
3151 	/*
3152 	 * If the guc_id is invalid this context has been stolen and we can free
3153 	 * it immediately. Also can be freed immediately if the context is not
3154 	 * registered with the GuC or the GuC is in the middle of a reset.
3155 	 */
3156 	spin_lock_irqsave(&guc->submission_state.lock, flags);
3157 	destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
3158 		!ctx_id_mapped(guc, ce->guc_id.id);
3159 	if (likely(!destroy)) {
3160 		if (!list_empty(&ce->guc_id.link))
3161 			list_del_init(&ce->guc_id.link);
3162 		list_add_tail(&ce->destroyed_link,
3163 			      &guc->submission_state.destroyed_contexts);
3164 	} else {
3165 		__release_guc_id(guc, ce);
3166 	}
3167 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
3168 	if (unlikely(destroy)) {
3169 		__guc_context_destroy(ce);
3170 		return;
3171 	}
3172 
3173 	/*
3174 	 * We use a worker to issue the H2G to deregister the context as we can
3175 	 * take the GT PM for the first time which isn't allowed from an atomic
3176 	 * context.
3177 	 */
3178 	queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker);
3179 }
3180 
3181 static int guc_context_alloc(struct intel_context *ce)
3182 {
3183 	return lrc_alloc(ce, ce->engine);
3184 }
3185 
3186 static void __guc_context_set_prio(struct intel_guc *guc,
3187 				   struct intel_context *ce)
3188 {
3189 	if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0)) {
3190 		struct context_policy policy;
3191 
3192 		__guc_context_policy_start_klv(&policy, ce->guc_id.id);
3193 		__guc_context_policy_add_priority(&policy, ce->guc_state.prio);
3194 		__guc_context_set_context_policies(guc, &policy, true);
3195 	} else {
3196 		u32 action[] = {
3197 			INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY,
3198 			ce->guc_id.id,
3199 			ce->guc_state.prio,
3200 		};
3201 
3202 		guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
3203 	}
3204 }
3205 
3206 static void guc_context_set_prio(struct intel_guc *guc,
3207 				 struct intel_context *ce,
3208 				 u8 prio)
3209 {
3210 	GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
3211 		   prio > GUC_CLIENT_PRIORITY_NORMAL);
3212 	lockdep_assert_held(&ce->guc_state.lock);
3213 
3214 	if (ce->guc_state.prio == prio || submission_disabled(guc) ||
3215 	    !context_registered(ce)) {
3216 		ce->guc_state.prio = prio;
3217 		return;
3218 	}
3219 
3220 	ce->guc_state.prio = prio;
3221 	__guc_context_set_prio(guc, ce);
3222 
3223 	trace_intel_context_set_prio(ce);
3224 }
3225 
3226 static inline u8 map_i915_prio_to_guc_prio(int prio)
3227 {
3228 	if (prio == I915_PRIORITY_NORMAL)
3229 		return GUC_CLIENT_PRIORITY_KMD_NORMAL;
3230 	else if (prio < I915_PRIORITY_NORMAL)
3231 		return GUC_CLIENT_PRIORITY_NORMAL;
3232 	else if (prio < I915_PRIORITY_DISPLAY)
3233 		return GUC_CLIENT_PRIORITY_HIGH;
3234 	else
3235 		return GUC_CLIENT_PRIORITY_KMD_HIGH;
3236 }
3237 
3238 static inline void add_context_inflight_prio(struct intel_context *ce,
3239 					     u8 guc_prio)
3240 {
3241 	lockdep_assert_held(&ce->guc_state.lock);
3242 	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
3243 
3244 	++ce->guc_state.prio_count[guc_prio];
3245 
3246 	/* Overflow protection */
3247 	GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
3248 }
3249 
3250 static inline void sub_context_inflight_prio(struct intel_context *ce,
3251 					     u8 guc_prio)
3252 {
3253 	lockdep_assert_held(&ce->guc_state.lock);
3254 	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
3255 
3256 	/* Underflow protection */
3257 	GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
3258 
3259 	--ce->guc_state.prio_count[guc_prio];
3260 }
3261 
3262 static inline void update_context_prio(struct intel_context *ce)
3263 {
3264 	struct intel_guc *guc = &ce->engine->gt->uc.guc;
3265 	int i;
3266 
3267 	BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0);
3268 	BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);
3269 
3270 	lockdep_assert_held(&ce->guc_state.lock);
3271 
3272 	for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) {
3273 		if (ce->guc_state.prio_count[i]) {
3274 			guc_context_set_prio(guc, ce, i);
3275 			break;
3276 		}
3277 	}
3278 }
3279 
3280 static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio)
3281 {
3282 	/* Lower value is higher priority */
3283 	return new_guc_prio < old_guc_prio;
3284 }
3285 
3286 static void add_to_context(struct i915_request *rq)
3287 {
3288 	struct intel_context *ce = request_to_scheduling_context(rq);
3289 	u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq));
3290 
3291 	GEM_BUG_ON(intel_context_is_child(ce));
3292 	GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
3293 
3294 	spin_lock(&ce->guc_state.lock);
3295 	list_move_tail(&rq->sched.link, &ce->guc_state.requests);
3296 
3297 	if (rq->guc_prio == GUC_PRIO_INIT) {
3298 		rq->guc_prio = new_guc_prio;
3299 		add_context_inflight_prio(ce, rq->guc_prio);
3300 	} else if (new_guc_prio_higher(rq->guc_prio, new_guc_prio)) {
3301 		sub_context_inflight_prio(ce, rq->guc_prio);
3302 		rq->guc_prio = new_guc_prio;
3303 		add_context_inflight_prio(ce, rq->guc_prio);
3304 	}
3305 	update_context_prio(ce);
3306 
3307 	spin_unlock(&ce->guc_state.lock);
3308 }
3309 
3310 static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
3311 {
3312 	lockdep_assert_held(&ce->guc_state.lock);
3313 
3314 	if (rq->guc_prio != GUC_PRIO_INIT &&
3315 	    rq->guc_prio != GUC_PRIO_FINI) {
3316 		sub_context_inflight_prio(ce, rq->guc_prio);
3317 		update_context_prio(ce);
3318 	}
3319 	rq->guc_prio = GUC_PRIO_FINI;
3320 }
3321 
3322 static void remove_from_context(struct i915_request *rq)
3323 {
3324 	struct intel_context *ce = request_to_scheduling_context(rq);
3325 
3326 	GEM_BUG_ON(intel_context_is_child(ce));
3327 
3328 	spin_lock_irq(&ce->guc_state.lock);
3329 
3330 	list_del_init(&rq->sched.link);
3331 	clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
3332 
3333 	/* Prevent further __await_execution() registering a cb, then flush */
3334 	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
3335 
3336 	guc_prio_fini(rq, ce);
3337 
3338 	decr_context_committed_requests(ce);
3339 
3340 	spin_unlock_irq(&ce->guc_state.lock);
3341 
3342 	atomic_dec(&ce->guc_id.ref);
3343 	i915_request_notify_execute_cb_imm(rq);
3344 }
3345 
3346 static const struct intel_context_ops guc_context_ops = {
3347 	.alloc = guc_context_alloc,
3348 
3349 	.pre_pin = guc_context_pre_pin,
3350 	.pin = guc_context_pin,
3351 	.unpin = guc_context_unpin,
3352 	.post_unpin = guc_context_post_unpin,
3353 
3354 	.revoke = guc_context_revoke,
3355 
3356 	.cancel_request = guc_context_cancel_request,
3357 
3358 	.enter = intel_context_enter_engine,
3359 	.exit = intel_context_exit_engine,
3360 
3361 	.sched_disable = guc_context_sched_disable,
3362 
3363 	.reset = lrc_reset,
3364 	.destroy = guc_context_destroy,
3365 
3366 	.create_virtual = guc_create_virtual,
3367 	.create_parallel = guc_create_parallel,
3368 };
3369 
3370 static void submit_work_cb(struct irq_work *wrk)
3371 {
3372 	struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
3373 
3374 	might_lock(&rq->engine->sched_engine->lock);
3375 	i915_sw_fence_complete(&rq->submit);
3376 }
3377 
3378 static void __guc_signal_context_fence(struct intel_context *ce)
3379 {
3380 	struct i915_request *rq, *rn;
3381 
3382 	lockdep_assert_held(&ce->guc_state.lock);
3383 
3384 	if (!list_empty(&ce->guc_state.fences))
3385 		trace_intel_context_fence_release(ce);
3386 
3387 	/*
3388 	 * Use an IRQ to ensure locking order of sched_engine->lock ->
3389 	 * ce->guc_state.lock is preserved.
3390 	 */
3391 	list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
3392 				 guc_fence_link) {
3393 		list_del(&rq->guc_fence_link);
3394 		irq_work_queue(&rq->submit_work);
3395 	}
3396 
3397 	INIT_LIST_HEAD(&ce->guc_state.fences);
3398 }
3399 
3400 static void guc_signal_context_fence(struct intel_context *ce)
3401 {
3402 	unsigned long flags;
3403 
3404 	GEM_BUG_ON(intel_context_is_child(ce));
3405 
3406 	spin_lock_irqsave(&ce->guc_state.lock, flags);
3407 	clr_context_wait_for_deregister_to_register(ce);
3408 	__guc_signal_context_fence(ce);
3409 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3410 }
3411 
3412 static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
3413 {
3414 	return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
3415 		!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) &&
3416 		!submission_disabled(ce_to_guc(ce));
3417 }
3418 
3419 static void guc_context_init(struct intel_context *ce)
3420 {
3421 	const struct i915_gem_context *ctx;
3422 	int prio = I915_CONTEXT_DEFAULT_PRIORITY;
3423 
3424 	rcu_read_lock();
3425 	ctx = rcu_dereference(ce->gem_context);
3426 	if (ctx)
3427 		prio = ctx->sched.priority;
3428 	rcu_read_unlock();
3429 
3430 	ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
3431 	set_bit(CONTEXT_GUC_INIT, &ce->flags);
3432 }
3433 
3434 static int guc_request_alloc(struct i915_request *rq)
3435 {
3436 	struct intel_context *ce = request_to_scheduling_context(rq);
3437 	struct intel_guc *guc = ce_to_guc(ce);
3438 	unsigned long flags;
3439 	int ret;
3440 
3441 	GEM_BUG_ON(!intel_context_is_pinned(rq->context));
3442 
3443 	/*
3444 	 * Flush enough space to reduce the likelihood of waiting after
3445 	 * we start building the request - in which case we will just
3446 	 * have to repeat work.
3447 	 */
3448 	rq->reserved_space += GUC_REQUEST_SIZE;
3449 
3450 	/*
3451 	 * Note that after this point, we have committed to using
3452 	 * this request as it is being used to both track the
3453 	 * state of engine initialisation and liveness of the
3454 	 * golden renderstate above. Think twice before you try
3455 	 * to cancel/unwind this request now.
3456 	 */
3457 
3458 	/* Unconditionally invalidate GPU caches and TLBs. */
3459 	ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
3460 	if (ret)
3461 		return ret;
3462 
3463 	rq->reserved_space -= GUC_REQUEST_SIZE;
3464 
3465 	if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
3466 		guc_context_init(ce);
3467 
3468 	/*
3469 	 * Call pin_guc_id here rather than in the pinning step as with
3470 	 * dma_resv, contexts can be repeatedly pinned / unpinned trashing the
3471 	 * guc_id and creating horrible race conditions. This is especially bad
3472 	 * when guc_id are being stolen due to over subscription. By the time
3473 	 * this function is reached, it is guaranteed that the guc_id will be
3474 	 * persistent until the generated request is retired. Thus, sealing these
3475 	 * race conditions. It is still safe to fail here if guc_id are
3476 	 * exhausted and return -EAGAIN to the user indicating that they can try
3477 	 * again in the future.
3478 	 *
3479 	 * There is no need for a lock here as the timeline mutex ensures at
3480 	 * most one context can be executing this code path at once. The
3481 	 * guc_id_ref is incremented once for every request in flight and
3482 	 * decremented on each retire. When it is zero, a lock around the
3483 	 * increment (in pin_guc_id) is needed to seal a race with unpin_guc_id.
3484 	 */
3485 	if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
3486 		goto out;
3487 
3488 	ret = pin_guc_id(guc, ce);	/* returns 1 if new guc_id assigned */
3489 	if (unlikely(ret < 0))
3490 		return ret;
3491 	if (context_needs_register(ce, !!ret)) {
3492 		ret = try_context_registration(ce, true);
3493 		if (unlikely(ret)) {	/* unwind */
3494 			if (ret == -EPIPE) {
3495 				disable_submission(guc);
3496 				goto out;	/* GPU will be reset */
3497 			}
3498 			atomic_dec(&ce->guc_id.ref);
3499 			unpin_guc_id(guc, ce);
3500 			return ret;
3501 		}
3502 	}
3503 
3504 	clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
3505 
3506 out:
3507 	/*
3508 	 * We block all requests on this context if a G2H is pending for a
3509 	 * schedule disable or context deregistration as the GuC will fail a
3510 	 * schedule enable or context registration if either G2H is pending
3511 	 * respectfully. Once a G2H returns, the fence is released that is
3512 	 * blocking these requests (see guc_signal_context_fence).
3513 	 */
3514 	spin_lock_irqsave(&ce->guc_state.lock, flags);
3515 	if (context_wait_for_deregister_to_register(ce) ||
3516 	    context_pending_disable(ce)) {
3517 		init_irq_work(&rq->submit_work, submit_work_cb);
3518 		i915_sw_fence_await(&rq->submit);
3519 
3520 		list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
3521 	}
3522 	incr_context_committed_requests(ce);
3523 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3524 
3525 	return 0;
3526 }
3527 
3528 static int guc_virtual_context_pre_pin(struct intel_context *ce,
3529 				       struct i915_gem_ww_ctx *ww,
3530 				       void **vaddr)
3531 {
3532 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3533 
3534 	return __guc_context_pre_pin(ce, engine, ww, vaddr);
3535 }
3536 
3537 static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
3538 {
3539 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3540 	int ret = __guc_context_pin(ce, engine, vaddr);
3541 	intel_engine_mask_t tmp, mask = ce->engine->mask;
3542 
3543 	if (likely(!ret))
3544 		for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3545 			intel_engine_pm_get(engine);
3546 
3547 	return ret;
3548 }
3549 
3550 static void guc_virtual_context_unpin(struct intel_context *ce)
3551 {
3552 	intel_engine_mask_t tmp, mask = ce->engine->mask;
3553 	struct intel_engine_cs *engine;
3554 	struct intel_guc *guc = ce_to_guc(ce);
3555 
3556 	GEM_BUG_ON(context_enabled(ce));
3557 	GEM_BUG_ON(intel_context_is_barrier(ce));
3558 
3559 	unpin_guc_id(guc, ce);
3560 	lrc_unpin(ce);
3561 
3562 	for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3563 		intel_engine_pm_put_async(engine);
3564 }
3565 
3566 static void guc_virtual_context_enter(struct intel_context *ce)
3567 {
3568 	intel_engine_mask_t tmp, mask = ce->engine->mask;
3569 	struct intel_engine_cs *engine;
3570 
3571 	for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3572 		intel_engine_pm_get(engine);
3573 
3574 	intel_timeline_enter(ce->timeline);
3575 }
3576 
3577 static void guc_virtual_context_exit(struct intel_context *ce)
3578 {
3579 	intel_engine_mask_t tmp, mask = ce->engine->mask;
3580 	struct intel_engine_cs *engine;
3581 
3582 	for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3583 		intel_engine_pm_put(engine);
3584 
3585 	intel_timeline_exit(ce->timeline);
3586 }
3587 
3588 static int guc_virtual_context_alloc(struct intel_context *ce)
3589 {
3590 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3591 
3592 	return lrc_alloc(ce, engine);
3593 }
3594 
3595 static const struct intel_context_ops virtual_guc_context_ops = {
3596 	.alloc = guc_virtual_context_alloc,
3597 
3598 	.pre_pin = guc_virtual_context_pre_pin,
3599 	.pin = guc_virtual_context_pin,
3600 	.unpin = guc_virtual_context_unpin,
3601 	.post_unpin = guc_context_post_unpin,
3602 
3603 	.revoke = guc_context_revoke,
3604 
3605 	.cancel_request = guc_context_cancel_request,
3606 
3607 	.enter = guc_virtual_context_enter,
3608 	.exit = guc_virtual_context_exit,
3609 
3610 	.sched_disable = guc_context_sched_disable,
3611 
3612 	.destroy = guc_context_destroy,
3613 
3614 	.get_sibling = guc_virtual_get_sibling,
3615 };
3616 
3617 static int guc_parent_context_pin(struct intel_context *ce, void *vaddr)
3618 {
3619 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3620 	struct intel_guc *guc = ce_to_guc(ce);
3621 	int ret;
3622 
3623 	GEM_BUG_ON(!intel_context_is_parent(ce));
3624 	GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3625 
3626 	ret = pin_guc_id(guc, ce);
3627 	if (unlikely(ret < 0))
3628 		return ret;
3629 
3630 	return __guc_context_pin(ce, engine, vaddr);
3631 }
3632 
3633 static int guc_child_context_pin(struct intel_context *ce, void *vaddr)
3634 {
3635 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3636 
3637 	GEM_BUG_ON(!intel_context_is_child(ce));
3638 	GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3639 
3640 	__intel_context_pin(ce->parallel.parent);
3641 	return __guc_context_pin(ce, engine, vaddr);
3642 }
3643 
3644 static void guc_parent_context_unpin(struct intel_context *ce)
3645 {
3646 	struct intel_guc *guc = ce_to_guc(ce);
3647 
3648 	GEM_BUG_ON(context_enabled(ce));
3649 	GEM_BUG_ON(intel_context_is_barrier(ce));
3650 	GEM_BUG_ON(!intel_context_is_parent(ce));
3651 	GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3652 
3653 	unpin_guc_id(guc, ce);
3654 	lrc_unpin(ce);
3655 }
3656 
3657 static void guc_child_context_unpin(struct intel_context *ce)
3658 {
3659 	GEM_BUG_ON(context_enabled(ce));
3660 	GEM_BUG_ON(intel_context_is_barrier(ce));
3661 	GEM_BUG_ON(!intel_context_is_child(ce));
3662 	GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3663 
3664 	lrc_unpin(ce);
3665 }
3666 
3667 static void guc_child_context_post_unpin(struct intel_context *ce)
3668 {
3669 	GEM_BUG_ON(!intel_context_is_child(ce));
3670 	GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
3671 	GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3672 
3673 	lrc_post_unpin(ce);
3674 	intel_context_unpin(ce->parallel.parent);
3675 }
3676 
3677 static void guc_child_context_destroy(struct kref *kref)
3678 {
3679 	struct intel_context *ce = container_of(kref, typeof(*ce), ref);
3680 
3681 	__guc_context_destroy(ce);
3682 }
3683 
3684 static const struct intel_context_ops virtual_parent_context_ops = {
3685 	.alloc = guc_virtual_context_alloc,
3686 
3687 	.pre_pin = guc_context_pre_pin,
3688 	.pin = guc_parent_context_pin,
3689 	.unpin = guc_parent_context_unpin,
3690 	.post_unpin = guc_context_post_unpin,
3691 
3692 	.revoke = guc_context_revoke,
3693 
3694 	.cancel_request = guc_context_cancel_request,
3695 
3696 	.enter = guc_virtual_context_enter,
3697 	.exit = guc_virtual_context_exit,
3698 
3699 	.sched_disable = guc_context_sched_disable,
3700 
3701 	.destroy = guc_context_destroy,
3702 
3703 	.get_sibling = guc_virtual_get_sibling,
3704 };
3705 
3706 static const struct intel_context_ops virtual_child_context_ops = {
3707 	.alloc = guc_virtual_context_alloc,
3708 
3709 	.pre_pin = guc_context_pre_pin,
3710 	.pin = guc_child_context_pin,
3711 	.unpin = guc_child_context_unpin,
3712 	.post_unpin = guc_child_context_post_unpin,
3713 
3714 	.cancel_request = guc_context_cancel_request,
3715 
3716 	.enter = guc_virtual_context_enter,
3717 	.exit = guc_virtual_context_exit,
3718 
3719 	.destroy = guc_child_context_destroy,
3720 
3721 	.get_sibling = guc_virtual_get_sibling,
3722 };
3723 
3724 /*
3725  * The below override of the breadcrumbs is enabled when the user configures a
3726  * context for parallel submission (multi-lrc, parent-child).
3727  *
3728  * The overridden breadcrumbs implements an algorithm which allows the GuC to
3729  * safely preempt all the hw contexts configured for parallel submission
3730  * between each BB. The contract between the i915 and GuC is if the parent
3731  * context can be preempted, all the children can be preempted, and the GuC will
3732  * always try to preempt the parent before the children. A handshake between the
3733  * parent / children breadcrumbs ensures the i915 holds up its end of the deal
3734  * creating a window to preempt between each set of BBs.
3735  */
3736 static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
3737 						     u64 offset, u32 len,
3738 						     const unsigned int flags);
3739 static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
3740 						    u64 offset, u32 len,
3741 						    const unsigned int flags);
3742 static u32 *
3743 emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
3744 						 u32 *cs);
3745 static u32 *
3746 emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
3747 						u32 *cs);
3748 
3749 static struct intel_context *
3750 guc_create_parallel(struct intel_engine_cs **engines,
3751 		    unsigned int num_siblings,
3752 		    unsigned int width)
3753 {
3754 	struct intel_engine_cs **siblings = NULL;
3755 	struct intel_context *parent = NULL, *ce, *err;
3756 	int i, j;
3757 
3758 	siblings = kmalloc_array(num_siblings,
3759 				 sizeof(*siblings),
3760 				 GFP_KERNEL);
3761 	if (!siblings)
3762 		return ERR_PTR(-ENOMEM);
3763 
3764 	for (i = 0; i < width; ++i) {
3765 		for (j = 0; j < num_siblings; ++j)
3766 			siblings[j] = engines[i * num_siblings + j];
3767 
3768 		ce = intel_engine_create_virtual(siblings, num_siblings,
3769 						 FORCE_VIRTUAL);
3770 		if (IS_ERR(ce)) {
3771 			err = ERR_CAST(ce);
3772 			goto unwind;
3773 		}
3774 
3775 		if (i == 0) {
3776 			parent = ce;
3777 			parent->ops = &virtual_parent_context_ops;
3778 		} else {
3779 			ce->ops = &virtual_child_context_ops;
3780 			intel_context_bind_parent_child(parent, ce);
3781 		}
3782 	}
3783 
3784 	parent->parallel.fence_context = dma_fence_context_alloc(1);
3785 
3786 	parent->engine->emit_bb_start =
3787 		emit_bb_start_parent_no_preempt_mid_batch;
3788 	parent->engine->emit_fini_breadcrumb =
3789 		emit_fini_breadcrumb_parent_no_preempt_mid_batch;
3790 	parent->engine->emit_fini_breadcrumb_dw =
3791 		12 + 4 * parent->parallel.number_children;
3792 	for_each_child(parent, ce) {
3793 		ce->engine->emit_bb_start =
3794 			emit_bb_start_child_no_preempt_mid_batch;
3795 		ce->engine->emit_fini_breadcrumb =
3796 			emit_fini_breadcrumb_child_no_preempt_mid_batch;
3797 		ce->engine->emit_fini_breadcrumb_dw = 16;
3798 	}
3799 
3800 	kfree(siblings);
3801 	return parent;
3802 
3803 unwind:
3804 	if (parent)
3805 		intel_context_put(parent);
3806 	kfree(siblings);
3807 	return err;
3808 }
3809 
3810 static bool
3811 guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b)
3812 {
3813 	struct intel_engine_cs *sibling;
3814 	intel_engine_mask_t tmp, mask = b->engine_mask;
3815 	bool result = false;
3816 
3817 	for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
3818 		result |= intel_engine_irq_enable(sibling);
3819 
3820 	return result;
3821 }
3822 
3823 static void
3824 guc_irq_disable_breadcrumbs(struct intel_breadcrumbs *b)
3825 {
3826 	struct intel_engine_cs *sibling;
3827 	intel_engine_mask_t tmp, mask = b->engine_mask;
3828 
3829 	for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
3830 		intel_engine_irq_disable(sibling);
3831 }
3832 
3833 static void guc_init_breadcrumbs(struct intel_engine_cs *engine)
3834 {
3835 	int i;
3836 
3837 	/*
3838 	 * In GuC submission mode we do not know which physical engine a request
3839 	 * will be scheduled on, this creates a problem because the breadcrumb
3840 	 * interrupt is per physical engine. To work around this we attach
3841 	 * requests and direct all breadcrumb interrupts to the first instance
3842 	 * of an engine per class. In addition all breadcrumb interrupts are
3843 	 * enabled / disabled across an engine class in unison.
3844 	 */
3845 	for (i = 0; i < MAX_ENGINE_INSTANCE; ++i) {
3846 		struct intel_engine_cs *sibling =
3847 			engine->gt->engine_class[engine->class][i];
3848 
3849 		if (sibling) {
3850 			if (engine->breadcrumbs != sibling->breadcrumbs) {
3851 				intel_breadcrumbs_put(engine->breadcrumbs);
3852 				engine->breadcrumbs =
3853 					intel_breadcrumbs_get(sibling->breadcrumbs);
3854 			}
3855 			break;
3856 		}
3857 	}
3858 
3859 	if (engine->breadcrumbs) {
3860 		engine->breadcrumbs->engine_mask |= engine->mask;
3861 		engine->breadcrumbs->irq_enable = guc_irq_enable_breadcrumbs;
3862 		engine->breadcrumbs->irq_disable = guc_irq_disable_breadcrumbs;
3863 	}
3864 }
3865 
3866 static void guc_bump_inflight_request_prio(struct i915_request *rq,
3867 					   int prio)
3868 {
3869 	struct intel_context *ce = request_to_scheduling_context(rq);
3870 	u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
3871 
3872 	/* Short circuit function */
3873 	if (prio < I915_PRIORITY_NORMAL ||
3874 	    rq->guc_prio == GUC_PRIO_FINI ||
3875 	    (rq->guc_prio != GUC_PRIO_INIT &&
3876 	     !new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
3877 		return;
3878 
3879 	spin_lock(&ce->guc_state.lock);
3880 	if (rq->guc_prio != GUC_PRIO_FINI) {
3881 		if (rq->guc_prio != GUC_PRIO_INIT)
3882 			sub_context_inflight_prio(ce, rq->guc_prio);
3883 		rq->guc_prio = new_guc_prio;
3884 		add_context_inflight_prio(ce, rq->guc_prio);
3885 		update_context_prio(ce);
3886 	}
3887 	spin_unlock(&ce->guc_state.lock);
3888 }
3889 
3890 static void guc_retire_inflight_request_prio(struct i915_request *rq)
3891 {
3892 	struct intel_context *ce = request_to_scheduling_context(rq);
3893 
3894 	spin_lock(&ce->guc_state.lock);
3895 	guc_prio_fini(rq, ce);
3896 	spin_unlock(&ce->guc_state.lock);
3897 }
3898 
3899 static void sanitize_hwsp(struct intel_engine_cs *engine)
3900 {
3901 	struct intel_timeline *tl;
3902 
3903 	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
3904 		intel_timeline_reset_seqno(tl);
3905 }
3906 
3907 static void guc_sanitize(struct intel_engine_cs *engine)
3908 {
3909 	/*
3910 	 * Poison residual state on resume, in case the suspend didn't!
3911 	 *
3912 	 * We have to assume that across suspend/resume (or other loss
3913 	 * of control) that the contents of our pinned buffers has been
3914 	 * lost, replaced by garbage. Since this doesn't always happen,
3915 	 * let's poison such state so that we more quickly spot when
3916 	 * we falsely assume it has been preserved.
3917 	 */
3918 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
3919 		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
3920 
3921 	/*
3922 	 * The kernel_context HWSP is stored in the status_page. As above,
3923 	 * that may be lost on resume/initialisation, and so we need to
3924 	 * reset the value in the HWSP.
3925 	 */
3926 	sanitize_hwsp(engine);
3927 
3928 	/* And scrub the dirty cachelines for the HWSP */
3929 	drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
3930 
3931 	intel_engine_reset_pinned_contexts(engine);
3932 }
3933 
3934 static void setup_hwsp(struct intel_engine_cs *engine)
3935 {
3936 	intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
3937 
3938 	ENGINE_WRITE_FW(engine,
3939 			RING_HWS_PGA,
3940 			i915_ggtt_offset(engine->status_page.vma));
3941 }
3942 
3943 static void start_engine(struct intel_engine_cs *engine)
3944 {
3945 	ENGINE_WRITE_FW(engine,
3946 			RING_MODE_GEN7,
3947 			_MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
3948 
3949 	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
3950 	ENGINE_POSTING_READ(engine, RING_MI_MODE);
3951 }
3952 
3953 static int guc_resume(struct intel_engine_cs *engine)
3954 {
3955 	assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
3956 
3957 	intel_mocs_init_engine(engine);
3958 
3959 	intel_breadcrumbs_reset(engine->breadcrumbs);
3960 
3961 	setup_hwsp(engine);
3962 	start_engine(engine);
3963 
3964 	if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
3965 		xehp_enable_ccs_engines(engine);
3966 
3967 	return 0;
3968 }
3969 
3970 static bool guc_sched_engine_disabled(struct i915_sched_engine *sched_engine)
3971 {
3972 	return !sched_engine->tasklet.callback;
3973 }
3974 
3975 static void guc_set_default_submission(struct intel_engine_cs *engine)
3976 {
3977 	engine->submit_request = guc_submit_request;
3978 }
3979 
3980 static inline void guc_kernel_context_pin(struct intel_guc *guc,
3981 					  struct intel_context *ce)
3982 {
3983 	/*
3984 	 * Note: we purposefully do not check the returns below because
3985 	 * the registration can only fail if a reset is just starting.
3986 	 * This is called at the end of reset so presumably another reset
3987 	 * isn't happening and even it did this code would be run again.
3988 	 */
3989 
3990 	if (context_guc_id_invalid(ce))
3991 		pin_guc_id(guc, ce);
3992 
3993 	try_context_registration(ce, true);
3994 }
3995 
3996 static inline void guc_init_lrc_mapping(struct intel_guc *guc)
3997 {
3998 	struct intel_gt *gt = guc_to_gt(guc);
3999 	struct intel_engine_cs *engine;
4000 	enum intel_engine_id id;
4001 
4002 	/* make sure all descriptors are clean... */
4003 	xa_destroy(&guc->context_lookup);
4004 
4005 	/*
4006 	 * A reset might have occurred while we had a pending stalled request,
4007 	 * so make sure we clean that up.
4008 	 */
4009 	guc->stalled_request = NULL;
4010 	guc->submission_stall_reason = STALL_NONE;
4011 
4012 	/*
4013 	 * Some contexts might have been pinned before we enabled GuC
4014 	 * submission, so we need to add them to the GuC bookeeping.
4015 	 * Also, after a reset the of the GuC we want to make sure that the
4016 	 * information shared with GuC is properly reset. The kernel LRCs are
4017 	 * not attached to the gem_context, so they need to be added separately.
4018 	 */
4019 	for_each_engine(engine, gt, id) {
4020 		struct intel_context *ce;
4021 
4022 		list_for_each_entry(ce, &engine->pinned_contexts_list,
4023 				    pinned_contexts_link)
4024 			guc_kernel_context_pin(guc, ce);
4025 	}
4026 }
4027 
4028 static void guc_release(struct intel_engine_cs *engine)
4029 {
4030 	engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
4031 
4032 	intel_engine_cleanup_common(engine);
4033 	lrc_fini_wa_ctx(engine);
4034 }
4035 
4036 static void virtual_guc_bump_serial(struct intel_engine_cs *engine)
4037 {
4038 	struct intel_engine_cs *e;
4039 	intel_engine_mask_t tmp, mask = engine->mask;
4040 
4041 	for_each_engine_masked(e, engine->gt, mask, tmp)
4042 		e->serial++;
4043 }
4044 
4045 static void guc_default_vfuncs(struct intel_engine_cs *engine)
4046 {
4047 	/* Default vfuncs which can be overridden by each engine. */
4048 
4049 	engine->resume = guc_resume;
4050 
4051 	engine->cops = &guc_context_ops;
4052 	engine->request_alloc = guc_request_alloc;
4053 	engine->add_active_request = add_to_context;
4054 	engine->remove_active_request = remove_from_context;
4055 
4056 	engine->sched_engine->schedule = i915_schedule;
4057 
4058 	engine->reset.prepare = guc_engine_reset_prepare;
4059 	engine->reset.rewind = guc_rewind_nop;
4060 	engine->reset.cancel = guc_reset_nop;
4061 	engine->reset.finish = guc_reset_nop;
4062 
4063 	engine->emit_flush = gen8_emit_flush_xcs;
4064 	engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
4065 	engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
4066 	if (GRAPHICS_VER(engine->i915) >= 12) {
4067 		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
4068 		engine->emit_flush = gen12_emit_flush_xcs;
4069 	}
4070 	engine->set_default_submission = guc_set_default_submission;
4071 	engine->busyness = guc_engine_busyness;
4072 
4073 	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
4074 	engine->flags |= I915_ENGINE_HAS_PREEMPTION;
4075 	engine->flags |= I915_ENGINE_HAS_TIMESLICES;
4076 
4077 	/* Wa_14014475959:dg2 */
4078 	if (IS_DG2(engine->i915) && engine->class == COMPUTE_CLASS)
4079 		engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
4080 
4081 	/*
4082 	 * TODO: GuC supports timeslicing and semaphores as well, but they're
4083 	 * handled by the firmware so some minor tweaks are required before
4084 	 * enabling.
4085 	 *
4086 	 * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
4087 	 */
4088 
4089 	engine->emit_bb_start = gen8_emit_bb_start;
4090 	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
4091 		engine->emit_bb_start = gen125_emit_bb_start;
4092 }
4093 
4094 static void rcs_submission_override(struct intel_engine_cs *engine)
4095 {
4096 	switch (GRAPHICS_VER(engine->i915)) {
4097 	case 12:
4098 		engine->emit_flush = gen12_emit_flush_rcs;
4099 		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
4100 		break;
4101 	case 11:
4102 		engine->emit_flush = gen11_emit_flush_rcs;
4103 		engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
4104 		break;
4105 	default:
4106 		engine->emit_flush = gen8_emit_flush_rcs;
4107 		engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
4108 		break;
4109 	}
4110 }
4111 
4112 static inline void guc_default_irqs(struct intel_engine_cs *engine)
4113 {
4114 	engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
4115 	intel_engine_set_irq_handler(engine, cs_irq_handler);
4116 }
4117 
4118 static void guc_sched_engine_destroy(struct kref *kref)
4119 {
4120 	struct i915_sched_engine *sched_engine =
4121 		container_of(kref, typeof(*sched_engine), ref);
4122 	struct intel_guc *guc = sched_engine->private_data;
4123 
4124 	guc->sched_engine = NULL;
4125 	tasklet_kill(&sched_engine->tasklet); /* flush the callback */
4126 	kfree(sched_engine);
4127 }
4128 
4129 int intel_guc_submission_setup(struct intel_engine_cs *engine)
4130 {
4131 	struct drm_i915_private *i915 = engine->i915;
4132 	struct intel_guc *guc = &engine->gt->uc.guc;
4133 
4134 	/*
4135 	 * The setup relies on several assumptions (e.g. irqs always enabled)
4136 	 * that are only valid on gen11+
4137 	 */
4138 	GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
4139 
4140 	if (!guc->sched_engine) {
4141 		guc->sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
4142 		if (!guc->sched_engine)
4143 			return -ENOMEM;
4144 
4145 		guc->sched_engine->schedule = i915_schedule;
4146 		guc->sched_engine->disabled = guc_sched_engine_disabled;
4147 		guc->sched_engine->private_data = guc;
4148 		guc->sched_engine->destroy = guc_sched_engine_destroy;
4149 		guc->sched_engine->bump_inflight_request_prio =
4150 			guc_bump_inflight_request_prio;
4151 		guc->sched_engine->retire_inflight_request_prio =
4152 			guc_retire_inflight_request_prio;
4153 		tasklet_setup(&guc->sched_engine->tasklet,
4154 			      guc_submission_tasklet);
4155 	}
4156 	i915_sched_engine_put(engine->sched_engine);
4157 	engine->sched_engine = i915_sched_engine_get(guc->sched_engine);
4158 
4159 	guc_default_vfuncs(engine);
4160 	guc_default_irqs(engine);
4161 	guc_init_breadcrumbs(engine);
4162 
4163 	if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)
4164 		rcs_submission_override(engine);
4165 
4166 	lrc_init_wa_ctx(engine);
4167 
4168 	/* Finally, take ownership and responsibility for cleanup! */
4169 	engine->sanitize = guc_sanitize;
4170 	engine->release = guc_release;
4171 
4172 	return 0;
4173 }
4174 
4175 void intel_guc_submission_enable(struct intel_guc *guc)
4176 {
4177 	struct intel_gt *gt = guc_to_gt(guc);
4178 
4179 	/* Enable and route to GuC */
4180 	if (GRAPHICS_VER(gt->i915) >= 12)
4181 		intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES,
4182 				   GUC_SEM_INTR_ROUTE_TO_GUC |
4183 				   GUC_SEM_INTR_ENABLE_ALL);
4184 
4185 	guc_init_lrc_mapping(guc);
4186 	guc_init_engine_stats(guc);
4187 }
4188 
4189 void intel_guc_submission_disable(struct intel_guc *guc)
4190 {
4191 	struct intel_gt *gt = guc_to_gt(guc);
4192 
4193 	/* Note: By the time we're here, GuC may have already been reset */
4194 
4195 	/* Disable and route to host */
4196 	if (GRAPHICS_VER(gt->i915) >= 12)
4197 		intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, 0x0);
4198 }
4199 
4200 static bool __guc_submission_supported(struct intel_guc *guc)
4201 {
4202 	/* GuC submission is unavailable for pre-Gen11 */
4203 	return intel_guc_is_supported(guc) &&
4204 	       GRAPHICS_VER(guc_to_gt(guc)->i915) >= 11;
4205 }
4206 
4207 static bool __guc_submission_selected(struct intel_guc *guc)
4208 {
4209 	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
4210 
4211 	if (!intel_guc_submission_is_supported(guc))
4212 		return false;
4213 
4214 	return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
4215 }
4216 
4217 void intel_guc_submission_init_early(struct intel_guc *guc)
4218 {
4219 	xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
4220 
4221 	spin_lock_init(&guc->submission_state.lock);
4222 	INIT_LIST_HEAD(&guc->submission_state.guc_id_list);
4223 	ida_init(&guc->submission_state.guc_ids);
4224 	INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts);
4225 	INIT_WORK(&guc->submission_state.destroyed_worker,
4226 		  destroyed_worker_func);
4227 	INIT_WORK(&guc->submission_state.reset_fail_worker,
4228 		  reset_fail_worker_func);
4229 
4230 	spin_lock_init(&guc->timestamp.lock);
4231 	INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
4232 
4233 	guc->submission_state.num_guc_ids = GUC_MAX_CONTEXT_ID;
4234 	guc->submission_supported = __guc_submission_supported(guc);
4235 	guc->submission_selected = __guc_submission_selected(guc);
4236 }
4237 
4238 static inline struct intel_context *
4239 g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
4240 {
4241 	struct intel_context *ce;
4242 
4243 	if (unlikely(ctx_id >= GUC_MAX_CONTEXT_ID)) {
4244 		drm_err(&guc_to_gt(guc)->i915->drm,
4245 			"Invalid ctx_id %u\n", ctx_id);
4246 		return NULL;
4247 	}
4248 
4249 	ce = __get_context(guc, ctx_id);
4250 	if (unlikely(!ce)) {
4251 		drm_err(&guc_to_gt(guc)->i915->drm,
4252 			"Context is NULL, ctx_id %u\n", ctx_id);
4253 		return NULL;
4254 	}
4255 
4256 	if (unlikely(intel_context_is_child(ce))) {
4257 		drm_err(&guc_to_gt(guc)->i915->drm,
4258 			"Context is child, ctx_id %u\n", ctx_id);
4259 		return NULL;
4260 	}
4261 
4262 	return ce;
4263 }
4264 
4265 int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
4266 					  const u32 *msg,
4267 					  u32 len)
4268 {
4269 	struct intel_context *ce;
4270 	u32 ctx_id;
4271 
4272 	if (unlikely(len < 1)) {
4273 		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len);
4274 		return -EPROTO;
4275 	}
4276 	ctx_id = msg[0];
4277 
4278 	ce = g2h_context_lookup(guc, ctx_id);
4279 	if (unlikely(!ce))
4280 		return -EPROTO;
4281 
4282 	trace_intel_context_deregister_done(ce);
4283 
4284 #ifdef CONFIG_DRM_I915_SELFTEST
4285 	if (unlikely(ce->drop_deregister)) {
4286 		ce->drop_deregister = false;
4287 		return 0;
4288 	}
4289 #endif
4290 
4291 	if (context_wait_for_deregister_to_register(ce)) {
4292 		struct intel_runtime_pm *runtime_pm =
4293 			&ce->engine->gt->i915->runtime_pm;
4294 		intel_wakeref_t wakeref;
4295 
4296 		/*
4297 		 * Previous owner of this guc_id has been deregistered, now safe
4298 		 * register this context.
4299 		 */
4300 		with_intel_runtime_pm(runtime_pm, wakeref)
4301 			register_context(ce, true);
4302 		guc_signal_context_fence(ce);
4303 		intel_context_put(ce);
4304 	} else if (context_destroyed(ce)) {
4305 		/* Context has been destroyed */
4306 		intel_gt_pm_put_async(guc_to_gt(guc));
4307 		release_guc_id(guc, ce);
4308 		__guc_context_destroy(ce);
4309 	}
4310 
4311 	decr_outstanding_submission_g2h(guc);
4312 
4313 	return 0;
4314 }
4315 
4316 int intel_guc_sched_done_process_msg(struct intel_guc *guc,
4317 				     const u32 *msg,
4318 				     u32 len)
4319 {
4320 	struct intel_context *ce;
4321 	unsigned long flags;
4322 	u32 ctx_id;
4323 
4324 	if (unlikely(len < 2)) {
4325 		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len);
4326 		return -EPROTO;
4327 	}
4328 	ctx_id = msg[0];
4329 
4330 	ce = g2h_context_lookup(guc, ctx_id);
4331 	if (unlikely(!ce))
4332 		return -EPROTO;
4333 
4334 	if (unlikely(context_destroyed(ce) ||
4335 		     (!context_pending_enable(ce) &&
4336 		     !context_pending_disable(ce)))) {
4337 		drm_err(&guc_to_gt(guc)->i915->drm,
4338 			"Bad context sched_state 0x%x, ctx_id %u\n",
4339 			ce->guc_state.sched_state, ctx_id);
4340 		return -EPROTO;
4341 	}
4342 
4343 	trace_intel_context_sched_done(ce);
4344 
4345 	if (context_pending_enable(ce)) {
4346 #ifdef CONFIG_DRM_I915_SELFTEST
4347 		if (unlikely(ce->drop_schedule_enable)) {
4348 			ce->drop_schedule_enable = false;
4349 			return 0;
4350 		}
4351 #endif
4352 
4353 		spin_lock_irqsave(&ce->guc_state.lock, flags);
4354 		clr_context_pending_enable(ce);
4355 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
4356 	} else if (context_pending_disable(ce)) {
4357 		bool banned;
4358 
4359 #ifdef CONFIG_DRM_I915_SELFTEST
4360 		if (unlikely(ce->drop_schedule_disable)) {
4361 			ce->drop_schedule_disable = false;
4362 			return 0;
4363 		}
4364 #endif
4365 
4366 		/*
4367 		 * Unpin must be done before __guc_signal_context_fence,
4368 		 * otherwise a race exists between the requests getting
4369 		 * submitted + retired before this unpin completes resulting in
4370 		 * the pin_count going to zero and the context still being
4371 		 * enabled.
4372 		 */
4373 		intel_context_sched_disable_unpin(ce);
4374 
4375 		spin_lock_irqsave(&ce->guc_state.lock, flags);
4376 		banned = context_banned(ce);
4377 		clr_context_banned(ce);
4378 		clr_context_pending_disable(ce);
4379 		__guc_signal_context_fence(ce);
4380 		guc_blocked_fence_complete(ce);
4381 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
4382 
4383 		if (banned) {
4384 			guc_cancel_context_requests(ce);
4385 			intel_engine_signal_breadcrumbs(ce->engine);
4386 		}
4387 	}
4388 
4389 	decr_outstanding_submission_g2h(guc);
4390 	intel_context_put(ce);
4391 
4392 	return 0;
4393 }
4394 
4395 static void capture_error_state(struct intel_guc *guc,
4396 				struct intel_context *ce)
4397 {
4398 	struct intel_gt *gt = guc_to_gt(guc);
4399 	struct drm_i915_private *i915 = gt->i915;
4400 	struct intel_engine_cs *engine = __context_to_physical_engine(ce);
4401 	intel_wakeref_t wakeref;
4402 
4403 	intel_engine_set_hung_context(engine, ce);
4404 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
4405 		i915_capture_error_state(gt, engine->mask, CORE_DUMP_FLAG_IS_GUC_CAPTURE);
4406 	atomic_inc(&i915->gpu_error.reset_engine_count[engine->uabi_class]);
4407 }
4408 
4409 static void guc_context_replay(struct intel_context *ce)
4410 {
4411 	struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
4412 
4413 	__guc_reset_context(ce, ce->engine->mask);
4414 	tasklet_hi_schedule(&sched_engine->tasklet);
4415 }
4416 
4417 static void guc_handle_context_reset(struct intel_guc *guc,
4418 				     struct intel_context *ce)
4419 {
4420 	trace_intel_context_reset(ce);
4421 
4422 	if (likely(!intel_context_is_banned(ce))) {
4423 		capture_error_state(guc, ce);
4424 		guc_context_replay(ce);
4425 	} else {
4426 		drm_info(&guc_to_gt(guc)->i915->drm,
4427 			 "Ignoring context reset notification of banned context 0x%04X on %s",
4428 			 ce->guc_id.id, ce->engine->name);
4429 	}
4430 }
4431 
4432 int intel_guc_context_reset_process_msg(struct intel_guc *guc,
4433 					const u32 *msg, u32 len)
4434 {
4435 	struct intel_context *ce;
4436 	unsigned long flags;
4437 	int ctx_id;
4438 
4439 	if (unlikely(len != 1)) {
4440 		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
4441 		return -EPROTO;
4442 	}
4443 
4444 	ctx_id = msg[0];
4445 
4446 	/*
4447 	 * The context lookup uses the xarray but lookups only require an RCU lock
4448 	 * not the full spinlock. So take the lock explicitly and keep it until the
4449 	 * context has been reference count locked to ensure it can't be destroyed
4450 	 * asynchronously until the reset is done.
4451 	 */
4452 	xa_lock_irqsave(&guc->context_lookup, flags);
4453 	ce = g2h_context_lookup(guc, ctx_id);
4454 	if (ce)
4455 		intel_context_get(ce);
4456 	xa_unlock_irqrestore(&guc->context_lookup, flags);
4457 
4458 	if (unlikely(!ce))
4459 		return -EPROTO;
4460 
4461 	guc_handle_context_reset(guc, ce);
4462 	intel_context_put(ce);
4463 
4464 	return 0;
4465 }
4466 
4467 int intel_guc_error_capture_process_msg(struct intel_guc *guc,
4468 					const u32 *msg, u32 len)
4469 {
4470 	u32 status;
4471 
4472 	if (unlikely(len != 1)) {
4473 		drm_dbg(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
4474 		return -EPROTO;
4475 	}
4476 
4477 	status = msg[0] & INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
4478 	if (status == INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
4479 		drm_warn(&guc_to_gt(guc)->i915->drm, "G2H-Error capture no space");
4480 
4481 	intel_guc_capture_process(guc);
4482 
4483 	return 0;
4484 }
4485 
4486 struct intel_engine_cs *
4487 intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
4488 {
4489 	struct intel_gt *gt = guc_to_gt(guc);
4490 	u8 engine_class = guc_class_to_engine_class(guc_class);
4491 
4492 	/* Class index is checked in class converter */
4493 	GEM_BUG_ON(instance > MAX_ENGINE_INSTANCE);
4494 
4495 	return gt->engine_class[engine_class][instance];
4496 }
4497 
4498 static void reset_fail_worker_func(struct work_struct *w)
4499 {
4500 	struct intel_guc *guc = container_of(w, struct intel_guc,
4501 					     submission_state.reset_fail_worker);
4502 	struct intel_gt *gt = guc_to_gt(guc);
4503 	intel_engine_mask_t reset_fail_mask;
4504 	unsigned long flags;
4505 
4506 	spin_lock_irqsave(&guc->submission_state.lock, flags);
4507 	reset_fail_mask = guc->submission_state.reset_fail_mask;
4508 	guc->submission_state.reset_fail_mask = 0;
4509 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
4510 
4511 	if (likely(reset_fail_mask))
4512 		intel_gt_handle_error(gt, reset_fail_mask,
4513 				      I915_ERROR_CAPTURE,
4514 				      "GuC failed to reset engine mask=0x%x\n",
4515 				      reset_fail_mask);
4516 }
4517 
4518 int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
4519 					 const u32 *msg, u32 len)
4520 {
4521 	struct intel_engine_cs *engine;
4522 	struct intel_gt *gt = guc_to_gt(guc);
4523 	u8 guc_class, instance;
4524 	u32 reason;
4525 	unsigned long flags;
4526 
4527 	if (unlikely(len != 3)) {
4528 		drm_err(&gt->i915->drm, "Invalid length %u", len);
4529 		return -EPROTO;
4530 	}
4531 
4532 	guc_class = msg[0];
4533 	instance = msg[1];
4534 	reason = msg[2];
4535 
4536 	engine = intel_guc_lookup_engine(guc, guc_class, instance);
4537 	if (unlikely(!engine)) {
4538 		drm_err(&gt->i915->drm,
4539 			"Invalid engine %d:%d", guc_class, instance);
4540 		return -EPROTO;
4541 	}
4542 
4543 	/*
4544 	 * This is an unexpected failure of a hardware feature. So, log a real
4545 	 * error message not just the informational that comes with the reset.
4546 	 */
4547 	drm_err(&gt->i915->drm, "GuC engine reset request failed on %d:%d (%s) because 0x%08X",
4548 		guc_class, instance, engine->name, reason);
4549 
4550 	spin_lock_irqsave(&guc->submission_state.lock, flags);
4551 	guc->submission_state.reset_fail_mask |= engine->mask;
4552 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
4553 
4554 	/*
4555 	 * A GT reset flushes this worker queue (G2H handler) so we must use
4556 	 * another worker to trigger a GT reset.
4557 	 */
4558 	queue_work(system_unbound_wq, &guc->submission_state.reset_fail_worker);
4559 
4560 	return 0;
4561 }
4562 
4563 void intel_guc_find_hung_context(struct intel_engine_cs *engine)
4564 {
4565 	struct intel_guc *guc = &engine->gt->uc.guc;
4566 	struct intel_context *ce;
4567 	struct i915_request *rq;
4568 	unsigned long index;
4569 	unsigned long flags;
4570 
4571 	/* Reset called during driver load? GuC not yet initialised! */
4572 	if (unlikely(!guc_submission_initialized(guc)))
4573 		return;
4574 
4575 	xa_lock_irqsave(&guc->context_lookup, flags);
4576 	xa_for_each(&guc->context_lookup, index, ce) {
4577 		if (!kref_get_unless_zero(&ce->ref))
4578 			continue;
4579 
4580 		xa_unlock(&guc->context_lookup);
4581 
4582 		if (!intel_context_is_pinned(ce))
4583 			goto next;
4584 
4585 		if (intel_engine_is_virtual(ce->engine)) {
4586 			if (!(ce->engine->mask & engine->mask))
4587 				goto next;
4588 		} else {
4589 			if (ce->engine != engine)
4590 				goto next;
4591 		}
4592 
4593 		list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
4594 			if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
4595 				continue;
4596 
4597 			intel_engine_set_hung_context(engine, ce);
4598 
4599 			/* Can only cope with one hang at a time... */
4600 			intel_context_put(ce);
4601 			xa_lock(&guc->context_lookup);
4602 			goto done;
4603 		}
4604 next:
4605 		intel_context_put(ce);
4606 		xa_lock(&guc->context_lookup);
4607 	}
4608 done:
4609 	xa_unlock_irqrestore(&guc->context_lookup, flags);
4610 }
4611 
4612 void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
4613 				    struct i915_request *hung_rq,
4614 				    struct drm_printer *m)
4615 {
4616 	struct intel_guc *guc = &engine->gt->uc.guc;
4617 	struct intel_context *ce;
4618 	unsigned long index;
4619 	unsigned long flags;
4620 
4621 	/* Reset called during driver load? GuC not yet initialised! */
4622 	if (unlikely(!guc_submission_initialized(guc)))
4623 		return;
4624 
4625 	xa_lock_irqsave(&guc->context_lookup, flags);
4626 	xa_for_each(&guc->context_lookup, index, ce) {
4627 		if (!kref_get_unless_zero(&ce->ref))
4628 			continue;
4629 
4630 		xa_unlock(&guc->context_lookup);
4631 
4632 		if (!intel_context_is_pinned(ce))
4633 			goto next;
4634 
4635 		if (intel_engine_is_virtual(ce->engine)) {
4636 			if (!(ce->engine->mask & engine->mask))
4637 				goto next;
4638 		} else {
4639 			if (ce->engine != engine)
4640 				goto next;
4641 		}
4642 
4643 		spin_lock(&ce->guc_state.lock);
4644 		intel_engine_dump_active_requests(&ce->guc_state.requests,
4645 						  hung_rq, m);
4646 		spin_unlock(&ce->guc_state.lock);
4647 
4648 next:
4649 		intel_context_put(ce);
4650 		xa_lock(&guc->context_lookup);
4651 	}
4652 	xa_unlock_irqrestore(&guc->context_lookup, flags);
4653 }
4654 
4655 void intel_guc_submission_print_info(struct intel_guc *guc,
4656 				     struct drm_printer *p)
4657 {
4658 	struct i915_sched_engine *sched_engine = guc->sched_engine;
4659 	struct rb_node *rb;
4660 	unsigned long flags;
4661 
4662 	if (!sched_engine)
4663 		return;
4664 
4665 	drm_printf(p, "GuC Number Outstanding Submission G2H: %u\n",
4666 		   atomic_read(&guc->outstanding_submission_g2h));
4667 	drm_printf(p, "GuC tasklet count: %u\n\n",
4668 		   atomic_read(&sched_engine->tasklet.count));
4669 
4670 	spin_lock_irqsave(&sched_engine->lock, flags);
4671 	drm_printf(p, "Requests in GuC submit tasklet:\n");
4672 	for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
4673 		struct i915_priolist *pl = to_priolist(rb);
4674 		struct i915_request *rq;
4675 
4676 		priolist_for_each_request(rq, pl)
4677 			drm_printf(p, "guc_id=%u, seqno=%llu\n",
4678 				   rq->context->guc_id.id,
4679 				   rq->fence.seqno);
4680 	}
4681 	spin_unlock_irqrestore(&sched_engine->lock, flags);
4682 	drm_printf(p, "\n");
4683 }
4684 
4685 static inline void guc_log_context_priority(struct drm_printer *p,
4686 					    struct intel_context *ce)
4687 {
4688 	int i;
4689 
4690 	drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio);
4691 	drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
4692 	for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
4693 	     i < GUC_CLIENT_PRIORITY_NUM; ++i) {
4694 		drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
4695 			   i, ce->guc_state.prio_count[i]);
4696 	}
4697 	drm_printf(p, "\n");
4698 }
4699 
4700 static inline void guc_log_context(struct drm_printer *p,
4701 				   struct intel_context *ce)
4702 {
4703 	drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
4704 	drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
4705 	drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
4706 		   ce->ring->head,
4707 		   ce->lrc_reg_state[CTX_RING_HEAD]);
4708 	drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
4709 		   ce->ring->tail,
4710 		   ce->lrc_reg_state[CTX_RING_TAIL]);
4711 	drm_printf(p, "\t\tContext Pin Count: %u\n",
4712 		   atomic_read(&ce->pin_count));
4713 	drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
4714 		   atomic_read(&ce->guc_id.ref));
4715 	drm_printf(p, "\t\tSchedule State: 0x%x\n\n",
4716 		   ce->guc_state.sched_state);
4717 }
4718 
4719 void intel_guc_submission_print_context_info(struct intel_guc *guc,
4720 					     struct drm_printer *p)
4721 {
4722 	struct intel_context *ce;
4723 	unsigned long index;
4724 	unsigned long flags;
4725 
4726 	xa_lock_irqsave(&guc->context_lookup, flags);
4727 	xa_for_each(&guc->context_lookup, index, ce) {
4728 		GEM_BUG_ON(intel_context_is_child(ce));
4729 
4730 		guc_log_context(p, ce);
4731 		guc_log_context_priority(p, ce);
4732 
4733 		if (intel_context_is_parent(ce)) {
4734 			struct intel_context *child;
4735 
4736 			drm_printf(p, "\t\tNumber children: %u\n",
4737 				   ce->parallel.number_children);
4738 
4739 			if (ce->parallel.guc.wq_status) {
4740 				drm_printf(p, "\t\tWQI Head: %u\n",
4741 					   READ_ONCE(*ce->parallel.guc.wq_head));
4742 				drm_printf(p, "\t\tWQI Tail: %u\n",
4743 					   READ_ONCE(*ce->parallel.guc.wq_tail));
4744 				drm_printf(p, "\t\tWQI Status: %u\n\n",
4745 					   READ_ONCE(*ce->parallel.guc.wq_status));
4746 			}
4747 
4748 			if (ce->engine->emit_bb_start ==
4749 			    emit_bb_start_parent_no_preempt_mid_batch) {
4750 				u8 i;
4751 
4752 				drm_printf(p, "\t\tChildren Go: %u\n\n",
4753 					   get_children_go_value(ce));
4754 				for (i = 0; i < ce->parallel.number_children; ++i)
4755 					drm_printf(p, "\t\tChildren Join: %u\n",
4756 						   get_children_join_value(ce, i));
4757 			}
4758 
4759 			for_each_child(ce, child)
4760 				guc_log_context(p, child);
4761 		}
4762 	}
4763 	xa_unlock_irqrestore(&guc->context_lookup, flags);
4764 }
4765 
4766 static inline u32 get_children_go_addr(struct intel_context *ce)
4767 {
4768 	GEM_BUG_ON(!intel_context_is_parent(ce));
4769 
4770 	return i915_ggtt_offset(ce->state) +
4771 		__get_parent_scratch_offset(ce) +
4772 		offsetof(struct parent_scratch, go.semaphore);
4773 }
4774 
4775 static inline u32 get_children_join_addr(struct intel_context *ce,
4776 					 u8 child_index)
4777 {
4778 	GEM_BUG_ON(!intel_context_is_parent(ce));
4779 
4780 	return i915_ggtt_offset(ce->state) +
4781 		__get_parent_scratch_offset(ce) +
4782 		offsetof(struct parent_scratch, join[child_index].semaphore);
4783 }
4784 
4785 #define PARENT_GO_BB			1
4786 #define PARENT_GO_FINI_BREADCRUMB	0
4787 #define CHILD_GO_BB			1
4788 #define CHILD_GO_FINI_BREADCRUMB	0
4789 static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
4790 						     u64 offset, u32 len,
4791 						     const unsigned int flags)
4792 {
4793 	struct intel_context *ce = rq->context;
4794 	u32 *cs;
4795 	u8 i;
4796 
4797 	GEM_BUG_ON(!intel_context_is_parent(ce));
4798 
4799 	cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children);
4800 	if (IS_ERR(cs))
4801 		return PTR_ERR(cs);
4802 
4803 	/* Wait on children */
4804 	for (i = 0; i < ce->parallel.number_children; ++i) {
4805 		*cs++ = (MI_SEMAPHORE_WAIT |
4806 			 MI_SEMAPHORE_GLOBAL_GTT |
4807 			 MI_SEMAPHORE_POLL |
4808 			 MI_SEMAPHORE_SAD_EQ_SDD);
4809 		*cs++ = PARENT_GO_BB;
4810 		*cs++ = get_children_join_addr(ce, i);
4811 		*cs++ = 0;
4812 	}
4813 
4814 	/* Turn off preemption */
4815 	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
4816 	*cs++ = MI_NOOP;
4817 
4818 	/* Tell children go */
4819 	cs = gen8_emit_ggtt_write(cs,
4820 				  CHILD_GO_BB,
4821 				  get_children_go_addr(ce),
4822 				  0);
4823 
4824 	/* Jump to batch */
4825 	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
4826 		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
4827 	*cs++ = lower_32_bits(offset);
4828 	*cs++ = upper_32_bits(offset);
4829 	*cs++ = MI_NOOP;
4830 
4831 	intel_ring_advance(rq, cs);
4832 
4833 	return 0;
4834 }
4835 
4836 static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
4837 						    u64 offset, u32 len,
4838 						    const unsigned int flags)
4839 {
4840 	struct intel_context *ce = rq->context;
4841 	struct intel_context *parent = intel_context_to_parent(ce);
4842 	u32 *cs;
4843 
4844 	GEM_BUG_ON(!intel_context_is_child(ce));
4845 
4846 	cs = intel_ring_begin(rq, 12);
4847 	if (IS_ERR(cs))
4848 		return PTR_ERR(cs);
4849 
4850 	/* Signal parent */
4851 	cs = gen8_emit_ggtt_write(cs,
4852 				  PARENT_GO_BB,
4853 				  get_children_join_addr(parent,
4854 							 ce->parallel.child_index),
4855 				  0);
4856 
4857 	/* Wait on parent for go */
4858 	*cs++ = (MI_SEMAPHORE_WAIT |
4859 		 MI_SEMAPHORE_GLOBAL_GTT |
4860 		 MI_SEMAPHORE_POLL |
4861 		 MI_SEMAPHORE_SAD_EQ_SDD);
4862 	*cs++ = CHILD_GO_BB;
4863 	*cs++ = get_children_go_addr(parent);
4864 	*cs++ = 0;
4865 
4866 	/* Turn off preemption */
4867 	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
4868 
4869 	/* Jump to batch */
4870 	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
4871 		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
4872 	*cs++ = lower_32_bits(offset);
4873 	*cs++ = upper_32_bits(offset);
4874 
4875 	intel_ring_advance(rq, cs);
4876 
4877 	return 0;
4878 }
4879 
4880 static u32 *
4881 __emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
4882 						   u32 *cs)
4883 {
4884 	struct intel_context *ce = rq->context;
4885 	u8 i;
4886 
4887 	GEM_BUG_ON(!intel_context_is_parent(ce));
4888 
4889 	/* Wait on children */
4890 	for (i = 0; i < ce->parallel.number_children; ++i) {
4891 		*cs++ = (MI_SEMAPHORE_WAIT |
4892 			 MI_SEMAPHORE_GLOBAL_GTT |
4893 			 MI_SEMAPHORE_POLL |
4894 			 MI_SEMAPHORE_SAD_EQ_SDD);
4895 		*cs++ = PARENT_GO_FINI_BREADCRUMB;
4896 		*cs++ = get_children_join_addr(ce, i);
4897 		*cs++ = 0;
4898 	}
4899 
4900 	/* Turn on preemption */
4901 	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
4902 	*cs++ = MI_NOOP;
4903 
4904 	/* Tell children go */
4905 	cs = gen8_emit_ggtt_write(cs,
4906 				  CHILD_GO_FINI_BREADCRUMB,
4907 				  get_children_go_addr(ce),
4908 				  0);
4909 
4910 	return cs;
4911 }
4912 
4913 /*
4914  * If this true, a submission of multi-lrc requests had an error and the
4915  * requests need to be skipped. The front end (execuf IOCTL) should've called
4916  * i915_request_skip which squashes the BB but we still need to emit the fini
4917  * breadrcrumbs seqno write. At this point we don't know how many of the
4918  * requests in the multi-lrc submission were generated so we can't do the
4919  * handshake between the parent and children (e.g. if 4 requests should be
4920  * generated but 2nd hit an error only 1 would be seen by the GuC backend).
4921  * Simply skip the handshake, but still emit the breadcrumbd seqno, if an error
4922  * has occurred on any of the requests in submission / relationship.
4923  */
4924 static inline bool skip_handshake(struct i915_request *rq)
4925 {
4926 	return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
4927 }
4928 
4929 #define NON_SKIP_LEN	6
4930 static u32 *
4931 emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
4932 						 u32 *cs)
4933 {
4934 	struct intel_context *ce = rq->context;
4935 	__maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
4936 	__maybe_unused u32 *start_fini_breadcrumb_cs = cs;
4937 
4938 	GEM_BUG_ON(!intel_context_is_parent(ce));
4939 
4940 	if (unlikely(skip_handshake(rq))) {
4941 		/*
4942 		 * NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch,
4943 		 * the NON_SKIP_LEN comes from the length of the emits below.
4944 		 */
4945 		memset(cs, 0, sizeof(u32) *
4946 		       (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
4947 		cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
4948 	} else {
4949 		cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
4950 	}
4951 
4952 	/* Emit fini breadcrumb */
4953 	before_fini_breadcrumb_user_interrupt_cs = cs;
4954 	cs = gen8_emit_ggtt_write(cs,
4955 				  rq->fence.seqno,
4956 				  i915_request_active_timeline(rq)->hwsp_offset,
4957 				  0);
4958 
4959 	/* User interrupt */
4960 	*cs++ = MI_USER_INTERRUPT;
4961 	*cs++ = MI_NOOP;
4962 
4963 	/* Ensure our math for skip + emit is correct */
4964 	GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
4965 		   cs);
4966 	GEM_BUG_ON(start_fini_breadcrumb_cs +
4967 		   ce->engine->emit_fini_breadcrumb_dw != cs);
4968 
4969 	rq->tail = intel_ring_offset(rq, cs);
4970 
4971 	return cs;
4972 }
4973 
4974 static u32 *
4975 __emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
4976 						  u32 *cs)
4977 {
4978 	struct intel_context *ce = rq->context;
4979 	struct intel_context *parent = intel_context_to_parent(ce);
4980 
4981 	GEM_BUG_ON(!intel_context_is_child(ce));
4982 
4983 	/* Turn on preemption */
4984 	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
4985 	*cs++ = MI_NOOP;
4986 
4987 	/* Signal parent */
4988 	cs = gen8_emit_ggtt_write(cs,
4989 				  PARENT_GO_FINI_BREADCRUMB,
4990 				  get_children_join_addr(parent,
4991 							 ce->parallel.child_index),
4992 				  0);
4993 
4994 	/* Wait parent on for go */
4995 	*cs++ = (MI_SEMAPHORE_WAIT |
4996 		 MI_SEMAPHORE_GLOBAL_GTT |
4997 		 MI_SEMAPHORE_POLL |
4998 		 MI_SEMAPHORE_SAD_EQ_SDD);
4999 	*cs++ = CHILD_GO_FINI_BREADCRUMB;
5000 	*cs++ = get_children_go_addr(parent);
5001 	*cs++ = 0;
5002 
5003 	return cs;
5004 }
5005 
5006 static u32 *
5007 emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
5008 						u32 *cs)
5009 {
5010 	struct intel_context *ce = rq->context;
5011 	__maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
5012 	__maybe_unused u32 *start_fini_breadcrumb_cs = cs;
5013 
5014 	GEM_BUG_ON(!intel_context_is_child(ce));
5015 
5016 	if (unlikely(skip_handshake(rq))) {
5017 		/*
5018 		 * NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch,
5019 		 * the NON_SKIP_LEN comes from the length of the emits below.
5020 		 */
5021 		memset(cs, 0, sizeof(u32) *
5022 		       (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
5023 		cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
5024 	} else {
5025 		cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
5026 	}
5027 
5028 	/* Emit fini breadcrumb */
5029 	before_fini_breadcrumb_user_interrupt_cs = cs;
5030 	cs = gen8_emit_ggtt_write(cs,
5031 				  rq->fence.seqno,
5032 				  i915_request_active_timeline(rq)->hwsp_offset,
5033 				  0);
5034 
5035 	/* User interrupt */
5036 	*cs++ = MI_USER_INTERRUPT;
5037 	*cs++ = MI_NOOP;
5038 
5039 	/* Ensure our math for skip + emit is correct */
5040 	GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
5041 		   cs);
5042 	GEM_BUG_ON(start_fini_breadcrumb_cs +
5043 		   ce->engine->emit_fini_breadcrumb_dw != cs);
5044 
5045 	rq->tail = intel_ring_offset(rq, cs);
5046 
5047 	return cs;
5048 }
5049 
5050 #undef NON_SKIP_LEN
5051 
5052 static struct intel_context *
5053 guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
5054 		   unsigned long flags)
5055 {
5056 	struct guc_virtual_engine *ve;
5057 	struct intel_guc *guc;
5058 	unsigned int n;
5059 	int err;
5060 
5061 	ve = kzalloc(sizeof(*ve), GFP_KERNEL);
5062 	if (!ve)
5063 		return ERR_PTR(-ENOMEM);
5064 
5065 	guc = &siblings[0]->gt->uc.guc;
5066 
5067 	ve->base.i915 = siblings[0]->i915;
5068 	ve->base.gt = siblings[0]->gt;
5069 	ve->base.uncore = siblings[0]->uncore;
5070 	ve->base.id = -1;
5071 
5072 	ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
5073 	ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
5074 	ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
5075 	ve->base.saturated = ALL_ENGINES;
5076 
5077 	snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
5078 
5079 	ve->base.sched_engine = i915_sched_engine_get(guc->sched_engine);
5080 
5081 	ve->base.cops = &virtual_guc_context_ops;
5082 	ve->base.request_alloc = guc_request_alloc;
5083 	ve->base.bump_serial = virtual_guc_bump_serial;
5084 
5085 	ve->base.submit_request = guc_submit_request;
5086 
5087 	ve->base.flags = I915_ENGINE_IS_VIRTUAL;
5088 
5089 	intel_context_init(&ve->context, &ve->base);
5090 
5091 	for (n = 0; n < count; n++) {
5092 		struct intel_engine_cs *sibling = siblings[n];
5093 
5094 		GEM_BUG_ON(!is_power_of_2(sibling->mask));
5095 		if (sibling->mask & ve->base.mask) {
5096 			DRM_DEBUG("duplicate %s entry in load balancer\n",
5097 				  sibling->name);
5098 			err = -EINVAL;
5099 			goto err_put;
5100 		}
5101 
5102 		ve->base.mask |= sibling->mask;
5103 		ve->base.logical_mask |= sibling->logical_mask;
5104 
5105 		if (n != 0 && ve->base.class != sibling->class) {
5106 			DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
5107 				  sibling->class, ve->base.class);
5108 			err = -EINVAL;
5109 			goto err_put;
5110 		} else if (n == 0) {
5111 			ve->base.class = sibling->class;
5112 			ve->base.uabi_class = sibling->uabi_class;
5113 			snprintf(ve->base.name, sizeof(ve->base.name),
5114 				 "v%dx%d", ve->base.class, count);
5115 			ve->base.context_size = sibling->context_size;
5116 
5117 			ve->base.add_active_request =
5118 				sibling->add_active_request;
5119 			ve->base.remove_active_request =
5120 				sibling->remove_active_request;
5121 			ve->base.emit_bb_start = sibling->emit_bb_start;
5122 			ve->base.emit_flush = sibling->emit_flush;
5123 			ve->base.emit_init_breadcrumb =
5124 				sibling->emit_init_breadcrumb;
5125 			ve->base.emit_fini_breadcrumb =
5126 				sibling->emit_fini_breadcrumb;
5127 			ve->base.emit_fini_breadcrumb_dw =
5128 				sibling->emit_fini_breadcrumb_dw;
5129 			ve->base.breadcrumbs =
5130 				intel_breadcrumbs_get(sibling->breadcrumbs);
5131 
5132 			ve->base.flags |= sibling->flags;
5133 
5134 			ve->base.props.timeslice_duration_ms =
5135 				sibling->props.timeslice_duration_ms;
5136 			ve->base.props.preempt_timeout_ms =
5137 				sibling->props.preempt_timeout_ms;
5138 		}
5139 	}
5140 
5141 	return &ve->context;
5142 
5143 err_put:
5144 	intel_context_put(&ve->context);
5145 	return ERR_PTR(err);
5146 }
5147 
5148 bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
5149 {
5150 	struct intel_engine_cs *engine;
5151 	intel_engine_mask_t tmp, mask = ve->mask;
5152 
5153 	for_each_engine_masked(engine, ve->gt, mask, tmp)
5154 		if (READ_ONCE(engine->props.heartbeat_interval_ms))
5155 			return true;
5156 
5157 	return false;
5158 }
5159 
5160 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5161 #include "selftest_guc.c"
5162 #include "selftest_guc_multi_lrc.c"
5163 #include "selftest_guc_hangcheck.c"
5164 #endif
5165