1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2014 Intel Corporation
4  */
5 
6 #include <linux/circ_buf.h>
7 
8 #include "gem/i915_gem_context.h"
9 #include "gt/gen8_engine_cs.h"
10 #include "gt/intel_breadcrumbs.h"
11 #include "gt/intel_context.h"
12 #include "gt/intel_engine_pm.h"
13 #include "gt/intel_engine_heartbeat.h"
14 #include "gt/intel_gpu_commands.h"
15 #include "gt/intel_gt.h"
16 #include "gt/intel_gt_clock_utils.h"
17 #include "gt/intel_gt_irq.h"
18 #include "gt/intel_gt_pm.h"
19 #include "gt/intel_gt_requests.h"
20 #include "gt/intel_lrc.h"
21 #include "gt/intel_lrc_reg.h"
22 #include "gt/intel_mocs.h"
23 #include "gt/intel_ring.h"
24 
25 #include "intel_guc_ads.h"
26 #include "intel_guc_submission.h"
27 
28 #include "i915_drv.h"
29 #include "i915_trace.h"
30 
31 /**
32  * DOC: GuC-based command submission
33  *
34  * The Scratch registers:
35  * There are 16 MMIO-based registers starting at 0xC180. The kernel driver writes
36  * a value to the action register (SOFT_SCRATCH_0) along with any data. It then
37  * triggers an interrupt on the GuC via another register write (0xC4C8).
38  * Firmware writes a success/fail code back to the action register after
39  * processing the request. The kernel driver polls waiting for this update and
40  * then proceeds.
41  *
42  * Command Transport buffers (CTBs):
43  * Covered in detail in other sections but CTBs (Host to GuC - H2G, GuC to Host
44  * - G2H) are a message interface between the i915 and GuC.
45  *
46  * Context registration:
47  * Before a context can be submitted it must be registered with the GuC via a
48  * H2G. A unique guc_id is associated with each context. The context is either
49  * registered at request creation time (normal operation) or at submission time
50  * (abnormal operation, e.g. after a reset).
51  *
52  * Context submission:
53  * The i915 updates the LRC tail value in memory. The i915 must enable the
54  * scheduling of the context within the GuC for the GuC to actually consider it.
55  * Therefore, the first time a disabled context is submitted we use a schedule
56  * enable H2G, while follow up submissions are done via the context submit H2G,
57  * which informs the GuC that a previously enabled context has new work
58  * available.
59  *
60  * Context unpin:
61  * To unpin a context a H2G is used to disable scheduling. When the
62  * corresponding G2H returns indicating the scheduling disable operation has
63  * completed it is safe to unpin the context. While a disable is in flight it
64  * isn't safe to resubmit the context so a fence is used to stall all future
65  * requests of that context until the G2H is returned.
66  *
67  * Context deregistration:
68  * Before a context can be destroyed or if we steal its guc_id we must
69  * deregister the context with the GuC via H2G. If stealing the guc_id it isn't
70  * safe to submit anything to this guc_id until the deregister completes so a
71  * fence is used to stall all requests associated with this guc_id until the
72  * corresponding G2H returns indicating the guc_id has been deregistered.
73  *
74  * submission_state.guc_ids:
75  * Unique number associated with private GuC context data passed in during
76  * context registration / submission / deregistration. 64k available. Simple ida
77  * is used for allocation.
78  *
79  * Stealing guc_ids:
80  * If no guc_ids are available they can be stolen from another context at
81  * request creation time if that context is unpinned. If a guc_id can't be found
82  * we punt this problem to the user as we believe this is near impossible to hit
83  * during normal use cases.
84  *
85  * Locking:
86  * In the GuC submission code we have 3 basic spin locks which protect
87  * everything. Details about each below.
88  *
89  * sched_engine->lock
90  * This is the submission lock for all contexts that share an i915 schedule
91  * engine (sched_engine), thus only one of the contexts which share a
92  * sched_engine can be submitting at a time. Currently only one sched_engine is
93  * used for all of GuC submission but that could change in the future.
94  *
95  * guc->submission_state.lock
96  * Global lock for GuC submission state. Protects guc_ids and destroyed contexts
97  * list.
98  *
99  * ce->guc_state.lock
100  * Protects everything under ce->guc_state. Ensures that a context is in the
101  * correct state before issuing a H2G. e.g. We don't issue a schedule disable
102  * on a disabled context (bad idea), we don't issue a schedule enable when a
103  * schedule disable is in flight, etc... Also protects list of inflight requests
104  * on the context and the priority management state. Lock is individual to each
105  * context.
106  *
107  * Lock ordering rules:
108  * sched_engine->lock -> ce->guc_state.lock
109  * guc->submission_state.lock -> ce->guc_state.lock
110  *
111  * Reset races:
112  * When a full GT reset is triggered it is assumed that some G2H responses to
113  * H2Gs can be lost as the GuC is also reset. Losing these G2H can prove to be
114  * fatal as we do certain operations upon receiving a G2H (e.g. destroy
115  * contexts, release guc_ids, etc...). When this occurs we can scrub the
116  * context state and clean up appropriately, however this is quite racy.
117  * To avoid races, the reset code must disable submission before scrubbing for
118  * the missing G2H, while the submission code must check for submission being
119  * disabled and skip sending H2Gs and updating context states when it is. Both
120  * sides must also make sure to hold the relevant locks.
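 *
 * As an illustrative sketch (not a verbatim call site), the nesting implied
 * by the ordering rules above looks like:
 *
 *	spin_lock_irqsave(&sched_engine->lock, flags);
 *	spin_lock(&ce->guc_state.lock);
 *	... update per-context submission state ...
 *	spin_unlock(&ce->guc_state.lock);
 *	spin_unlock_irqrestore(&sched_engine->lock, flags);
 *
 * i.e. ce->guc_state.lock is always taken innermost; see
 * __unwind_incomplete_requests() for a real instance of this nesting.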
121  */
122 
123 /* GuC Virtual Engine */
124 struct guc_virtual_engine {
125 	struct intel_engine_cs base;
126 	struct intel_context context;
127 };
128 
129 static struct intel_context *
130 guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
131 		   unsigned long flags);
132 
133 static struct intel_context *
134 guc_create_parallel(struct intel_engine_cs **engines,
135 		    unsigned int num_siblings,
136 		    unsigned int width);
137 
138 #define GUC_REQUEST_SIZE 64 /* bytes */
139 
140 /*
141  * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous
142  * per the GuC submission interface. A different allocation algorithm is used
143  * (bitmap vs. ida) between multi-lrc and single-lrc hence the reason to
144  * partition the guc_id space. We believe the number of multi-lrc contexts in
145  * use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for
146  * multi-lrc.
147  */
148 #define NUMBER_MULTI_LRC_GUC_ID(guc)	\
149 	((guc)->submission_state.num_guc_ids / 16)
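
/*
 * For example, with the 64k guc_ids mentioned in the DOC comment above,
 * NUMBER_MULTI_LRC_GUC_ID() reserves 4096 contiguous ids for multi-lrc
 * contexts (allocated from a bitmap), leaving the remaining 61440 ids for
 * single-lrc contexts (allocated from an ida).
 */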
150 
151 /*
152  * Below is a set of functions which control the GuC scheduling state which
153  * require a lock.
154  */
155 #define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER	BIT(0)
156 #define SCHED_STATE_DESTROYED				BIT(1)
157 #define SCHED_STATE_PENDING_DISABLE			BIT(2)
158 #define SCHED_STATE_BANNED				BIT(3)
159 #define SCHED_STATE_ENABLED				BIT(4)
160 #define SCHED_STATE_PENDING_ENABLE			BIT(5)
161 #define SCHED_STATE_REGISTERED				BIT(6)
162 #define SCHED_STATE_BLOCKED_SHIFT			7
163 #define SCHED_STATE_BLOCKED		BIT(SCHED_STATE_BLOCKED_SHIFT)
164 #define SCHED_STATE_BLOCKED_MASK	(0xfff << SCHED_STATE_BLOCKED_SHIFT)
165 
166 static inline void init_sched_state(struct intel_context *ce)
167 {
168 	lockdep_assert_held(&ce->guc_state.lock);
169 	ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
170 }
171 
172 __maybe_unused
173 static bool sched_state_is_init(struct intel_context *ce)
174 {
175 	/*
176 	 * XXX: Kernel contexts can have SCHED_STATE_REGISTERED after
177 	 * suspend.
178 	 */
179 	return !(ce->guc_state.sched_state &
180 		 ~(SCHED_STATE_BLOCKED_MASK | SCHED_STATE_REGISTERED));
181 }
182 
183 static inline bool
184 context_wait_for_deregister_to_register(struct intel_context *ce)
185 {
186 	return ce->guc_state.sched_state &
187 		SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
188 }
189 
190 static inline void
191 set_context_wait_for_deregister_to_register(struct intel_context *ce)
192 {
193 	lockdep_assert_held(&ce->guc_state.lock);
194 	ce->guc_state.sched_state |=
195 		SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
196 }
197 
198 static inline void
199 clr_context_wait_for_deregister_to_register(struct intel_context *ce)
200 {
201 	lockdep_assert_held(&ce->guc_state.lock);
202 	ce->guc_state.sched_state &=
203 		~SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
204 }
205 
206 static inline bool
207 context_destroyed(struct intel_context *ce)
208 {
209 	return ce->guc_state.sched_state & SCHED_STATE_DESTROYED;
210 }
211 
212 static inline void
213 set_context_destroyed(struct intel_context *ce)
214 {
215 	lockdep_assert_held(&ce->guc_state.lock);
216 	ce->guc_state.sched_state |= SCHED_STATE_DESTROYED;
217 }
218 
219 static inline bool context_pending_disable(struct intel_context *ce)
220 {
221 	return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE;
222 }
223 
224 static inline void set_context_pending_disable(struct intel_context *ce)
225 {
226 	lockdep_assert_held(&ce->guc_state.lock);
227 	ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE;
228 }
229 
230 static inline void clr_context_pending_disable(struct intel_context *ce)
231 {
232 	lockdep_assert_held(&ce->guc_state.lock);
233 	ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE;
234 }
235 
236 static inline bool context_banned(struct intel_context *ce)
237 {
238 	return ce->guc_state.sched_state & SCHED_STATE_BANNED;
239 }
240 
241 static inline void set_context_banned(struct intel_context *ce)
242 {
243 	lockdep_assert_held(&ce->guc_state.lock);
244 	ce->guc_state.sched_state |= SCHED_STATE_BANNED;
245 }
246 
247 static inline void clr_context_banned(struct intel_context *ce)
248 {
249 	lockdep_assert_held(&ce->guc_state.lock);
250 	ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
251 }
252 
253 static inline bool context_enabled(struct intel_context *ce)
254 {
255 	return ce->guc_state.sched_state & SCHED_STATE_ENABLED;
256 }
257 
258 static inline void set_context_enabled(struct intel_context *ce)
259 {
260 	lockdep_assert_held(&ce->guc_state.lock);
261 	ce->guc_state.sched_state |= SCHED_STATE_ENABLED;
262 }
263 
264 static inline void clr_context_enabled(struct intel_context *ce)
265 {
266 	lockdep_assert_held(&ce->guc_state.lock);
267 	ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED;
268 }
269 
270 static inline bool context_pending_enable(struct intel_context *ce)
271 {
272 	return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE;
273 }
274 
275 static inline void set_context_pending_enable(struct intel_context *ce)
276 {
277 	lockdep_assert_held(&ce->guc_state.lock);
278 	ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE;
279 }
280 
281 static inline void clr_context_pending_enable(struct intel_context *ce)
282 {
283 	lockdep_assert_held(&ce->guc_state.lock);
284 	ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE;
285 }
286 
287 static inline bool context_registered(struct intel_context *ce)
288 {
289 	return ce->guc_state.sched_state & SCHED_STATE_REGISTERED;
290 }
291 
292 static inline void set_context_registered(struct intel_context *ce)
293 {
294 	lockdep_assert_held(&ce->guc_state.lock);
295 	ce->guc_state.sched_state |= SCHED_STATE_REGISTERED;
296 }
297 
298 static inline void clr_context_registered(struct intel_context *ce)
299 {
300 	lockdep_assert_held(&ce->guc_state.lock);
301 	ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
302 }
303 
304 static inline u32 context_blocked(struct intel_context *ce)
305 {
306 	return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
307 		SCHED_STATE_BLOCKED_SHIFT;
308 }
309 
310 static inline void incr_context_blocked(struct intel_context *ce)
311 {
312 	lockdep_assert_held(&ce->guc_state.lock);
313 
314 	ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
315 
316 	GEM_BUG_ON(!context_blocked(ce));	/* Overflow check */
317 }
318 
319 static inline void decr_context_blocked(struct intel_context *ce)
320 {
321 	lockdep_assert_held(&ce->guc_state.lock);
322 
323 	GEM_BUG_ON(!context_blocked(ce));	/* Underflow check */
324 
325 	ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
326 }
327 
328 static inline bool context_has_committed_requests(struct intel_context *ce)
329 {
330 	return !!ce->guc_state.number_committed_requests;
331 }
332 
333 static inline void incr_context_committed_requests(struct intel_context *ce)
334 {
335 	lockdep_assert_held(&ce->guc_state.lock);
336 	++ce->guc_state.number_committed_requests;
337 	GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
338 }
339 
340 static inline void decr_context_committed_requests(struct intel_context *ce)
341 {
342 	lockdep_assert_held(&ce->guc_state.lock);
343 	--ce->guc_state.number_committed_requests;
344 	GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
345 }
346 
347 static struct intel_context *
348 request_to_scheduling_context(struct i915_request *rq)
349 {
350 	return intel_context_to_parent(rq->context);
351 }
352 
353 static inline bool context_guc_id_invalid(struct intel_context *ce)
354 {
355 	return ce->guc_id.id == GUC_INVALID_LRC_ID;
356 }
357 
358 static inline void set_context_guc_id_invalid(struct intel_context *ce)
359 {
360 	ce->guc_id.id = GUC_INVALID_LRC_ID;
361 }
362 
363 static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
364 {
365 	return &ce->engine->gt->uc.guc;
366 }
367 
368 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
369 {
370 	return rb_entry(rb, struct i915_priolist, node);
371 }
372 
373 /*
374  * When using multi-lrc submission a scratch memory area is reserved in the
375  * parent's context state for the process descriptor, work queue, and handshake
376  * between the parent + children contexts to insert safe preemption points
377  * between each of the BBs. Currently the scratch area is sized to a page.
378  *
379  * The layout of this scratch area is below:
380  * 0						guc_process_desc
381  * + sizeof(struct guc_process_desc)		child go
382  * + CACHELINE_BYTES				child join[0]
383  * ...
384  * + CACHELINE_BYTES				child join[n - 1]
385  * ...						unused
386  * PARENT_SCRATCH_SIZE / 2			work queue start
387  * ...						work queue
388  * PARENT_SCRATCH_SIZE - 1			work queue end
389  */
390 #define WQ_SIZE			(PARENT_SCRATCH_SIZE / 2)
391 #define WQ_OFFSET		(PARENT_SCRATCH_SIZE - WQ_SIZE)
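
/*
 * A concrete instance of the layout above, assuming PARENT_SCRATCH_SIZE is
 * one page (4K): the work queue occupies the upper half of the scratch area,
 * i.e. WQ_OFFSET = WQ_SIZE = 2K, with the process descriptor and the go/join
 * semaphores packed into the lower half.
 */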
392 
393 struct sync_semaphore {
394 	u32 semaphore;
395 	u8 unused[CACHELINE_BYTES - sizeof(u32)];
396 };
397 
398 struct parent_scratch {
399 	struct guc_process_desc pdesc;
400 
401 	struct sync_semaphore go;
402 	struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
403 
404 	u8 unused[WQ_OFFSET - sizeof(struct guc_process_desc) -
405 		sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
406 
407 	u32 wq[WQ_SIZE / sizeof(u32)];
408 };
409 
410 static u32 __get_parent_scratch_offset(struct intel_context *ce)
411 {
412 	GEM_BUG_ON(!ce->parallel.guc.parent_page);
413 
414 	return ce->parallel.guc.parent_page * PAGE_SIZE;
415 }
416 
417 static u32 __get_wq_offset(struct intel_context *ce)
418 {
419 	BUILD_BUG_ON(offsetof(struct parent_scratch, wq) != WQ_OFFSET);
420 
421 	return __get_parent_scratch_offset(ce) + WQ_OFFSET;
422 }
423 
424 static struct parent_scratch *
425 __get_parent_scratch(struct intel_context *ce)
426 {
427 	BUILD_BUG_ON(sizeof(struct parent_scratch) != PARENT_SCRATCH_SIZE);
428 	BUILD_BUG_ON(sizeof(struct sync_semaphore) != CACHELINE_BYTES);
429 
430 	/*
431 	 * Need to subtract LRC_STATE_OFFSET here as the
432 	 * parallel.guc.parent_page is the offset into ce->state while
433 	 * ce->lrc_reg_state is ce->state + LRC_STATE_OFFSET.
434 	 */
435 	return (struct parent_scratch *)
436 		(ce->lrc_reg_state +
437 		 ((__get_parent_scratch_offset(ce) -
438 		   LRC_STATE_OFFSET) / sizeof(u32)));
439 }
440 
441 static struct guc_process_desc *
442 __get_process_desc(struct intel_context *ce)
443 {
444 	struct parent_scratch *ps = __get_parent_scratch(ce);
445 
446 	return &ps->pdesc;
447 }
448 
449 static u32 *get_wq_pointer(struct guc_process_desc *desc,
450 			   struct intel_context *ce,
451 			   u32 wqi_size)
452 {
453 	/*
454 	 * Check for space in the work queue. The head pointer is cached in the
455 	 * intel_context structure in order to reduce the number of accesses to
456 	 * shared GPU memory, which may be across a PCIe bus.
457 	 */
458 #define AVAILABLE_SPACE	\
459 	CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
460 	if (wqi_size > AVAILABLE_SPACE) {
461 		ce->parallel.guc.wqi_head = READ_ONCE(desc->head);
462 
463 		if (wqi_size > AVAILABLE_SPACE)
464 			return NULL;
465 	}
466 #undef AVAILABLE_SPACE
467 
468 	return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)];
469 }
470 
471 static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index)
472 {
473 	struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr;
474 
475 	GEM_BUG_ON(index >= GUC_MAX_LRC_DESCRIPTORS);
476 
477 	return &base[index];
478 }
479 
480 static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
481 {
482 	struct intel_context *ce = xa_load(&guc->context_lookup, id);
483 
484 	GEM_BUG_ON(id >= GUC_MAX_LRC_DESCRIPTORS);
485 
486 	return ce;
487 }
488 
489 static int guc_lrc_desc_pool_create(struct intel_guc *guc)
490 {
491 	u32 size;
492 	int ret;
493 
494 	size = PAGE_ALIGN(sizeof(struct guc_lrc_desc) *
495 			  GUC_MAX_LRC_DESCRIPTORS);
496 	ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool,
497 					     (void **)&guc->lrc_desc_pool_vaddr);
498 	if (ret)
499 		return ret;
500 
501 	return 0;
502 }
503 
504 static void guc_lrc_desc_pool_destroy(struct intel_guc *guc)
505 {
506 	guc->lrc_desc_pool_vaddr = NULL;
507 	i915_vma_unpin_and_release(&guc->lrc_desc_pool, I915_VMA_RELEASE_MAP);
508 }
509 
510 static inline bool guc_submission_initialized(struct intel_guc *guc)
511 {
512 	return !!guc->lrc_desc_pool_vaddr;
513 }
514 
515 static inline void reset_lrc_desc(struct intel_guc *guc, u32 id)
516 {
517 	if (likely(guc_submission_initialized(guc))) {
518 		struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
519 		unsigned long flags;
520 
521 		memset(desc, 0, sizeof(*desc));
522 
523 		/*
524 		 * xarray API doesn't have xa_erase_irqsave wrapper, so calling
525 		 * the lower level functions directly.
526 		 */
527 		xa_lock_irqsave(&guc->context_lookup, flags);
528 		__xa_erase(&guc->context_lookup, id);
529 		xa_unlock_irqrestore(&guc->context_lookup, flags);
530 	}
531 }
532 
533 static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id)
534 {
535 	return __get_context(guc, id);
536 }
537 
538 static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
539 					   struct intel_context *ce)
540 {
541 	unsigned long flags;
542 
543 	/*
544 	 * xarray API doesn't have xa_store_irqsave wrapper, so calling the
545 	 * lower level functions directly.
546 	 */
547 	xa_lock_irqsave(&guc->context_lookup, flags);
548 	__xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC);
549 	xa_unlock_irqrestore(&guc->context_lookup, flags);
550 }
551 
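/*
 * Decrement the count of G2H replies we are still expecting from the GuC and,
 * once it hits zero, wake anyone sleeping on guc->ct.wq (e.g.
 * intel_guc_wait_for_idle()).
 */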
552 static void decr_outstanding_submission_g2h(struct intel_guc *guc)
553 {
554 	if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
555 		wake_up_all(&guc->ct.wq);
556 }
557 
558 static int guc_submission_send_busy_loop(struct intel_guc *guc,
559 					 const u32 *action,
560 					 u32 len,
561 					 u32 g2h_len_dw,
562 					 bool loop)
563 {
564 	/*
565 	 * We always loop when a send requires a reply (i.e. g2h_len_dw > 0),
566 	 * so we don't handle the case where we don't get a reply because we
567 	 * aborted the send due to the channel being busy.
568 	 */
569 	GEM_BUG_ON(g2h_len_dw && !loop);
570 
571 	if (g2h_len_dw)
572 		atomic_inc(&guc->outstanding_submission_g2h);
573 
574 	return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
575 }
576 
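/*
 * Wait for the atomic counter in @wait_var to drop to zero, sleeping on
 * guc->ct.wq. Returns 0 on success, -ETIME if @timeout expires and -EINTR if
 * interrupted while waiting interruptibly.
 */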
577 int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
578 				   atomic_t *wait_var,
579 				   bool interruptible,
580 				   long timeout)
581 {
582 	const int state = interruptible ?
583 		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
584 	DEFINE_WAIT(wait);
585 
586 	might_sleep();
587 	GEM_BUG_ON(timeout < 0);
588 
589 	if (!atomic_read(wait_var))
590 		return 0;
591 
592 	if (!timeout)
593 		return -ETIME;
594 
595 	for (;;) {
596 		prepare_to_wait(&guc->ct.wq, &wait, state);
597 
598 		if (!atomic_read(wait_var))
599 			break;
600 
601 		if (signal_pending_state(state, current)) {
602 			timeout = -EINTR;
603 			break;
604 		}
605 
606 		if (!timeout) {
607 			timeout = -ETIME;
608 			break;
609 		}
610 
611 		timeout = io_schedule_timeout(timeout);
612 	}
613 	finish_wait(&guc->ct.wq, &wait);
614 
615 	return (timeout < 0) ? timeout : 0;
616 }
617 
618 int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
619 {
620 	if (!intel_uc_uses_guc_submission(&guc_to_gt(guc)->uc))
621 		return 0;
622 
623 	return intel_guc_wait_for_pending_msg(guc,
624 					      &guc->outstanding_submission_g2h,
625 					      true, timeout);
626 }
627 
628 static int guc_lrc_desc_pin(struct intel_context *ce, bool loop);
629 
630 static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
631 {
632 	int err = 0;
633 	struct intel_context *ce = request_to_scheduling_context(rq);
634 	u32 action[3];
635 	int len = 0;
636 	u32 g2h_len_dw = 0;
637 	bool enabled;
638 
639 	lockdep_assert_held(&rq->engine->sched_engine->lock);
640 
641 	/*
642 	 * Corner case where requests were sitting in the priority list or a
643 	 * request was resubmitted after the context was banned.
644 	 */
645 	if (unlikely(intel_context_is_banned(ce))) {
646 		i915_request_put(i915_request_mark_eio(rq));
647 		intel_engine_signal_breadcrumbs(ce->engine);
648 		return 0;
649 	}
650 
651 	GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
652 	GEM_BUG_ON(context_guc_id_invalid(ce));
653 
654 	spin_lock(&ce->guc_state.lock);
655 
656 	/*
657 	 * The request / context will be run on the hardware when scheduling
658 	 * gets enabled in the unblock. For multi-lrc we still submit the
659 	 * context to move the LRC tails.
660 	 */
661 	if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce)))
662 		goto out;
663 
664 	enabled = context_enabled(ce) || context_blocked(ce);
665 
666 	if (!enabled) {
667 		action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
668 		action[len++] = ce->guc_id.id;
669 		action[len++] = GUC_CONTEXT_ENABLE;
670 		set_context_pending_enable(ce);
671 		intel_context_get(ce);
672 		g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
673 	} else {
674 		action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT;
675 		action[len++] = ce->guc_id.id;
676 	}
677 
678 	err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
679 	if (!enabled && !err) {
680 		trace_intel_context_sched_enable(ce);
681 		atomic_inc(&guc->outstanding_submission_g2h);
682 		set_context_enabled(ce);
683 
684 		/*
685 		 * Without multi-lrc, the KMD does the submission step (moving
686 		 * the LRC tail) so enabling scheduling is sufficient to submit
687 		 * the context. This isn't the case in multi-lrc submission as the
688 		 * GuC needs to move the tails, hence the need for another H2G
689 		 * to submit a multi-lrc context after enabling scheduling.
690 		 */
691 		if (intel_context_is_parent(ce)) {
692 			action[0] = INTEL_GUC_ACTION_SCHED_CONTEXT;
693 			err = intel_guc_send_nb(guc, action, len - 1, 0);
694 		}
695 	} else if (!enabled) {
696 		clr_context_pending_enable(ce);
697 		intel_context_put(ce);
698 	}
699 	if (likely(!err))
700 		trace_i915_request_guc_submit(rq);
701 
702 out:
703 	spin_unlock(&ce->guc_state.lock);
704 	return err;
705 }
706 
707 static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
708 {
709 	int ret = __guc_add_request(guc, rq);
710 
711 	if (unlikely(ret == -EBUSY)) {
712 		guc->stalled_request = rq;
713 		guc->submission_stall_reason = STALL_ADD_REQUEST;
714 	}
715 
716 	return ret;
717 }
718 
719 static inline void guc_set_lrc_tail(struct i915_request *rq)
720 {
721 	rq->context->lrc_reg_state[CTX_RING_TAIL] =
722 		intel_ring_set_tail(rq->ring, rq->tail);
723 }
724 
725 static inline int rq_prio(const struct i915_request *rq)
726 {
727 	return rq->sched.attr.priority;
728 }
729 
730 static bool is_multi_lrc_rq(struct i915_request *rq)
731 {
732 	return intel_context_is_parallel(rq->context);
733 }
734 
735 static bool can_merge_rq(struct i915_request *rq,
736 			 struct i915_request *last)
737 {
738 	return request_to_scheduling_context(rq) ==
739 		request_to_scheduling_context(last);
740 }
741 
742 static u32 wq_space_until_wrap(struct intel_context *ce)
743 {
744 	return (WQ_SIZE - ce->parallel.guc.wqi_tail);
745 }
746 
747 static void write_wqi(struct guc_process_desc *desc,
748 		      struct intel_context *ce,
749 		      u32 wqi_size)
750 {
751 	BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
752 
753 	/*
754 	 * Ensure WQI are visible before updating tail
755 	 */
756 	intel_guc_write_barrier(ce_to_guc(ce));
757 
758 	ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
759 		(WQ_SIZE - 1);
760 	WRITE_ONCE(desc->tail, ce->parallel.guc.wqi_tail);
761 }
762 
763 static int guc_wq_noop_append(struct intel_context *ce)
764 {
765 	struct guc_process_desc *desc = __get_process_desc(ce);
766 	u32 *wqi = get_wq_pointer(desc, ce, wq_space_until_wrap(ce));
767 	u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
768 
769 	if (!wqi)
770 		return -EBUSY;
771 
772 	GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
773 
774 	*wqi = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
775 		FIELD_PREP(WQ_LEN_MASK, len_dw);
776 	ce->parallel.guc.wqi_tail = 0;
777 
778 	return 0;
779 }
780 
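/*
 * Append a multi-lrc work queue item for @rq's parent context. The WQI built
 * below consists of a header dword (type + length), the parent's lrc.lrca, a
 * dword packing the guc_id with the parent ring tail (in qwords), an unused
 * fence_id and one ring-tail dword per child context.
 */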
781 static int __guc_wq_item_append(struct i915_request *rq)
782 {
783 	struct intel_context *ce = request_to_scheduling_context(rq);
784 	struct intel_context *child;
785 	struct guc_process_desc *desc = __get_process_desc(ce);
786 	unsigned int wqi_size = (ce->parallel.number_children + 4) *
787 		sizeof(u32);
788 	u32 *wqi;
789 	u32 len_dw = (wqi_size / sizeof(u32)) - 1;
790 	int ret;
791 
792 	/* Ensure context is in correct state before updating the work queue */
793 	GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
794 	GEM_BUG_ON(context_guc_id_invalid(ce));
795 	GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
796 	GEM_BUG_ON(!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id));
797 
798 	/* Insert NOOP if this work queue item will wrap the tail pointer. */
799 	if (wqi_size > wq_space_until_wrap(ce)) {
800 		ret = guc_wq_noop_append(ce);
801 		if (ret)
802 			return ret;
803 	}
804 
805 	wqi = get_wq_pointer(desc, ce, wqi_size);
806 	if (!wqi)
807 		return -EBUSY;
808 
809 	GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
810 
811 	*wqi++ = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
812 		FIELD_PREP(WQ_LEN_MASK, len_dw);
813 	*wqi++ = ce->lrc.lrca;
814 	*wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) |
815 	       FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64));
816 	*wqi++ = 0;	/* fence_id */
817 	for_each_child(ce, child)
818 		*wqi++ = child->ring->tail / sizeof(u64);
819 
820 	write_wqi(desc, ce, wqi_size);
821 
822 	return 0;
823 }
824 
825 static int guc_wq_item_append(struct intel_guc *guc,
826 			      struct i915_request *rq)
827 {
828 	struct intel_context *ce = request_to_scheduling_context(rq);
829 	int ret = 0;
830 
831 	if (likely(!intel_context_is_banned(ce))) {
832 		ret = __guc_wq_item_append(rq);
833 
834 		if (unlikely(ret == -EBUSY)) {
835 			guc->stalled_request = rq;
836 			guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
837 		}
838 	}
839 
840 	return ret;
841 }
842 
843 static bool multi_lrc_submit(struct i915_request *rq)
844 {
845 	struct intel_context *ce = request_to_scheduling_context(rq);
846 
847 	intel_ring_set_tail(rq->ring, rq->tail);
848 
849 	/*
850 	 * We expect the front end (execbuf IOCTL) to set this flag on the last
851 	 * request generated from a multi-BB submission. This indicates to the
852 	 * backend (GuC interface) that we should submit this context thus
853 	 * submitting all the requests generated in parallel.
854 	 */
855 	return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
856 		intel_context_is_banned(ce);
857 }
858 
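/*
 * Pull requests off the priority queue, coalescing requests that belong to
 * the same scheduling context, and push them to the GuC: register the context
 * if needed, move the LRC tail (or append a WQI for multi-lrc) and send the
 * submission H2G. Returns true if work was submitted so the tasklet loops
 * again; on -EBUSY the stalled request and stall reason are recorded so the
 * next pass can resume at the right step.
 */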
859 static int guc_dequeue_one_context(struct intel_guc *guc)
860 {
861 	struct i915_sched_engine * const sched_engine = guc->sched_engine;
862 	struct i915_request *last = NULL;
863 	bool submit = false;
864 	struct rb_node *rb;
865 	int ret;
866 
867 	lockdep_assert_held(&sched_engine->lock);
868 
869 	if (guc->stalled_request) {
870 		submit = true;
871 		last = guc->stalled_request;
872 
873 		switch (guc->submission_stall_reason) {
874 		case STALL_REGISTER_CONTEXT:
875 			goto register_context;
876 		case STALL_MOVE_LRC_TAIL:
877 			goto move_lrc_tail;
878 		case STALL_ADD_REQUEST:
879 			goto add_request;
880 		default:
881 			MISSING_CASE(guc->submission_stall_reason);
882 		}
883 	}
884 
885 	while ((rb = rb_first_cached(&sched_engine->queue))) {
886 		struct i915_priolist *p = to_priolist(rb);
887 		struct i915_request *rq, *rn;
888 
889 		priolist_for_each_request_consume(rq, rn, p) {
890 			if (last && !can_merge_rq(rq, last))
891 				goto register_context;
892 
893 			list_del_init(&rq->sched.link);
894 
895 			__i915_request_submit(rq);
896 
897 			trace_i915_request_in(rq, 0);
898 			last = rq;
899 
900 			if (is_multi_lrc_rq(rq)) {
901 				/*
902 				 * We need to coalesce all multi-lrc requests in
903 				 * a relationship into a single H2G. We are
904 				 * guaranteed that all of these requests will be
905 				 * submitted sequentially.
906 				 */
907 				if (multi_lrc_submit(rq)) {
908 					submit = true;
909 					goto register_context;
910 				}
911 			} else {
912 				submit = true;
913 			}
914 		}
915 
916 		rb_erase_cached(&p->node, &sched_engine->queue);
917 		i915_priolist_free(p);
918 	}
919 
920 register_context:
921 	if (submit) {
922 		struct intel_context *ce = request_to_scheduling_context(last);
923 
924 		if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id) &&
925 			     !intel_context_is_banned(ce))) {
926 			ret = guc_lrc_desc_pin(ce, false);
927 			if (unlikely(ret == -EPIPE)) {
928 				goto deadlk;
929 			} else if (ret == -EBUSY) {
930 				guc->stalled_request = last;
931 				guc->submission_stall_reason =
932 					STALL_REGISTER_CONTEXT;
933 				goto schedule_tasklet;
934 			} else if (ret != 0) {
935 				GEM_WARN_ON(ret);	/* Unexpected */
936 				goto deadlk;
937 			}
938 		}
939 
940 move_lrc_tail:
941 		if (is_multi_lrc_rq(last)) {
942 			ret = guc_wq_item_append(guc, last);
943 			if (ret == -EBUSY) {
944 				goto schedule_tasklet;
945 			} else if (ret != 0) {
946 				GEM_WARN_ON(ret);	/* Unexpected */
947 				goto deadlk;
948 			}
949 		} else {
950 			guc_set_lrc_tail(last);
951 		}
952 
953 add_request:
954 		ret = guc_add_request(guc, last);
955 		if (unlikely(ret == -EPIPE)) {
956 			goto deadlk;
957 		} else if (ret == -EBUSY) {
958 			goto schedule_tasklet;
959 		} else if (ret != 0) {
960 			GEM_WARN_ON(ret);	/* Unexpected */
961 			goto deadlk;
962 		}
963 	}
964 
965 	guc->stalled_request = NULL;
966 	guc->submission_stall_reason = STALL_NONE;
967 	return submit;
968 
969 deadlk:
970 	sched_engine->tasklet.callback = NULL;
971 	tasklet_disable_nosync(&sched_engine->tasklet);
972 	return false;
973 
974 schedule_tasklet:
975 	tasklet_schedule(&sched_engine->tasklet);
976 	return false;
977 }
978 
979 static void guc_submission_tasklet(struct tasklet_struct *t)
980 {
981 	struct i915_sched_engine *sched_engine =
982 		from_tasklet(sched_engine, t, tasklet);
983 	unsigned long flags;
984 	bool loop;
985 
986 	spin_lock_irqsave(&sched_engine->lock, flags);
987 
988 	do {
989 		loop = guc_dequeue_one_context(sched_engine->private_data);
990 	} while (loop);
991 
992 	i915_sched_engine_reset_on_empty(sched_engine);
993 
994 	spin_unlock_irqrestore(&sched_engine->lock, flags);
995 }
996 
997 static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
998 {
999 	if (iir & GT_RENDER_USER_INTERRUPT)
1000 		intel_engine_signal_breadcrumbs(engine);
1001 }
1002 
1003 static void __guc_context_destroy(struct intel_context *ce);
1004 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
1005 static void guc_signal_context_fence(struct intel_context *ce);
1006 static void guc_cancel_context_requests(struct intel_context *ce);
1007 static void guc_blocked_fence_complete(struct intel_context *ce);
1008 
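/*
 * After a full GT reset any G2H replies we were waiting for are gone, so walk
 * every registered context and perform, by hand, the state transition the
 * lost reply would have triggered (pending enable/disable, deregister or
 * destroy), as described under "Reset races" in the DOC comment above.
 */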
1009 static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
1010 {
1011 	struct intel_context *ce;
1012 	unsigned long index, flags;
1013 	bool pending_disable, pending_enable, deregister, destroyed, banned;
1014 
1015 	xa_lock_irqsave(&guc->context_lookup, flags);
1016 	xa_for_each(&guc->context_lookup, index, ce) {
1017 		/*
1018 		 * Corner case where the ref count on the context is zero but the
1019 		 * deregister G2H was lost. In this case we don't touch the ref
1020 		 * count and just finish the destroy of the context.
1021 		 */
1022 		bool do_put = kref_get_unless_zero(&ce->ref);
1023 
1024 		xa_unlock(&guc->context_lookup);
1025 
1026 		spin_lock(&ce->guc_state.lock);
1027 
1028 		/*
1029 		 * Once we are at this point submission_disabled() is guaranteed
1030 		 * to be visible to all callers who set the below flags (see above
1031 		 * flush and flushes in reset_prepare). If submission_disabled()
1032 		 * is set, the caller shouldn't set these flags.
1033 		 */
1034 
1035 		destroyed = context_destroyed(ce);
1036 		pending_enable = context_pending_enable(ce);
1037 		pending_disable = context_pending_disable(ce);
1038 		deregister = context_wait_for_deregister_to_register(ce);
1039 		banned = context_banned(ce);
1040 		init_sched_state(ce);
1041 
1042 		spin_unlock(&ce->guc_state.lock);
1043 
1044 		if (pending_enable || destroyed || deregister) {
1045 			decr_outstanding_submission_g2h(guc);
1046 			if (deregister)
1047 				guc_signal_context_fence(ce);
1048 			if (destroyed) {
1049 				intel_gt_pm_put_async(guc_to_gt(guc));
1050 				release_guc_id(guc, ce);
1051 				__guc_context_destroy(ce);
1052 			}
1053 			if (pending_enable || deregister)
1054 				intel_context_put(ce);
1055 		}
1056 
1057 		/* Not mutually exclusive with the above if statement. */
1058 		if (pending_disable) {
1059 			guc_signal_context_fence(ce);
1060 			if (banned) {
1061 				guc_cancel_context_requests(ce);
1062 				intel_engine_signal_breadcrumbs(ce->engine);
1063 			}
1064 			intel_context_sched_disable_unpin(ce);
1065 			decr_outstanding_submission_g2h(guc);
1066 
1067 			spin_lock(&ce->guc_state.lock);
1068 			guc_blocked_fence_complete(ce);
1069 			spin_unlock(&ce->guc_state.lock);
1070 
1071 			intel_context_put(ce);
1072 		}
1073 
1074 		if (do_put)
1075 			intel_context_put(ce);
1076 		xa_lock(&guc->context_lookup);
1077 	}
1078 	xa_unlock_irqrestore(&guc->context_lookup, flags);
1079 }
1080 
1081 /*
1082  * GuC stores busyness stats for each engine at context in/out boundaries. A
1083  * context 'in' logs execution start time, 'out' adds in -> out delta to total.
1084  * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
1085  * GuC.
1086  *
1087  * __i915_pmu_event_read samples engine busyness. When sampling, if context id
1088  * is valid (!= ~0) and start is non-zero, the engine is considered to be
1089  * active. For an active engine total busyness = total + (now - start), where
1090  * 'now' is the time at which the busyness is sampled. For inactive engine,
1091  * total busyness = total.
1092  *
1093  * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
1094  *
1095  * The start and total values provided by GuC are 32 bits and wrap around in a
1096  * few minutes. Since perf pmu provides busyness as 64 bit monotonically
1097  * increasing ns values, there is a need for this implementation to account for
1098  * overflows and extend the GuC provided values to 64 bits before returning
1099  * busyness to the user. In order to do that, a worker runs periodically with
1100  * a period of 1/8th the time it takes for the timestamp to wrap (i.e. once in
1101  * 27 seconds for a gt clock frequency of 19.2 MHz).
1102  */
1103 
1104 #define WRAP_TIME_CLKS U32_MAX
1105 #define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3)
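
/*
 * Back-of-the-envelope figures for the example clock above: at 19.2 MHz a
 * 32 bit timestamp wraps after roughly 2^32 / 19.2e6 ~= 224 seconds, and
 * POLL_TIME_CLKS is an eighth of that, matching the ping period quoted in
 * the comment above.
 */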
1106 
1107 static void
1108 __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
1109 {
1110 	u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
1111 	u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp);
1112 
1113 	if (new_start == lower_32_bits(*prev_start))
1114 		return;
1115 
1116 	/*
1117 	 * When gt is unparked, we update the gt timestamp and start the ping
1118 	 * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
1119 	 * is unparked, all switched in contexts will have a start time that is
1120 	 * within +/- POLL_TIME_CLKS of the most recent gt_stamp.
1121 	 *
1122 	 * If neither gt_stamp nor new_start has rolled over, then the
1123 	 * gt_stamp_hi does not need to be adjusted, however if one of them has
1124 	 * rolled over, we need to adjust gt_stamp_hi accordingly.
1125 	 *
1126 	 * The below conditions address the cases of new_start rollover and
1127 	 * gt_stamp_last rollover respectively.
1128 	 */
1129 	if (new_start < gt_stamp_last &&
1130 	    (new_start - gt_stamp_last) <= POLL_TIME_CLKS)
1131 		gt_stamp_hi++;
1132 
1133 	if (new_start > gt_stamp_last &&
1134 	    (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi)
1135 		gt_stamp_hi--;
1136 
1137 	*prev_start = ((u64)gt_stamp_hi << 32) | new_start;
1138 }
1139 
1140 /*
1141  * GuC updates shared memory and KMD reads it. Since this is not synchronized,
1142  * we run into a race where the value read is inconsistent. Sometimes the
1143  * inconsistency is in reading the upper MSB bytes of the last_in value when
1144  * this race occurs. Two types of corruption are seen - upper 8 bits zeroed and
1145  * upper 24 bits zeroed. Since the corrupted reads are still non-zero, it is
1146  * non-trivial to determine their validity. Instead we read the values multiple
1147  * times until they are consistent. In test runs, 3 attempts result in
1148  * consistent values. The upper bound is set to 6 attempts and may need to be
1149  * tuned as per any new occurrences.
1150  */
1151 static void __get_engine_usage_record(struct intel_engine_cs *engine,
1152 				      u32 *last_in, u32 *id, u32 *total)
1153 {
1154 	struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
1155 	int i = 0;
1156 
1157 	do {
1158 		*last_in = READ_ONCE(rec->last_switch_in_stamp);
1159 		*id = READ_ONCE(rec->current_context_index);
1160 		*total = READ_ONCE(rec->total_runtime);
1161 
1162 		if (READ_ONCE(rec->last_switch_in_stamp) == *last_in &&
1163 		    READ_ONCE(rec->current_context_index) == *id &&
1164 		    READ_ONCE(rec->total_runtime) == *total)
1165 			break;
1166 	} while (++i < 6);
1167 }
1168 
1169 static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
1170 {
1171 	struct intel_engine_guc_stats *stats = &engine->stats.guc;
1172 	struct intel_guc *guc = &engine->gt->uc.guc;
1173 	u32 last_switch, ctx_id, total;
1174 
1175 	lockdep_assert_held(&guc->timestamp.lock);
1176 
1177 	__get_engine_usage_record(engine, &last_switch, &ctx_id, &total);
1178 
1179 	stats->running = ctx_id != ~0U && last_switch;
1180 	if (stats->running)
1181 		__extend_last_switch(guc, &stats->start_gt_clk, last_switch);
1182 
1183 	/*
1184 	 * Instead of adjusting the total for overflow, just add the
1185 	 * difference from the previous sample to stats->total_gt_clks.
1186 	 */
1187 	if (total && total != ~0U) {
1188 		stats->total_gt_clks += (u32)(total - stats->prev_total);
1189 		stats->prev_total = total;
1190 	}
1191 }
1192 
1193 static u32 gpm_timestamp_shift(struct intel_gt *gt)
1194 {
1195 	intel_wakeref_t wakeref;
1196 	u32 reg, shift;
1197 
1198 	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
1199 		reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
1200 
1201 	shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
1202 		GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
1203 
1204 	return 3 - shift;
1205 }
1206 
1207 static u64 gpm_timestamp(struct intel_gt *gt)
1208 {
1209 	u32 lo, hi, old_hi, loop = 0;
1210 
1211 	hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
1212 	do {
1213 		lo = intel_uncore_read(gt->uncore, MISC_STATUS0);
1214 		old_hi = hi;
1215 		hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
1216 	} while (old_hi != hi && loop++ < 2);
1217 
1218 	return ((u64)hi << 32) | lo;
1219 }
1220 
1221 static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
1222 {
1223 	struct intel_gt *gt = guc_to_gt(guc);
1224 	u32 gt_stamp_lo, gt_stamp_hi;
1225 	u64 gpm_ts;
1226 
1227 	lockdep_assert_held(&guc->timestamp.lock);
1228 
1229 	gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
1230 	gpm_ts = gpm_timestamp(gt) >> guc->timestamp.shift;
1231 	gt_stamp_lo = lower_32_bits(gpm_ts);
1232 	*now = ktime_get();
1233 
1234 	if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp))
1235 		gt_stamp_hi++;
1236 
1237 	guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo;
1238 }
1239 
1240 /*
1241  * Unlike the execlist mode of submission, total and active times are in terms of
1242  * gt clocks. The *now parameter is retained to return the cpu time at which the
1243  * busyness was sampled.
1244  */
1245 static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
1246 {
1247 	struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
1248 	struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
1249 	struct intel_gt *gt = engine->gt;
1250 	struct intel_guc *guc = &gt->uc.guc;
1251 	u64 total, gt_stamp_saved;
1252 	unsigned long flags;
1253 	u32 reset_count;
1254 	bool in_reset;
1255 
1256 	spin_lock_irqsave(&guc->timestamp.lock, flags);
1257 
1258 	/*
1259 	 * If a reset happened, we risk reading partially updated engine
1260 	 * busyness from GuC, so we just use the driver stored copy of busyness.
1261 	 * Synchronize with gt reset using reset_count and the
1262 	 * I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count
1263 	 * after I915_RESET_BACKOFF flag, so ensure that the reset_count is
1264 	 * usable by checking the flag afterwards.
1265 	 */
1266 	reset_count = i915_reset_count(gpu_error);
1267 	in_reset = test_bit(I915_RESET_BACKOFF, &gt->reset.flags);
1268 
1269 	*now = ktime_get();
1270 
1271 	/*
1272 	 * The active busyness depends on start_gt_clk and gt_stamp.
1273 	 * gt_stamp is updated by i915 only when gt is awake and the
1274 	 * start_gt_clk is derived from GuC state. To get a consistent
1275 	 * view of activity, we query the GuC state only if gt is awake.
1276 	 */
1277 	if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
1278 		stats_saved = *stats;
1279 		gt_stamp_saved = guc->timestamp.gt_stamp;
1280 		/*
1281 		 * Update gt_clks, then gt timestamp to simplify the 'gt_stamp -
1282 		 * start_gt_clk' calculation below for active engines.
1283 		 */
1284 		guc_update_engine_gt_clks(engine);
1285 		guc_update_pm_timestamp(guc, now);
1286 		intel_gt_pm_put_async(gt);
1287 		if (i915_reset_count(gpu_error) != reset_count) {
1288 			*stats = stats_saved;
1289 			guc->timestamp.gt_stamp = gt_stamp_saved;
1290 		}
1291 	}
1292 
1293 	total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
1294 	if (stats->running) {
1295 		u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
1296 
1297 		total += intel_gt_clock_interval_to_ns(gt, clk);
1298 	}
1299 
1300 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1301 
1302 	return ns_to_ktime(total);
1303 }
1304 
1305 static void __reset_guc_busyness_stats(struct intel_guc *guc)
1306 {
1307 	struct intel_gt *gt = guc_to_gt(guc);
1308 	struct intel_engine_cs *engine;
1309 	enum intel_engine_id id;
1310 	unsigned long flags;
1311 	ktime_t unused;
1312 
1313 	cancel_delayed_work_sync(&guc->timestamp.work);
1314 
1315 	spin_lock_irqsave(&guc->timestamp.lock, flags);
1316 
1317 	guc_update_pm_timestamp(guc, &unused);
1318 	for_each_engine(engine, gt, id) {
1319 		guc_update_engine_gt_clks(engine);
1320 		engine->stats.guc.prev_total = 0;
1321 	}
1322 
1323 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1324 }
1325 
1326 static void __update_guc_busyness_stats(struct intel_guc *guc)
1327 {
1328 	struct intel_gt *gt = guc_to_gt(guc);
1329 	struct intel_engine_cs *engine;
1330 	enum intel_engine_id id;
1331 	unsigned long flags;
1332 	ktime_t unused;
1333 
1334 	spin_lock_irqsave(&guc->timestamp.lock, flags);
1335 
1336 	guc_update_pm_timestamp(guc, &unused);
1337 	for_each_engine(engine, gt, id)
1338 		guc_update_engine_gt_clks(engine);
1339 
1340 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1341 }
1342 
1343 static void guc_timestamp_ping(struct work_struct *wrk)
1344 {
1345 	struct intel_guc *guc = container_of(wrk, typeof(*guc),
1346 					     timestamp.work.work);
1347 	struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
1348 	struct intel_gt *gt = guc_to_gt(guc);
1349 	intel_wakeref_t wakeref;
1350 	int srcu, ret;
1351 
1352 	/*
1353 	 * Synchronize with gt reset to make sure the worker does not
1354 	 * corrupt the engine/guc stats.
1355 	 */
1356 	ret = intel_gt_reset_trylock(gt, &srcu);
1357 	if (ret)
1358 		return;
1359 
1360 	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
1361 		__update_guc_busyness_stats(guc);
1362 
1363 	intel_gt_reset_unlock(gt, srcu);
1364 
1365 	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
1366 			 guc->timestamp.ping_delay);
1367 }
1368 
1369 static int guc_action_enable_usage_stats(struct intel_guc *guc)
1370 {
1371 	u32 offset = intel_guc_engine_usage_offset(guc);
1372 	u32 action[] = {
1373 		INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
1374 		offset,
1375 		0,
1376 	};
1377 
1378 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
1379 }
1380 
1381 static void guc_init_engine_stats(struct intel_guc *guc)
1382 {
1383 	struct intel_gt *gt = guc_to_gt(guc);
1384 	intel_wakeref_t wakeref;
1385 
1386 	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
1387 			 guc->timestamp.ping_delay);
1388 
1389 	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
1390 		int ret = guc_action_enable_usage_stats(guc);
1391 
1392 		if (ret)
1393 			drm_err(&gt->i915->drm,
1394 				"Failed to enable usage stats: %d!\n", ret);
1395 	}
1396 }
1397 
1398 void intel_guc_busyness_park(struct intel_gt *gt)
1399 {
1400 	struct intel_guc *guc = &gt->uc.guc;
1401 
1402 	if (!guc_submission_initialized(guc))
1403 		return;
1404 
1405 	cancel_delayed_work(&guc->timestamp.work);
1406 	__update_guc_busyness_stats(guc);
1407 }
1408 
1409 void intel_guc_busyness_unpark(struct intel_gt *gt)
1410 {
1411 	struct intel_guc *guc = &gt->uc.guc;
1412 	unsigned long flags;
1413 	ktime_t unused;
1414 
1415 	if (!guc_submission_initialized(guc))
1416 		return;
1417 
1418 	spin_lock_irqsave(&guc->timestamp.lock, flags);
1419 	guc_update_pm_timestamp(guc, &unused);
1420 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1421 	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
1422 			 guc->timestamp.ping_delay);
1423 }
1424 
1425 static inline bool
1426 submission_disabled(struct intel_guc *guc)
1427 {
1428 	struct i915_sched_engine * const sched_engine = guc->sched_engine;
1429 
1430 	return unlikely(!sched_engine ||
1431 			!__tasklet_is_enabled(&sched_engine->tasklet));
1432 }
1433 
1434 static void disable_submission(struct intel_guc *guc)
1435 {
1436 	struct i915_sched_engine * const sched_engine = guc->sched_engine;
1437 
1438 	if (__tasklet_is_enabled(&sched_engine->tasklet)) {
1439 		GEM_BUG_ON(!guc->ct.enabled);
1440 		__tasklet_disable_sync_once(&sched_engine->tasklet);
1441 		sched_engine->tasklet.callback = NULL;
1442 	}
1443 }
1444 
1445 static void enable_submission(struct intel_guc *guc)
1446 {
1447 	struct i915_sched_engine * const sched_engine = guc->sched_engine;
1448 	unsigned long flags;
1449 
1450 	spin_lock_irqsave(&guc->sched_engine->lock, flags);
1451 	sched_engine->tasklet.callback = guc_submission_tasklet;
1452 	wmb();	/* Make sure callback visible */
1453 	if (!__tasklet_is_enabled(&sched_engine->tasklet) &&
1454 	    __tasklet_enable(&sched_engine->tasklet)) {
1455 		GEM_BUG_ON(!guc->ct.enabled);
1456 
1457 		/* And kick in case we missed a new request submission. */
1458 		tasklet_hi_schedule(&sched_engine->tasklet);
1459 	}
1460 	spin_unlock_irqrestore(&guc->sched_engine->lock, flags);
1461 }
1462 
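/*
 * Taking and immediately dropping sched_engine->lock acts as a barrier: any
 * submission path still running under that lock is guaranteed to have
 * finished before the reset code proceeds.
 */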
1463 static void guc_flush_submissions(struct intel_guc *guc)
1464 {
1465 	struct i915_sched_engine * const sched_engine = guc->sched_engine;
1466 	unsigned long flags;
1467 
1468 	spin_lock_irqsave(&sched_engine->lock, flags);
1469 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1470 }
1471 
1472 static void guc_flush_destroyed_contexts(struct intel_guc *guc);
1473 
1474 void intel_guc_submission_reset_prepare(struct intel_guc *guc)
1475 {
1476 	int i;
1477 
1478 	if (unlikely(!guc_submission_initialized(guc))) {
1479 		/* Reset called during driver load? GuC not yet initialised! */
1480 		return;
1481 	}
1482 
1483 	intel_gt_park_heartbeats(guc_to_gt(guc));
1484 	disable_submission(guc);
1485 	guc->interrupts.disable(guc);
1486 	__reset_guc_busyness_stats(guc);
1487 
1488 	/* Flush IRQ handler */
1489 	spin_lock_irq(&guc_to_gt(guc)->irq_lock);
1490 	spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
1491 
1492 	guc_flush_submissions(guc);
1493 	guc_flush_destroyed_contexts(guc);
1494 
1495 	/*
1496 	 * Handle any outstanding G2Hs before reset. Call IRQ handler directly
1497 	 * each pass as interrupts have been disabled. We always scrub for
1498 	 * outstanding G2H as it is possible for outstanding_submission_g2h to
1499 	 * be incremented after the context state update.
1500 	 */
1501 	for (i = 0; i < 4 && atomic_read(&guc->outstanding_submission_g2h); ++i) {
1502 		intel_guc_to_host_event_handler(guc);
1503 #define wait_for_reset(guc, wait_var) \
1504 		intel_guc_wait_for_pending_msg(guc, wait_var, false, (HZ / 20))
1505 		do {
1506 			wait_for_reset(guc, &guc->outstanding_submission_g2h);
1507 		} while (!list_empty(&guc->ct.requests.incoming));
1508 	}
1509 
1510 	scrub_guc_desc_for_outstanding_g2h(guc);
1511 }
1512 
1513 static struct intel_engine_cs *
1514 guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling)
1515 {
1516 	struct intel_engine_cs *engine;
1517 	intel_engine_mask_t tmp, mask = ve->mask;
1518 	unsigned int num_siblings = 0;
1519 
1520 	for_each_engine_masked(engine, ve->gt, mask, tmp)
1521 		if (num_siblings++ == sibling)
1522 			return engine;
1523 
1524 	return NULL;
1525 }
1526 
1527 static inline struct intel_engine_cs *
1528 __context_to_physical_engine(struct intel_context *ce)
1529 {
1530 	struct intel_engine_cs *engine = ce->engine;
1531 
1532 	if (intel_engine_is_virtual(engine))
1533 		engine = guc_virtual_get_sibling(engine, 0);
1534 
1535 	return engine;
1536 }
1537 
1538 static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
1539 {
1540 	struct intel_engine_cs *engine = __context_to_physical_engine(ce);
1541 
1542 	if (intel_context_is_banned(ce))
1543 		return;
1544 
1545 	GEM_BUG_ON(!intel_context_is_pinned(ce));
1546 
1547 	/*
1548 	 * We want a simple context + ring to execute the breadcrumb update.
1549 	 * We cannot rely on the context being intact across the GPU hang,
1550 	 * so clear it and rebuild just what we need for the breadcrumb.
1551 	 * All pending requests for this context will be zapped, and any
1552 	 * future request will be after userspace has had the opportunity
1553 	 * to recreate its own state.
1554 	 */
1555 	if (scrub)
1556 		lrc_init_regs(ce, engine, true);
1557 
1558 	/* Rerun the request; its payload has been neutered (if guilty). */
1559 	lrc_update_regs(ce, engine, head);
1560 }
1561 
1562 static void guc_reset_nop(struct intel_engine_cs *engine)
1563 {
1564 }
1565 
1566 static void guc_rewind_nop(struct intel_engine_cs *engine, bool stalled)
1567 {
1568 }
1569 
1570 static void
1571 __unwind_incomplete_requests(struct intel_context *ce)
1572 {
1573 	struct i915_request *rq, *rn;
1574 	struct list_head *pl;
1575 	int prio = I915_PRIORITY_INVALID;
1576 	struct i915_sched_engine * const sched_engine =
1577 		ce->engine->sched_engine;
1578 	unsigned long flags;
1579 
1580 	spin_lock_irqsave(&sched_engine->lock, flags);
1581 	spin_lock(&ce->guc_state.lock);
1582 	list_for_each_entry_safe_reverse(rq, rn,
1583 					 &ce->guc_state.requests,
1584 					 sched.link) {
1585 		if (i915_request_completed(rq))
1586 			continue;
1587 
1588 		list_del_init(&rq->sched.link);
1589 		__i915_request_unsubmit(rq);
1590 
1591 		/* Push the request back into the queue for later resubmission. */
1592 		GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
1593 		if (rq_prio(rq) != prio) {
1594 			prio = rq_prio(rq);
1595 			pl = i915_sched_lookup_priolist(sched_engine, prio);
1596 		}
1597 		GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
1598 
1599 		list_add(&rq->sched.link, pl);
1600 		set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
1601 	}
1602 	spin_unlock(&ce->guc_state.lock);
1603 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1604 }
1605 
1606 static void __guc_reset_context(struct intel_context *ce, bool stalled)
1607 {
1608 	bool local_stalled;
1609 	struct i915_request *rq;
1610 	unsigned long flags;
1611 	u32 head;
1612 	int i, number_children = ce->parallel.number_children;
1613 	bool skip = false;
1614 	struct intel_context *parent = ce;
1615 
1616 	GEM_BUG_ON(intel_context_is_child(ce));
1617 
1618 	intel_context_get(ce);
1619 
1620 	/*
1621 	 * GuC will implicitly mark the context as non-schedulable when it sends
1622 	 * the reset notification. Make sure our state reflects this change. The
1623 	 * context will be marked enabled on resubmission.
1624 	 *
1625 	 * XXX: If the context is reset as a result of the request cancellation
1626 	 * this G2H is received after the schedule disable complete G2H which is
1627 	 * wrong as this creates a race between the request cancellation code
1628 	 * re-submitting the context and this G2H handler. This is a bug in the
1629 	 * GuC but can be worked around in the meantime by converting this to a
1630 	 * NOP if a pending enable is in flight, as this indicates that a request
1631 	 * cancellation has occurred.
1632 	 */
1633 	spin_lock_irqsave(&ce->guc_state.lock, flags);
1634 	if (likely(!context_pending_enable(ce)))
1635 		clr_context_enabled(ce);
1636 	else
1637 		skip = true;
1638 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1639 	if (unlikely(skip))
1640 		goto out_put;
1641 
1642 	/*
1643 	 * For each context in the relationship find the hanging request
1644 	 * resetting each context / request as needed
1645 	 */
1646 	for (i = 0; i < number_children + 1; ++i) {
1647 		if (!intel_context_is_pinned(ce))
1648 			goto next_context;
1649 
1650 		local_stalled = false;
1651 		rq = intel_context_find_active_request(ce);
1652 		if (!rq) {
1653 			head = ce->ring->tail;
1654 			goto out_replay;
1655 		}
1656 
1657 		if (i915_request_started(rq))
1658 			local_stalled = true;
1659 
1660 		GEM_BUG_ON(i915_active_is_idle(&ce->active));
1661 		head = intel_ring_wrap(ce->ring, rq->head);
1662 
1663 		__i915_request_reset(rq, local_stalled && stalled);
1664 out_replay:
1665 		guc_reset_state(ce, head, local_stalled && stalled);
1666 next_context:
1667 		if (i != number_children)
1668 			ce = list_next_entry(ce, parallel.child_link);
1669 	}
1670 
1671 	__unwind_incomplete_requests(parent);
1672 out_put:
1673 	intel_context_put(parent);
1674 }
1675 
1676 void intel_guc_submission_reset(struct intel_guc *guc, bool stalled)
1677 {
1678 	struct intel_context *ce;
1679 	unsigned long index;
1680 	unsigned long flags;
1681 
1682 	if (unlikely(!guc_submission_initialized(guc))) {
1683 		/* Reset called during driver load? GuC not yet initialised! */
1684 		return;
1685 	}
1686 
1687 	xa_lock_irqsave(&guc->context_lookup, flags);
1688 	xa_for_each(&guc->context_lookup, index, ce) {
1689 		if (!kref_get_unless_zero(&ce->ref))
1690 			continue;
1691 
1692 		xa_unlock(&guc->context_lookup);
1693 
1694 		if (intel_context_is_pinned(ce) &&
1695 		    !intel_context_is_child(ce))
1696 			__guc_reset_context(ce, stalled);
1697 
1698 		intel_context_put(ce);
1699 
1700 		xa_lock(&guc->context_lookup);
1701 	}
1702 	xa_unlock_irqrestore(&guc->context_lookup, flags);
1703 
1704 	/* GuC is blown away, drop all references to contexts */
1705 	xa_destroy(&guc->context_lookup);
1706 }
1707 
1708 static void guc_cancel_context_requests(struct intel_context *ce)
1709 {
1710 	struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine;
1711 	struct i915_request *rq;
1712 	unsigned long flags;
1713 
1714 	/* Mark all executing requests as skipped. */
1715 	spin_lock_irqsave(&sched_engine->lock, flags);
1716 	spin_lock(&ce->guc_state.lock);
1717 	list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
1718 		i915_request_put(i915_request_mark_eio(rq));
1719 	spin_unlock(&ce->guc_state.lock);
1720 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1721 }
1722 
1723 static void
1724 guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine)
1725 {
1726 	struct i915_request *rq, *rn;
1727 	struct rb_node *rb;
1728 	unsigned long flags;
1729 
1730 	/* Can be called during boot if GuC fails to load */
1731 	if (!sched_engine)
1732 		return;
1733 
1734 	/*
	 * Before we start cancelling requests, we should have exclusive
	 * access to the submission state. This is arranged for us by the
1737 	 * caller disabling the interrupt generation, the tasklet and other
1738 	 * threads that may then access the same state, giving us a free hand
1739 	 * to reset state. However, we still need to let lockdep be aware that
1740 	 * we know this state may be accessed in hardirq context, so we
1741 	 * disable the irq around this manipulation and we want to keep
1742 	 * the spinlock focused on its duties and not accidentally conflate
1743 	 * coverage to the submission's irq state. (Similarly, although we
1744 	 * shouldn't need to disable irq around the manipulation of the
1745 	 * submission's irq state, we also wish to remind ourselves that
1746 	 * it is irq state.)
1747 	 */
1748 	spin_lock_irqsave(&sched_engine->lock, flags);
1749 
1750 	/* Flush the queued requests to the timeline list (for retiring). */
1751 	while ((rb = rb_first_cached(&sched_engine->queue))) {
1752 		struct i915_priolist *p = to_priolist(rb);
1753 
1754 		priolist_for_each_request_consume(rq, rn, p) {
1755 			list_del_init(&rq->sched.link);
1756 
1757 			__i915_request_submit(rq);
1758 
1759 			i915_request_put(i915_request_mark_eio(rq));
1760 		}
1761 
1762 		rb_erase_cached(&p->node, &sched_engine->queue);
1763 		i915_priolist_free(p);
1764 	}
1765 
1766 	/* Remaining _unready_ requests will be nop'ed when submitted */
1767 
1768 	sched_engine->queue_priority_hint = INT_MIN;
1769 	sched_engine->queue = RB_ROOT_CACHED;
1770 
1771 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1772 }
1773 
1774 void intel_guc_submission_cancel_requests(struct intel_guc *guc)
1775 {
1776 	struct intel_context *ce;
1777 	unsigned long index;
1778 	unsigned long flags;
1779 
1780 	xa_lock_irqsave(&guc->context_lookup, flags);
1781 	xa_for_each(&guc->context_lookup, index, ce) {
1782 		if (!kref_get_unless_zero(&ce->ref))
1783 			continue;
1784 
1785 		xa_unlock(&guc->context_lookup);
1786 
1787 		if (intel_context_is_pinned(ce) &&
1788 		    !intel_context_is_child(ce))
1789 			guc_cancel_context_requests(ce);
1790 
1791 		intel_context_put(ce);
1792 
1793 		xa_lock(&guc->context_lookup);
1794 	}
1795 	xa_unlock_irqrestore(&guc->context_lookup, flags);
1796 
1797 	guc_cancel_sched_engine_requests(guc->sched_engine);
1798 
1799 	/* GuC is blown away, drop all references to contexts */
1800 	xa_destroy(&guc->context_lookup);
1801 }
1802 
1803 void intel_guc_submission_reset_finish(struct intel_guc *guc)
1804 {
1805 	/* Reset called during driver load or during wedge? */
1806 	if (unlikely(!guc_submission_initialized(guc) ||
1807 		     test_bit(I915_WEDGED, &guc_to_gt(guc)->reset.flags))) {
1808 		return;
1809 	}
1810 
1811 	/*
	 * Technically it is possible for this value to be non-zero here, but it
	 * is very unlikely and harmless. Regardless, let's add a warn so we can
	 * see in CI if this happens frequently / is a precursor to taking down
	 * the machine.
1816 	 */
1817 	GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
1818 	atomic_set(&guc->outstanding_submission_g2h, 0);
1819 
1820 	intel_guc_global_policies_update(guc);
1821 	enable_submission(guc);
1822 	intel_gt_unpark_heartbeats(guc_to_gt(guc));
1823 }
1824 
1825 static void destroyed_worker_func(struct work_struct *w);
1826 
1827 /*
1828  * Set up the memory resources to be shared with the GuC (via the GGTT)
1829  * at firmware loading time.
1830  */
1831 int intel_guc_submission_init(struct intel_guc *guc)
1832 {
1833 	struct intel_gt *gt = guc_to_gt(guc);
1834 	int ret;
1835 
1836 	if (guc->lrc_desc_pool)
1837 		return 0;
1838 
1839 	ret = guc_lrc_desc_pool_create(guc);
1840 	if (ret)
1841 		return ret;
1842 	/*
1843 	 * Keep static analysers happy, let them know that we allocated the
1844 	 * vma after testing that it didn't exist earlier.
1845 	 */
1846 	GEM_BUG_ON(!guc->lrc_desc_pool);
1847 
1848 	xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
1849 
1850 	spin_lock_init(&guc->submission_state.lock);
1851 	INIT_LIST_HEAD(&guc->submission_state.guc_id_list);
1852 	ida_init(&guc->submission_state.guc_ids);
1853 	INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts);
1854 	INIT_WORK(&guc->submission_state.destroyed_worker,
1855 		  destroyed_worker_func);
1856 
1857 	guc->submission_state.guc_ids_bitmap =
1858 		bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
1859 	if (!guc->submission_state.guc_ids_bitmap)
1860 		return -ENOMEM;
1861 
1862 	spin_lock_init(&guc->timestamp.lock);
1863 	INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
1864 	guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
1865 	guc->timestamp.shift = gpm_timestamp_shift(gt);
1866 
1867 	return 0;
1868 }
1869 
1870 void intel_guc_submission_fini(struct intel_guc *guc)
1871 {
1872 	if (!guc->lrc_desc_pool)
1873 		return;
1874 
1875 	guc_flush_destroyed_contexts(guc);
1876 	guc_lrc_desc_pool_destroy(guc);
1877 	i915_sched_engine_put(guc->sched_engine);
1878 	bitmap_free(guc->submission_state.guc_ids_bitmap);
1879 }
1880 
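/* Queue a request on the priority tree and kick the submission tasklet. */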
1881 static inline void queue_request(struct i915_sched_engine *sched_engine,
1882 				 struct i915_request *rq,
1883 				 int prio)
1884 {
1885 	GEM_BUG_ON(!list_empty(&rq->sched.link));
1886 	list_add_tail(&rq->sched.link,
1887 		      i915_sched_lookup_priolist(sched_engine, prio));
1888 	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
1889 	tasklet_hi_schedule(&sched_engine->tasklet);
1890 }
1891 
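/*
 * Fast path: hand a request directly to the GuC, bypassing the tasklet.
 * Multi-LRC requests are only flushed to the GuC once multi_lrc_submit()
 * indicates the whole parallel set can be submitted.
 */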
1892 static int guc_bypass_tasklet_submit(struct intel_guc *guc,
1893 				     struct i915_request *rq)
1894 {
1895 	int ret = 0;
1896 
1897 	__i915_request_submit(rq);
1898 
1899 	trace_i915_request_in(rq, 0);
1900 
1901 	if (is_multi_lrc_rq(rq)) {
1902 		if (multi_lrc_submit(rq)) {
1903 			ret = guc_wq_item_append(guc, rq);
1904 			if (!ret)
1905 				ret = guc_add_request(guc, rq);
1906 		}
1907 	} else {
1908 		guc_set_lrc_tail(rq);
1909 		ret = guc_add_request(guc, rq);
1910 	}
1911 
1912 	if (unlikely(ret == -EPIPE))
1913 		disable_submission(guc);
1914 
1915 	return ret;
1916 }
1917 
1918 static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
1919 {
1920 	struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
1921 	struct intel_context *ce = request_to_scheduling_context(rq);
1922 
1923 	return submission_disabled(guc) || guc->stalled_request ||
1924 		!i915_sched_engine_is_empty(sched_engine) ||
1925 		!lrc_desc_registered(guc, ce->guc_id.id);
1926 }
1927 
1928 static void guc_submit_request(struct i915_request *rq)
1929 {
1930 	struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
1931 	struct intel_guc *guc = &rq->engine->gt->uc.guc;
1932 	unsigned long flags;
1933 
1934 	/* Will be called from irq-context when using foreign fences. */
1935 	spin_lock_irqsave(&sched_engine->lock, flags);
1936 
1937 	if (need_tasklet(guc, rq))
1938 		queue_request(sched_engine, rq, rq_prio(rq));
1939 	else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY)
1940 		tasklet_hi_schedule(&sched_engine->tasklet);
1941 
1942 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1943 }
1944 
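/*
 * Allocate a guc_id. Parent (multi-LRC) contexts take a contiguous,
 * power-of-two block of ids from the multi-LRC bitmap so each child can use
 * parent id + n; single-LRC contexts allocate a single id from the ida.
 */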
1945 static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
1946 {
1947 	int ret;
1948 
1949 	GEM_BUG_ON(intel_context_is_child(ce));
1950 
1951 	if (intel_context_is_parent(ce))
1952 		ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
1953 					      NUMBER_MULTI_LRC_GUC_ID(guc),
1954 					      order_base_2(ce->parallel.number_children
1955 							   + 1));
1956 	else
1957 		ret = ida_simple_get(&guc->submission_state.guc_ids,
1958 				     NUMBER_MULTI_LRC_GUC_ID(guc),
1959 				     guc->submission_state.num_guc_ids,
1960 				     GFP_KERNEL | __GFP_RETRY_MAYFAIL |
1961 				     __GFP_NOWARN);
1962 	if (unlikely(ret < 0))
1963 		return ret;
1964 
1965 	ce->guc_id.id = ret;
1966 	return 0;
1967 }
1968 
1969 static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
1970 {
1971 	GEM_BUG_ON(intel_context_is_child(ce));
1972 
1973 	if (!context_guc_id_invalid(ce)) {
1974 		if (intel_context_is_parent(ce))
1975 			bitmap_release_region(guc->submission_state.guc_ids_bitmap,
1976 					      ce->guc_id.id,
1977 					      order_base_2(ce->parallel.number_children
1978 							   + 1));
1979 		else
1980 			ida_simple_remove(&guc->submission_state.guc_ids,
1981 					  ce->guc_id.id);
1982 		reset_lrc_desc(guc, ce->guc_id.id);
1983 		set_context_guc_id_invalid(ce);
1984 	}
1985 	if (!list_empty(&ce->guc_id.link))
1986 		list_del_init(&ce->guc_id.link);
1987 }
1988 
1989 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
1990 {
1991 	unsigned long flags;
1992 
1993 	spin_lock_irqsave(&guc->submission_state.lock, flags);
1994 	__release_guc_id(guc, ce);
1995 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
1996 }
1997 
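/*
 * No free guc_ids: steal one from the first unpinned context on the
 * guc_id_list. The donor is marked unregistered and its guc_id invalidated,
 * so it must re-register before it can be submitted again.
 */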
1998 static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
1999 {
2000 	struct intel_context *cn;
2001 
2002 	lockdep_assert_held(&guc->submission_state.lock);
2003 	GEM_BUG_ON(intel_context_is_child(ce));
2004 	GEM_BUG_ON(intel_context_is_parent(ce));
2005 
2006 	if (!list_empty(&guc->submission_state.guc_id_list)) {
2007 		cn = list_first_entry(&guc->submission_state.guc_id_list,
2008 				      struct intel_context,
2009 				      guc_id.link);
2010 
2011 		GEM_BUG_ON(atomic_read(&cn->guc_id.ref));
2012 		GEM_BUG_ON(context_guc_id_invalid(cn));
2013 		GEM_BUG_ON(intel_context_is_child(cn));
2014 		GEM_BUG_ON(intel_context_is_parent(cn));
2015 
2016 		list_del_init(&cn->guc_id.link);
2017 		ce->guc_id.id = cn->guc_id.id;
2018 
2019 		spin_lock(&cn->guc_state.lock);
2020 		clr_context_registered(cn);
2021 		spin_unlock(&cn->guc_state.lock);
2022 
2023 		set_context_guc_id_invalid(cn);
2024 
2025 #ifdef CONFIG_DRM_I915_SELFTEST
2026 		guc->number_guc_id_stolen++;
2027 #endif
2028 
2029 		return 0;
2030 	} else {
2031 		return -EAGAIN;
2032 	}
2033 }
2034 
2035 static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce)
2036 {
2037 	int ret;
2038 
2039 	lockdep_assert_held(&guc->submission_state.lock);
2040 	GEM_BUG_ON(intel_context_is_child(ce));
2041 
2042 	ret = new_guc_id(guc, ce);
2043 	if (unlikely(ret < 0)) {
2044 		if (intel_context_is_parent(ce))
2045 			return -ENOSPC;
2046 
2047 		ret = steal_guc_id(guc, ce);
2048 		if (ret < 0)
2049 			return ret;
2050 	}
2051 
2052 	if (intel_context_is_parent(ce)) {
2053 		struct intel_context *child;
2054 		int i = 1;
2055 
2056 		for_each_child(ce, child)
2057 			child->guc_id.id = ce->guc_id.id + i++;
2058 	}
2059 
2060 	return 0;
2061 }
2062 
2063 #define PIN_GUC_ID_TRIES	4
2064 static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
2065 {
2066 	int ret = 0;
2067 	unsigned long flags, tries = PIN_GUC_ID_TRIES;
2068 
2069 	GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
2070 
2071 try_again:
2072 	spin_lock_irqsave(&guc->submission_state.lock, flags);
2073 
2074 	might_lock(&ce->guc_state.lock);
2075 
2076 	if (context_guc_id_invalid(ce)) {
2077 		ret = assign_guc_id(guc, ce);
2078 		if (ret)
2079 			goto out_unlock;
		ret = 1;	/* Indicates newly assigned guc_id */
2081 	}
2082 	if (!list_empty(&ce->guc_id.link))
2083 		list_del_init(&ce->guc_id.link);
2084 	atomic_inc(&ce->guc_id.ref);
2085 
2086 out_unlock:
2087 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2088 
2089 	/*
	 * -EAGAIN indicates no guc_ids are available, so let's retire any
	 * outstanding requests to see if that frees up a guc_id. If the first
	 * retire didn't help, insert a sleep with the timeslice duration before
	 * attempting to retire more requests. Double the sleep period each
	 * subsequent pass before finally giving up. The sleep period has a
	 * maximum of 100ms and a minimum of 1ms.
2096 	 */
2097 	if (ret == -EAGAIN && --tries) {
2098 		if (PIN_GUC_ID_TRIES - tries > 1) {
2099 			unsigned int timeslice_shifted =
2100 				ce->engine->props.timeslice_duration_ms <<
2101 				(PIN_GUC_ID_TRIES - tries - 2);
2102 			unsigned int max = min_t(unsigned int, 100,
2103 						 timeslice_shifted);
2104 
2105 			msleep(max_t(unsigned int, max, 1));
2106 		}
2107 		intel_gt_retire_requests(guc_to_gt(guc));
2108 		goto try_again;
2109 	}
2110 
2111 	return ret;
2112 }
2113 
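/*
 * If the context's guc_id is no longer referenced, place it on the
 * guc_id_list so it becomes eligible for stealing. Parent (multi-LRC)
 * contexts never donate their id block this way.
 */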
2114 static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
2115 {
2116 	unsigned long flags;
2117 
2118 	GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
2119 	GEM_BUG_ON(intel_context_is_child(ce));
2120 
2121 	if (unlikely(context_guc_id_invalid(ce) ||
2122 		     intel_context_is_parent(ce)))
2123 		return;
2124 
2125 	spin_lock_irqsave(&guc->submission_state.lock, flags);
2126 	if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
2127 	    !atomic_read(&ce->guc_id.ref))
2128 		list_add_tail(&ce->guc_id.link,
2129 			      &guc->submission_state.guc_id_list);
2130 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2131 }
2132 
2133 static int __guc_action_register_multi_lrc(struct intel_guc *guc,
2134 					   struct intel_context *ce,
2135 					   u32 guc_id,
2136 					   u32 offset,
2137 					   bool loop)
2138 {
2139 	struct intel_context *child;
2140 	u32 action[4 + MAX_ENGINE_INSTANCE];
2141 	int len = 0;
2142 
2143 	GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
2144 
2145 	action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
2146 	action[len++] = guc_id;
2147 	action[len++] = ce->parallel.number_children + 1;
2148 	action[len++] = offset;
2149 	for_each_child(ce, child) {
2150 		offset += sizeof(struct guc_lrc_desc);
2151 		action[len++] = offset;
2152 	}
2153 
2154 	return guc_submission_send_busy_loop(guc, action, len, 0, loop);
2155 }
2156 
2157 static int __guc_action_register_context(struct intel_guc *guc,
2158 					 u32 guc_id,
2159 					 u32 offset,
2160 					 bool loop)
2161 {
2162 	u32 action[] = {
2163 		INTEL_GUC_ACTION_REGISTER_CONTEXT,
2164 		guc_id,
2165 		offset,
2166 	};
2167 
2168 	return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2169 					     0, loop);
2170 }
2171 
2172 static int register_context(struct intel_context *ce, bool loop)
2173 {
2174 	struct intel_guc *guc = ce_to_guc(ce);
2175 	u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) +
2176 		ce->guc_id.id * sizeof(struct guc_lrc_desc);
2177 	int ret;
2178 
2179 	GEM_BUG_ON(intel_context_is_child(ce));
2180 	trace_intel_context_register(ce);
2181 
2182 	if (intel_context_is_parent(ce))
2183 		ret = __guc_action_register_multi_lrc(guc, ce, ce->guc_id.id,
2184 						      offset, loop);
2185 	else
2186 		ret = __guc_action_register_context(guc, ce->guc_id.id, offset,
2187 						    loop);
2188 	if (likely(!ret)) {
2189 		unsigned long flags;
2190 
2191 		spin_lock_irqsave(&ce->guc_state.lock, flags);
2192 		set_context_registered(ce);
2193 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2194 	}
2195 
2196 	return ret;
2197 }
2198 
2199 static int __guc_action_deregister_context(struct intel_guc *guc,
2200 					   u32 guc_id)
2201 {
2202 	u32 action[] = {
2203 		INTEL_GUC_ACTION_DEREGISTER_CONTEXT,
2204 		guc_id,
2205 	};
2206 
2207 	return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2208 					     G2H_LEN_DW_DEREGISTER_CONTEXT,
2209 					     true);
2210 }
2211 
2212 static int deregister_context(struct intel_context *ce, u32 guc_id)
2213 {
2214 	struct intel_guc *guc = ce_to_guc(ce);
2215 
2216 	GEM_BUG_ON(intel_context_is_child(ce));
2217 	trace_intel_context_deregister(ce);
2218 
2219 	return __guc_action_deregister_context(guc, guc_id);
2220 }
2221 
2222 static inline void clear_children_join_go_memory(struct intel_context *ce)
2223 {
2224 	struct parent_scratch *ps = __get_parent_scratch(ce);
2225 	int i;
2226 
2227 	ps->go.semaphore = 0;
2228 	for (i = 0; i < ce->parallel.number_children + 1; ++i)
2229 		ps->join[i].semaphore = 0;
2230 }
2231 
2232 static inline u32 get_children_go_value(struct intel_context *ce)
2233 {
2234 	return __get_parent_scratch(ce)->go.semaphore;
2235 }
2236 
2237 static inline u32 get_children_join_value(struct intel_context *ce,
2238 					  u8 child_index)
2239 {
2240 	return __get_parent_scratch(ce)->join[child_index].semaphore;
2241 }
2242 
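/*
 * Fill in the per-context scheduling policies of the LRC descriptor. The
 * engine properties are in ms and are converted to us for the GuC.
 */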
2243 static void guc_context_policy_init(struct intel_engine_cs *engine,
2244 				    struct guc_lrc_desc *desc)
2245 {
2246 	desc->policy_flags = 0;
2247 
2248 	if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
2249 		desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE;
2250 
2251 	/* NB: For both of these, zero means disabled. */
2252 	desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
2253 	desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
2254 }
2255 
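/*
 * Fill in the LRC descriptor(s) for a context and register it with the GuC.
 * For a parent context this also sets up the process descriptor / work queue
 * and the descriptors of all its children. If the descriptor slot is already
 * in use, the stale registration is torn down first via a deregister H2G.
 */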
2256 static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
2257 {
2258 	struct intel_engine_cs *engine = ce->engine;
2259 	struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
2260 	struct intel_guc *guc = &engine->gt->uc.guc;
2261 	u32 desc_idx = ce->guc_id.id;
2262 	struct guc_lrc_desc *desc;
2263 	bool context_registered;
2264 	intel_wakeref_t wakeref;
2265 	struct intel_context *child;
2266 	int ret = 0;
2267 
2268 	GEM_BUG_ON(!engine->mask);
2269 	GEM_BUG_ON(!sched_state_is_init(ce));
2270 
2271 	/*
	 * Ensure the LRC and CT vmas are in the same region, as the write
	 * barrier is done based on the CT vma region.
2274 	 */
2275 	GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
2276 		   i915_gem_object_is_lmem(ce->ring->vma->obj));
2277 
2278 	context_registered = lrc_desc_registered(guc, desc_idx);
2279 
2280 	reset_lrc_desc(guc, desc_idx);
2281 	set_lrc_desc_registered(guc, desc_idx, ce);
2282 
2283 	desc = __get_lrc_desc(guc, desc_idx);
2284 	desc->engine_class = engine_class_to_guc_class(engine->class);
2285 	desc->engine_submit_mask = engine->logical_mask;
2286 	desc->hw_context_desc = ce->lrc.lrca;
2287 	desc->priority = ce->guc_state.prio;
2288 	desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
2289 	guc_context_policy_init(engine, desc);
2290 
2291 	/*
	 * If the context is a parent, we need to register a process descriptor
	 * describing its work queue and register all of its child contexts.
2294 	 */
2295 	if (intel_context_is_parent(ce)) {
2296 		struct guc_process_desc *pdesc;
2297 
2298 		ce->parallel.guc.wqi_tail = 0;
2299 		ce->parallel.guc.wqi_head = 0;
2300 
2301 		desc->process_desc = i915_ggtt_offset(ce->state) +
2302 			__get_parent_scratch_offset(ce);
2303 		desc->wq_addr = i915_ggtt_offset(ce->state) +
2304 			__get_wq_offset(ce);
2305 		desc->wq_size = WQ_SIZE;
2306 
2307 		pdesc = __get_process_desc(ce);
2308 		memset(pdesc, 0, sizeof(*(pdesc)));
2309 		pdesc->stage_id = ce->guc_id.id;
2310 		pdesc->wq_base_addr = desc->wq_addr;
2311 		pdesc->wq_size_bytes = desc->wq_size;
2312 		pdesc->wq_status = WQ_STATUS_ACTIVE;
2313 
2314 		for_each_child(ce, child) {
2315 			desc = __get_lrc_desc(guc, child->guc_id.id);
2316 
2317 			desc->engine_class =
2318 				engine_class_to_guc_class(engine->class);
2319 			desc->hw_context_desc = child->lrc.lrca;
2320 			desc->priority = ce->guc_state.prio;
2321 			desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
2322 			guc_context_policy_init(engine, desc);
2323 		}
2324 
2325 		clear_children_join_go_memory(ce);
2326 	}
2327 
2328 	/*
2329 	 * The context_lookup xarray is used to determine if the hardware
	 * context is currently registered. There are two cases in which it
	 * could be registered: either the guc_id has been stolen from another
	 * context or the lrc descriptor address of this context has changed.
	 * In either case the context needs to be deregistered with the GuC
	 * before registering this context.
2335 	 */
2336 	if (context_registered) {
2337 		bool disabled;
2338 		unsigned long flags;
2339 
2340 		trace_intel_context_steal_guc_id(ce);
2341 		GEM_BUG_ON(!loop);
2342 
2343 		/* Seal race with Reset */
2344 		spin_lock_irqsave(&ce->guc_state.lock, flags);
2345 		disabled = submission_disabled(guc);
2346 		if (likely(!disabled)) {
2347 			set_context_wait_for_deregister_to_register(ce);
2348 			intel_context_get(ce);
2349 		}
2350 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2351 		if (unlikely(disabled)) {
2352 			reset_lrc_desc(guc, desc_idx);
2353 			return 0;	/* Will get registered later */
2354 		}
2355 
2356 		/*
2357 		 * If stealing the guc_id, this ce has the same guc_id as the
2358 		 * context whose guc_id was stolen.
2359 		 */
2360 		with_intel_runtime_pm(runtime_pm, wakeref)
2361 			ret = deregister_context(ce, ce->guc_id.id);
2362 		if (unlikely(ret == -ENODEV))
2363 			ret = 0;	/* Will get registered later */
2364 	} else {
2365 		with_intel_runtime_pm(runtime_pm, wakeref)
2366 			ret = register_context(ce, loop);
2367 		if (unlikely(ret == -EBUSY)) {
2368 			reset_lrc_desc(guc, desc_idx);
2369 		} else if (unlikely(ret == -ENODEV)) {
2370 			reset_lrc_desc(guc, desc_idx);
2371 			ret = 0;	/* Will get registered later */
2372 		}
2373 	}
2374 
2375 	return ret;
2376 }
2377 
2378 static int __guc_context_pre_pin(struct intel_context *ce,
2379 				 struct intel_engine_cs *engine,
2380 				 struct i915_gem_ww_ctx *ww,
2381 				 void **vaddr)
2382 {
2383 	return lrc_pre_pin(ce, engine, ww, vaddr);
2384 }
2385 
2386 static int __guc_context_pin(struct intel_context *ce,
2387 			     struct intel_engine_cs *engine,
2388 			     void *vaddr)
2389 {
2390 	if (i915_ggtt_offset(ce->state) !=
2391 	    (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
2392 		set_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
2393 
2394 	/*
	 * The GuC context gets pinned in guc_request_alloc. See that function
	 * for an explanation of why.
2397 	 */
2398 
2399 	return lrc_pin(ce, engine, vaddr);
2400 }
2401 
2402 static int guc_context_pre_pin(struct intel_context *ce,
2403 			       struct i915_gem_ww_ctx *ww,
2404 			       void **vaddr)
2405 {
2406 	return __guc_context_pre_pin(ce, ce->engine, ww, vaddr);
2407 }
2408 
2409 static int guc_context_pin(struct intel_context *ce, void *vaddr)
2410 {
2411 	int ret = __guc_context_pin(ce, ce->engine, vaddr);
2412 
2413 	if (likely(!ret && !intel_context_is_barrier(ce)))
2414 		intel_engine_pm_get(ce->engine);
2415 
2416 	return ret;
2417 }
2418 
2419 static void guc_context_unpin(struct intel_context *ce)
2420 {
2421 	struct intel_guc *guc = ce_to_guc(ce);
2422 
2423 	unpin_guc_id(guc, ce);
2424 	lrc_unpin(ce);
2425 
2426 	if (likely(!intel_context_is_barrier(ce)))
2427 		intel_engine_pm_put_async(ce->engine);
2428 }
2429 
2430 static void guc_context_post_unpin(struct intel_context *ce)
2431 {
2432 	lrc_post_unpin(ce);
2433 }
2434 
2435 static void __guc_context_sched_enable(struct intel_guc *guc,
2436 				       struct intel_context *ce)
2437 {
2438 	u32 action[] = {
2439 		INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
2440 		ce->guc_id.id,
2441 		GUC_CONTEXT_ENABLE
2442 	};
2443 
2444 	trace_intel_context_sched_enable(ce);
2445 
2446 	guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2447 				      G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
2448 }
2449 
2450 static void __guc_context_sched_disable(struct intel_guc *guc,
2451 					struct intel_context *ce,
2452 					u16 guc_id)
2453 {
2454 	u32 action[] = {
2455 		INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
2456 		guc_id,	/* ce->guc_id.id not stable */
2457 		GUC_CONTEXT_DISABLE
2458 	};
2459 
2460 	GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID);
2461 
2462 	GEM_BUG_ON(intel_context_is_child(ce));
2463 	trace_intel_context_sched_disable(ce);
2464 
2465 	guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2466 				      G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
2467 }
2468 
2469 static void guc_blocked_fence_complete(struct intel_context *ce)
2470 {
2471 	lockdep_assert_held(&ce->guc_state.lock);
2472 
2473 	if (!i915_sw_fence_done(&ce->guc_state.blocked))
2474 		i915_sw_fence_complete(&ce->guc_state.blocked);
2475 }
2476 
2477 static void guc_blocked_fence_reinit(struct intel_context *ce)
2478 {
2479 	lockdep_assert_held(&ce->guc_state.lock);
2480 	GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked));
2481 
2482 	/*
2483 	 * This fence is always complete unless a pending schedule disable is
2484 	 * outstanding. We arm the fence here and complete it when we receive
2485 	 * the pending schedule disable complete message.
2486 	 */
2487 	i915_sw_fence_fini(&ce->guc_state.blocked);
2488 	i915_sw_fence_reinit(&ce->guc_state.blocked);
2489 	i915_sw_fence_await(&ce->guc_state.blocked);
2490 	i915_sw_fence_commit(&ce->guc_state.blocked);
2491 }
2492 
2493 static u16 prep_context_pending_disable(struct intel_context *ce)
2494 {
2495 	lockdep_assert_held(&ce->guc_state.lock);
2496 
2497 	set_context_pending_disable(ce);
2498 	clr_context_enabled(ce);
2499 	guc_blocked_fence_reinit(ce);
2500 	intel_context_get(ce);
2501 
2502 	return ce->guc_id.id;
2503 }
2504 
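/*
 * Block submission on a context by sending a schedule disable H2G. Returns
 * the context's blocked fence, which is signalled once the G2H confirming
 * the disable has been received.
 */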
2505 static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
2506 {
2507 	struct intel_guc *guc = ce_to_guc(ce);
2508 	unsigned long flags;
2509 	struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
2510 	intel_wakeref_t wakeref;
2511 	u16 guc_id;
2512 	bool enabled;
2513 
2514 	GEM_BUG_ON(intel_context_is_child(ce));
2515 
2516 	spin_lock_irqsave(&ce->guc_state.lock, flags);
2517 
2518 	incr_context_blocked(ce);
2519 
2520 	enabled = context_enabled(ce);
2521 	if (unlikely(!enabled || submission_disabled(guc))) {
2522 		if (enabled)
2523 			clr_context_enabled(ce);
2524 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2525 		return &ce->guc_state.blocked;
2526 	}
2527 
2528 	/*
2529 	 * We add +2 here as the schedule disable complete CTB handler calls
2530 	 * intel_context_sched_disable_unpin (-2 to pin_count).
2531 	 */
2532 	atomic_add(2, &ce->pin_count);
2533 
2534 	guc_id = prep_context_pending_disable(ce);
2535 
2536 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2537 
2538 	with_intel_runtime_pm(runtime_pm, wakeref)
2539 		__guc_context_sched_disable(guc, ce, guc_id);
2540 
2541 	return &ce->guc_state.blocked;
2542 }
2543 
2544 #define SCHED_STATE_MULTI_BLOCKED_MASK \
2545 	(SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED)
2546 #define SCHED_STATE_NO_UNBLOCK \
2547 	(SCHED_STATE_MULTI_BLOCKED_MASK | \
2548 	 SCHED_STATE_PENDING_DISABLE | \
2549 	 SCHED_STATE_BANNED)
2550 
2551 static bool context_cant_unblock(struct intel_context *ce)
2552 {
2553 	lockdep_assert_held(&ce->guc_state.lock);
2554 
2555 	return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
2556 		context_guc_id_invalid(ce) ||
2557 		!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) ||
2558 		!intel_context_is_pinned(ce);
2559 }
2560 
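/*
 * Re-enable submission on a previously blocked context, unless another
 * block, a pending disable, a ban or a reset still prevents it.
 */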
2561 static void guc_context_unblock(struct intel_context *ce)
2562 {
2563 	struct intel_guc *guc = ce_to_guc(ce);
2564 	unsigned long flags;
2565 	struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
2566 	intel_wakeref_t wakeref;
2567 	bool enable;
2568 
2569 	GEM_BUG_ON(context_enabled(ce));
2570 	GEM_BUG_ON(intel_context_is_child(ce));
2571 
2572 	spin_lock_irqsave(&ce->guc_state.lock, flags);
2573 
2574 	if (unlikely(submission_disabled(guc) ||
2575 		     context_cant_unblock(ce))) {
2576 		enable = false;
2577 	} else {
2578 		enable = true;
2579 		set_context_pending_enable(ce);
2580 		set_context_enabled(ce);
2581 		intel_context_get(ce);
2582 	}
2583 
2584 	decr_context_blocked(ce);
2585 
2586 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2587 
2588 	if (enable) {
2589 		with_intel_runtime_pm(runtime_pm, wakeref)
2590 			__guc_context_sched_enable(guc, ce);
2591 	}
2592 }
2593 
2594 static void guc_context_cancel_request(struct intel_context *ce,
2595 				       struct i915_request *rq)
2596 {
2597 	struct intel_context *block_context =
2598 		request_to_scheduling_context(rq);
2599 
2600 	if (i915_sw_fence_signaled(&rq->submit)) {
2601 		struct i915_sw_fence *fence;
2602 
2603 		intel_context_get(ce);
2604 		fence = guc_context_block(block_context);
2605 		i915_sw_fence_wait(fence);
2606 		if (!i915_request_completed(rq)) {
2607 			__i915_request_skip(rq);
2608 			guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
2609 					true);
2610 		}
2611 
2612 		/*
		 * XXX: Racy if the context is reset, see the comment in
2614 		 * __guc_reset_context().
2615 		 */
2616 		flush_work(&ce_to_guc(ce)->ct.requests.worker);
2617 
2618 		guc_context_unblock(block_context);
2619 		intel_context_put(ce);
2620 	}
2621 }
2622 
2623 static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
2624 						 u16 guc_id,
2625 						 u32 preemption_timeout)
2626 {
2627 	u32 action[] = {
2628 		INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
2629 		guc_id,
2630 		preemption_timeout
2631 	};
2632 
2633 	intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
2634 }
2635 
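/*
 * Ban a context: cancel anything that hasn't reached the hardware and, if
 * the context is still enabled, disable scheduling with a 1 us preemption
 * timeout so it is kicked off the hardware as soon as possible.
 */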
2636 static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
2637 {
2638 	struct intel_guc *guc = ce_to_guc(ce);
2639 	struct intel_runtime_pm *runtime_pm =
2640 		&ce->engine->gt->i915->runtime_pm;
2641 	intel_wakeref_t wakeref;
2642 	unsigned long flags;
2643 
2644 	GEM_BUG_ON(intel_context_is_child(ce));
2645 
2646 	guc_flush_submissions(guc);
2647 
2648 	spin_lock_irqsave(&ce->guc_state.lock, flags);
2649 	set_context_banned(ce);
2650 
2651 	if (submission_disabled(guc) ||
2652 	    (!context_enabled(ce) && !context_pending_disable(ce))) {
2653 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2654 
2655 		guc_cancel_context_requests(ce);
2656 		intel_engine_signal_breadcrumbs(ce->engine);
2657 	} else if (!context_pending_disable(ce)) {
2658 		u16 guc_id;
2659 
2660 		/*
2661 		 * We add +2 here as the schedule disable complete CTB handler
2662 		 * calls intel_context_sched_disable_unpin (-2 to pin_count).
2663 		 */
2664 		atomic_add(2, &ce->pin_count);
2665 
2666 		guc_id = prep_context_pending_disable(ce);
2667 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2668 
2669 		/*
2670 		 * In addition to disabling scheduling, set the preemption
2671 		 * timeout to the minimum value (1 us) so the banned context
2672 		 * gets kicked off the HW ASAP.
2673 		 */
2674 		with_intel_runtime_pm(runtime_pm, wakeref) {
2675 			__guc_context_set_preemption_timeout(guc, guc_id, 1);
2676 			__guc_context_sched_disable(guc, ce, guc_id);
2677 		}
2678 	} else {
2679 		if (!context_guc_id_invalid(ce))
2680 			with_intel_runtime_pm(runtime_pm, wakeref)
2681 				__guc_context_set_preemption_timeout(guc,
2682 								     ce->guc_id.id,
2683 								     1);
2684 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2685 	}
2686 }
2687 
2688 static void guc_context_sched_disable(struct intel_context *ce)
2689 {
2690 	struct intel_guc *guc = ce_to_guc(ce);
2691 	unsigned long flags;
2692 	struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
2693 	intel_wakeref_t wakeref;
2694 	u16 guc_id;
2695 
2696 	GEM_BUG_ON(intel_context_is_child(ce));
2697 
2698 	spin_lock_irqsave(&ce->guc_state.lock, flags);
2699 
2700 	/*
	 * We have to check if the context has been disabled by another thread,
	 * check if submission has been disabled to seal a race with reset and
	 * finally check if any more requests have been committed to the
	 * context, ensuring that a request doesn't slip through the
	 * 'context_pending_disable' fence.
2706 	 */
2707 	if (unlikely(!context_enabled(ce) || submission_disabled(guc) ||
2708 		     context_has_committed_requests(ce))) {
2709 		clr_context_enabled(ce);
2710 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2711 		goto unpin;
2712 	}
2713 	guc_id = prep_context_pending_disable(ce);
2714 
2715 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2716 
2717 	with_intel_runtime_pm(runtime_pm, wakeref)
2718 		__guc_context_sched_disable(guc, ce, guc_id);
2719 
2720 	return;
2721 unpin:
2722 	intel_context_sched_disable_unpin(ce);
2723 }
2724 
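/*
 * Final teardown of a registered context: mark it destroyed, take a GT PM
 * reference for the in-flight H2G and send the deregister. If submission is
 * already disabled (e.g. a reset is in progress), release the guc_id and
 * free the context immediately instead.
 */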
2725 static inline void guc_lrc_desc_unpin(struct intel_context *ce)
2726 {
2727 	struct intel_guc *guc = ce_to_guc(ce);
2728 	struct intel_gt *gt = guc_to_gt(guc);
2729 	unsigned long flags;
2730 	bool disabled;
2731 
2732 	GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
2733 	GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
2734 	GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
2735 	GEM_BUG_ON(context_enabled(ce));
2736 
2737 	/* Seal race with Reset */
2738 	spin_lock_irqsave(&ce->guc_state.lock, flags);
2739 	disabled = submission_disabled(guc);
2740 	if (likely(!disabled)) {
2741 		__intel_gt_pm_get(gt);
2742 		set_context_destroyed(ce);
2743 		clr_context_registered(ce);
2744 	}
2745 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2746 	if (unlikely(disabled)) {
2747 		release_guc_id(guc, ce);
2748 		__guc_context_destroy(ce);
2749 		return;
2750 	}
2751 
2752 	deregister_context(ce, ce->guc_id.id);
2753 }
2754 
2755 static void __guc_context_destroy(struct intel_context *ce)
2756 {
2757 	GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
2758 		   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
2759 		   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
2760 		   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
2761 	GEM_BUG_ON(ce->guc_state.number_committed_requests);
2762 
2763 	lrc_fini(ce);
2764 	intel_context_fini(ce);
2765 
2766 	if (intel_engine_is_virtual(ce->engine)) {
2767 		struct guc_virtual_engine *ve =
2768 			container_of(ce, typeof(*ve), context);
2769 
2770 		if (ve->base.breadcrumbs)
2771 			intel_breadcrumbs_put(ve->base.breadcrumbs);
2772 
2773 		kfree(ve);
2774 	} else {
2775 		intel_context_free(ce);
2776 	}
2777 }
2778 
2779 static void guc_flush_destroyed_contexts(struct intel_guc *guc)
2780 {
2781 	struct intel_context *ce;
2782 	unsigned long flags;
2783 
2784 	GEM_BUG_ON(!submission_disabled(guc) &&
2785 		   guc_submission_initialized(guc));
2786 
2787 	while (!list_empty(&guc->submission_state.destroyed_contexts)) {
2788 		spin_lock_irqsave(&guc->submission_state.lock, flags);
2789 		ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
2790 					      struct intel_context,
2791 					      destroyed_link);
2792 		if (ce)
2793 			list_del_init(&ce->destroyed_link);
2794 		spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2795 
2796 		if (!ce)
2797 			break;
2798 
2799 		release_guc_id(guc, ce);
2800 		__guc_context_destroy(ce);
2801 	}
2802 }
2803 
2804 static void deregister_destroyed_contexts(struct intel_guc *guc)
2805 {
2806 	struct intel_context *ce;
2807 	unsigned long flags;
2808 
2809 	while (!list_empty(&guc->submission_state.destroyed_contexts)) {
2810 		spin_lock_irqsave(&guc->submission_state.lock, flags);
2811 		ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
2812 					      struct intel_context,
2813 					      destroyed_link);
2814 		if (ce)
2815 			list_del_init(&ce->destroyed_link);
2816 		spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2817 
2818 		if (!ce)
2819 			break;
2820 
2821 		guc_lrc_desc_unpin(ce);
2822 	}
2823 }
2824 
2825 static void destroyed_worker_func(struct work_struct *w)
2826 {
2827 	struct intel_guc *guc = container_of(w, struct intel_guc,
2828 					     submission_state.destroyed_worker);
2829 	struct intel_gt *gt = guc_to_gt(guc);
2830 	int tmp;
2831 
2832 	with_intel_gt_pm(gt, tmp)
2833 		deregister_destroyed_contexts(guc);
2834 }
2835 
2836 static void guc_context_destroy(struct kref *kref)
2837 {
2838 	struct intel_context *ce = container_of(kref, typeof(*ce), ref);
2839 	struct intel_guc *guc = ce_to_guc(ce);
2840 	unsigned long flags;
2841 	bool destroy;
2842 
2843 	/*
	 * If the guc_id is invalid this context has been stolen and we can free
	 * it immediately. It can also be freed immediately if the context is
	 * not registered with the GuC or the GuC is in the middle of a reset.
2847 	 */
2848 	spin_lock_irqsave(&guc->submission_state.lock, flags);
2849 	destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
2850 		!lrc_desc_registered(guc, ce->guc_id.id);
2851 	if (likely(!destroy)) {
2852 		if (!list_empty(&ce->guc_id.link))
2853 			list_del_init(&ce->guc_id.link);
2854 		list_add_tail(&ce->destroyed_link,
2855 			      &guc->submission_state.destroyed_contexts);
2856 	} else {
2857 		__release_guc_id(guc, ce);
2858 	}
2859 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2860 	if (unlikely(destroy)) {
2861 		__guc_context_destroy(ce);
2862 		return;
2863 	}
2864 
2865 	/*
	 * We use a worker to issue the H2G to deregister the context as we may
	 * be taking the GT PM for the first time, which isn't allowed from an
	 * atomic context.
2869 	 */
2870 	queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker);
2871 }
2872 
2873 static int guc_context_alloc(struct intel_context *ce)
2874 {
2875 	return lrc_alloc(ce, ce->engine);
2876 }
2877 
2878 static void guc_context_set_prio(struct intel_guc *guc,
2879 				 struct intel_context *ce,
2880 				 u8 prio)
2881 {
2882 	u32 action[] = {
2883 		INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY,
2884 		ce->guc_id.id,
2885 		prio,
2886 	};
2887 
2888 	GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
2889 		   prio > GUC_CLIENT_PRIORITY_NORMAL);
2890 	lockdep_assert_held(&ce->guc_state.lock);
2891 
2892 	if (ce->guc_state.prio == prio || submission_disabled(guc) ||
2893 	    !context_registered(ce)) {
2894 		ce->guc_state.prio = prio;
2895 		return;
2896 	}
2897 
2898 	guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
2899 
2900 	ce->guc_state.prio = prio;
2901 	trace_intel_context_set_prio(ce);
2902 }
2903 
2904 static inline u8 map_i915_prio_to_guc_prio(int prio)
2905 {
2906 	if (prio == I915_PRIORITY_NORMAL)
2907 		return GUC_CLIENT_PRIORITY_KMD_NORMAL;
2908 	else if (prio < I915_PRIORITY_NORMAL)
2909 		return GUC_CLIENT_PRIORITY_NORMAL;
2910 	else if (prio < I915_PRIORITY_DISPLAY)
2911 		return GUC_CLIENT_PRIORITY_HIGH;
2912 	else
2913 		return GUC_CLIENT_PRIORITY_KMD_HIGH;
2914 }
2915 
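/*
 * Each context counts its in-flight requests per GuC priority level; the
 * context's effective priority is the highest (numerically lowest) level
 * with a non-zero count, re-evaluated in update_context_prio().
 */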
2916 static inline void add_context_inflight_prio(struct intel_context *ce,
2917 					     u8 guc_prio)
2918 {
2919 	lockdep_assert_held(&ce->guc_state.lock);
2920 	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
2921 
2922 	++ce->guc_state.prio_count[guc_prio];
2923 
2924 	/* Overflow protection */
2925 	GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
2926 }
2927 
2928 static inline void sub_context_inflight_prio(struct intel_context *ce,
2929 					     u8 guc_prio)
2930 {
2931 	lockdep_assert_held(&ce->guc_state.lock);
2932 	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
2933 
2934 	/* Underflow protection */
2935 	GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
2936 
2937 	--ce->guc_state.prio_count[guc_prio];
2938 }
2939 
2940 static inline void update_context_prio(struct intel_context *ce)
2941 {
2942 	struct intel_guc *guc = &ce->engine->gt->uc.guc;
2943 	int i;
2944 
2945 	BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0);
2946 	BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);
2947 
2948 	lockdep_assert_held(&ce->guc_state.lock);
2949 
2950 	for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) {
2951 		if (ce->guc_state.prio_count[i]) {
2952 			guc_context_set_prio(guc, ce, i);
2953 			break;
2954 		}
2955 	}
2956 }
2957 
2958 static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio)
2959 {
2960 	/* Lower value is higher priority */
2961 	return new_guc_prio < old_guc_prio;
2962 }
2963 
2964 static void add_to_context(struct i915_request *rq)
2965 {
2966 	struct intel_context *ce = request_to_scheduling_context(rq);
2967 	u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq));
2968 
2969 	GEM_BUG_ON(intel_context_is_child(ce));
2970 	GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
2971 
2972 	spin_lock(&ce->guc_state.lock);
2973 	list_move_tail(&rq->sched.link, &ce->guc_state.requests);
2974 
2975 	if (rq->guc_prio == GUC_PRIO_INIT) {
2976 		rq->guc_prio = new_guc_prio;
2977 		add_context_inflight_prio(ce, rq->guc_prio);
2978 	} else if (new_guc_prio_higher(rq->guc_prio, new_guc_prio)) {
2979 		sub_context_inflight_prio(ce, rq->guc_prio);
2980 		rq->guc_prio = new_guc_prio;
2981 		add_context_inflight_prio(ce, rq->guc_prio);
2982 	}
2983 	update_context_prio(ce);
2984 
2985 	spin_unlock(&ce->guc_state.lock);
2986 }
2987 
2988 static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
2989 {
2990 	lockdep_assert_held(&ce->guc_state.lock);
2991 
2992 	if (rq->guc_prio != GUC_PRIO_INIT &&
2993 	    rq->guc_prio != GUC_PRIO_FINI) {
2994 		sub_context_inflight_prio(ce, rq->guc_prio);
2995 		update_context_prio(ce);
2996 	}
2997 	rq->guc_prio = GUC_PRIO_FINI;
2998 }
2999 
3000 static void remove_from_context(struct i915_request *rq)
3001 {
3002 	struct intel_context *ce = request_to_scheduling_context(rq);
3003 
3004 	GEM_BUG_ON(intel_context_is_child(ce));
3005 
3006 	spin_lock_irq(&ce->guc_state.lock);
3007 
3008 	list_del_init(&rq->sched.link);
3009 	clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
3010 
3011 	/* Prevent further __await_execution() registering a cb, then flush */
3012 	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
3013 
3014 	guc_prio_fini(rq, ce);
3015 
3016 	decr_context_committed_requests(ce);
3017 
3018 	spin_unlock_irq(&ce->guc_state.lock);
3019 
3020 	atomic_dec(&ce->guc_id.ref);
3021 	i915_request_notify_execute_cb_imm(rq);
3022 }
3023 
3024 static const struct intel_context_ops guc_context_ops = {
3025 	.alloc = guc_context_alloc,
3026 
3027 	.pre_pin = guc_context_pre_pin,
3028 	.pin = guc_context_pin,
3029 	.unpin = guc_context_unpin,
3030 	.post_unpin = guc_context_post_unpin,
3031 
3032 	.ban = guc_context_ban,
3033 
3034 	.cancel_request = guc_context_cancel_request,
3035 
3036 	.enter = intel_context_enter_engine,
3037 	.exit = intel_context_exit_engine,
3038 
3039 	.sched_disable = guc_context_sched_disable,
3040 
3041 	.reset = lrc_reset,
3042 	.destroy = guc_context_destroy,
3043 
3044 	.create_virtual = guc_create_virtual,
3045 	.create_parallel = guc_create_parallel,
3046 };
3047 
3048 static void submit_work_cb(struct irq_work *wrk)
3049 {
3050 	struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
3051 
3052 	might_lock(&rq->engine->sched_engine->lock);
3053 	i915_sw_fence_complete(&rq->submit);
3054 }
3055 
3056 static void __guc_signal_context_fence(struct intel_context *ce)
3057 {
3058 	struct i915_request *rq, *rn;
3059 
3060 	lockdep_assert_held(&ce->guc_state.lock);
3061 
3062 	if (!list_empty(&ce->guc_state.fences))
3063 		trace_intel_context_fence_release(ce);
3064 
3065 	/*
	 * Use irq_work to ensure the locking order of sched_engine->lock ->
	 * ce->guc_state.lock is preserved.
3068 	 */
3069 	list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
3070 				 guc_fence_link) {
3071 		list_del(&rq->guc_fence_link);
3072 		irq_work_queue(&rq->submit_work);
3073 	}
3074 
3075 	INIT_LIST_HEAD(&ce->guc_state.fences);
3076 }
3077 
3078 static void guc_signal_context_fence(struct intel_context *ce)
3079 {
3080 	unsigned long flags;
3081 
3082 	GEM_BUG_ON(intel_context_is_child(ce));
3083 
3084 	spin_lock_irqsave(&ce->guc_state.lock, flags);
3085 	clr_context_wait_for_deregister_to_register(ce);
3086 	__guc_signal_context_fence(ce);
3087 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3088 }
3089 
3090 static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
3091 {
3092 	return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
3093 		!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) &&
3094 		!submission_disabled(ce_to_guc(ce));
3095 }
3096 
3097 static void guc_context_init(struct intel_context *ce)
3098 {
3099 	const struct i915_gem_context *ctx;
3100 	int prio = I915_CONTEXT_DEFAULT_PRIORITY;
3101 
3102 	rcu_read_lock();
3103 	ctx = rcu_dereference(ce->gem_context);
3104 	if (ctx)
3105 		prio = ctx->sched.priority;
3106 	rcu_read_unlock();
3107 
3108 	ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
3109 	set_bit(CONTEXT_GUC_INIT, &ce->flags);
3110 }
3111 
3112 static int guc_request_alloc(struct i915_request *rq)
3113 {
3114 	struct intel_context *ce = request_to_scheduling_context(rq);
3115 	struct intel_guc *guc = ce_to_guc(ce);
3116 	unsigned long flags;
3117 	int ret;
3118 
3119 	GEM_BUG_ON(!intel_context_is_pinned(rq->context));
3120 
3121 	/*
3122 	 * Flush enough space to reduce the likelihood of waiting after
3123 	 * we start building the request - in which case we will just
3124 	 * have to repeat work.
3125 	 */
3126 	rq->reserved_space += GUC_REQUEST_SIZE;
3127 
3128 	/*
3129 	 * Note that after this point, we have committed to using
3130 	 * this request as it is being used to both track the
3131 	 * state of engine initialisation and liveness of the
3132 	 * golden renderstate above. Think twice before you try
3133 	 * to cancel/unwind this request now.
3134 	 */
3135 
3136 	/* Unconditionally invalidate GPU caches and TLBs. */
3137 	ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
3138 	if (ret)
3139 		return ret;
3140 
3141 	rq->reserved_space -= GUC_REQUEST_SIZE;
3142 
3143 	if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
3144 		guc_context_init(ce);
3145 
3146 	/*
3147 	 * Call pin_guc_id here rather than in the pinning step as with
	 * dma_resv, contexts can be repeatedly pinned / unpinned, thrashing the
	 * guc_id and creating horrible race conditions. This is especially bad
	 * when guc_ids are being stolen due to over subscription. By the time
	 * this function is reached, it is guaranteed that the guc_id will be
	 * persistent until the generated request is retired, thus sealing these
	 * race conditions. It is still safe to fail here if guc_ids are
	 * exhausted and return -EAGAIN to the user, indicating that they can
	 * try again in the future.
3156 	 *
3157 	 * There is no need for a lock here as the timeline mutex ensures at
3158 	 * most one context can be executing this code path at once. The
3159 	 * guc_id_ref is incremented once for every request in flight and
3160 	 * decremented on each retire. When it is zero, a lock around the
3161 	 * increment (in pin_guc_id) is needed to seal a race with unpin_guc_id.
3162 	 */
3163 	if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
3164 		goto out;
3165 
3166 	ret = pin_guc_id(guc, ce);	/* returns 1 if new guc_id assigned */
3167 	if (unlikely(ret < 0))
3168 		return ret;
3169 	if (context_needs_register(ce, !!ret)) {
3170 		ret = guc_lrc_desc_pin(ce, true);
3171 		if (unlikely(ret)) {	/* unwind */
3172 			if (ret == -EPIPE) {
3173 				disable_submission(guc);
3174 				goto out;	/* GPU will be reset */
3175 			}
3176 			atomic_dec(&ce->guc_id.ref);
3177 			unpin_guc_id(guc, ce);
3178 			return ret;
3179 		}
3180 	}
3181 
3182 	clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
3183 
3184 out:
3185 	/*
	 * We block all requests on this context if a G2H is pending for a
	 * schedule disable or context deregistration as the GuC will fail a
	 * schedule enable or context registration if either G2H is pending,
	 * respectively. Once a G2H returns, the fence blocking these requests
	 * is released (see guc_signal_context_fence).
3191 	 */
3192 	spin_lock_irqsave(&ce->guc_state.lock, flags);
3193 	if (context_wait_for_deregister_to_register(ce) ||
3194 	    context_pending_disable(ce)) {
3195 		init_irq_work(&rq->submit_work, submit_work_cb);
3196 		i915_sw_fence_await(&rq->submit);
3197 
3198 		list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
3199 	}
3200 	incr_context_committed_requests(ce);
3201 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3202 
3203 	return 0;
3204 }
3205 
3206 static int guc_virtual_context_pre_pin(struct intel_context *ce,
3207 				       struct i915_gem_ww_ctx *ww,
3208 				       void **vaddr)
3209 {
3210 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3211 
3212 	return __guc_context_pre_pin(ce, engine, ww, vaddr);
3213 }
3214 
3215 static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
3216 {
3217 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3218 	int ret = __guc_context_pin(ce, engine, vaddr);
3219 	intel_engine_mask_t tmp, mask = ce->engine->mask;
3220 
3221 	if (likely(!ret))
3222 		for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3223 			intel_engine_pm_get(engine);
3224 
3225 	return ret;
3226 }
3227 
3228 static void guc_virtual_context_unpin(struct intel_context *ce)
3229 {
3230 	intel_engine_mask_t tmp, mask = ce->engine->mask;
3231 	struct intel_engine_cs *engine;
3232 	struct intel_guc *guc = ce_to_guc(ce);
3233 
3234 	GEM_BUG_ON(context_enabled(ce));
3235 	GEM_BUG_ON(intel_context_is_barrier(ce));
3236 
3237 	unpin_guc_id(guc, ce);
3238 	lrc_unpin(ce);
3239 
3240 	for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3241 		intel_engine_pm_put_async(engine);
3242 }
3243 
3244 static void guc_virtual_context_enter(struct intel_context *ce)
3245 {
3246 	intel_engine_mask_t tmp, mask = ce->engine->mask;
3247 	struct intel_engine_cs *engine;
3248 
3249 	for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3250 		intel_engine_pm_get(engine);
3251 
3252 	intel_timeline_enter(ce->timeline);
3253 }
3254 
3255 static void guc_virtual_context_exit(struct intel_context *ce)
3256 {
3257 	intel_engine_mask_t tmp, mask = ce->engine->mask;
3258 	struct intel_engine_cs *engine;
3259 
3260 	for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3261 		intel_engine_pm_put(engine);
3262 
3263 	intel_timeline_exit(ce->timeline);
3264 }
3265 
3266 static int guc_virtual_context_alloc(struct intel_context *ce)
3267 {
3268 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3269 
3270 	return lrc_alloc(ce, engine);
3271 }
3272 
3273 static const struct intel_context_ops virtual_guc_context_ops = {
3274 	.alloc = guc_virtual_context_alloc,
3275 
3276 	.pre_pin = guc_virtual_context_pre_pin,
3277 	.pin = guc_virtual_context_pin,
3278 	.unpin = guc_virtual_context_unpin,
3279 	.post_unpin = guc_context_post_unpin,
3280 
3281 	.ban = guc_context_ban,
3282 
3283 	.cancel_request = guc_context_cancel_request,
3284 
3285 	.enter = guc_virtual_context_enter,
3286 	.exit = guc_virtual_context_exit,
3287 
3288 	.sched_disable = guc_context_sched_disable,
3289 
3290 	.destroy = guc_context_destroy,
3291 
3292 	.get_sibling = guc_virtual_get_sibling,
3293 };
3294 
3295 static int guc_parent_context_pin(struct intel_context *ce, void *vaddr)
3296 {
3297 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3298 	struct intel_guc *guc = ce_to_guc(ce);
3299 	int ret;
3300 
3301 	GEM_BUG_ON(!intel_context_is_parent(ce));
3302 	GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3303 
3304 	ret = pin_guc_id(guc, ce);
3305 	if (unlikely(ret < 0))
3306 		return ret;
3307 
3308 	return __guc_context_pin(ce, engine, vaddr);
3309 }
3310 
3311 static int guc_child_context_pin(struct intel_context *ce, void *vaddr)
3312 {
3313 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3314 
3315 	GEM_BUG_ON(!intel_context_is_child(ce));
3316 	GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3317 
3318 	__intel_context_pin(ce->parallel.parent);
3319 	return __guc_context_pin(ce, engine, vaddr);
3320 }
3321 
3322 static void guc_parent_context_unpin(struct intel_context *ce)
3323 {
3324 	struct intel_guc *guc = ce_to_guc(ce);
3325 
3326 	GEM_BUG_ON(context_enabled(ce));
3327 	GEM_BUG_ON(intel_context_is_barrier(ce));
3328 	GEM_BUG_ON(!intel_context_is_parent(ce));
3329 	GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3330 
3331 	if (ce->parallel.last_rq)
3332 		i915_request_put(ce->parallel.last_rq);
3333 	unpin_guc_id(guc, ce);
3334 	lrc_unpin(ce);
3335 }
3336 
3337 static void guc_child_context_unpin(struct intel_context *ce)
3338 {
3339 	GEM_BUG_ON(context_enabled(ce));
3340 	GEM_BUG_ON(intel_context_is_barrier(ce));
3341 	GEM_BUG_ON(!intel_context_is_child(ce));
3342 	GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3343 
3344 	lrc_unpin(ce);
3345 }
3346 
3347 static void guc_child_context_post_unpin(struct intel_context *ce)
3348 {
3349 	GEM_BUG_ON(!intel_context_is_child(ce));
3350 	GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
3351 	GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3352 
3353 	lrc_post_unpin(ce);
3354 	intel_context_unpin(ce->parallel.parent);
3355 }
3356 
3357 static void guc_child_context_destroy(struct kref *kref)
3358 {
3359 	struct intel_context *ce = container_of(kref, typeof(*ce), ref);
3360 
3361 	__guc_context_destroy(ce);
3362 }
3363 
3364 static const struct intel_context_ops virtual_parent_context_ops = {
3365 	.alloc = guc_virtual_context_alloc,
3366 
3367 	.pre_pin = guc_context_pre_pin,
3368 	.pin = guc_parent_context_pin,
3369 	.unpin = guc_parent_context_unpin,
3370 	.post_unpin = guc_context_post_unpin,
3371 
3372 	.ban = guc_context_ban,
3373 
3374 	.cancel_request = guc_context_cancel_request,
3375 
3376 	.enter = guc_virtual_context_enter,
3377 	.exit = guc_virtual_context_exit,
3378 
3379 	.sched_disable = guc_context_sched_disable,
3380 
3381 	.destroy = guc_context_destroy,
3382 
3383 	.get_sibling = guc_virtual_get_sibling,
3384 };
3385 
3386 static const struct intel_context_ops virtual_child_context_ops = {
3387 	.alloc = guc_virtual_context_alloc,
3388 
3389 	.pre_pin = guc_context_pre_pin,
3390 	.pin = guc_child_context_pin,
3391 	.unpin = guc_child_context_unpin,
3392 	.post_unpin = guc_child_context_post_unpin,
3393 
3394 	.cancel_request = guc_context_cancel_request,
3395 
3396 	.enter = guc_virtual_context_enter,
3397 	.exit = guc_virtual_context_exit,
3398 
3399 	.destroy = guc_child_context_destroy,
3400 
3401 	.get_sibling = guc_virtual_get_sibling,
3402 };
3403 
3404 /*
3405  * The below override of the breadcrumbs is enabled when the user configures a
3406  * context for parallel submission (multi-lrc, parent-child).
3407  *
 * The overridden breadcrumbs implement an algorithm which allows the GuC to
 * safely preempt all the hw contexts configured for parallel submission
 * between each BB. The contract between the i915 and the GuC is that if the
 * parent context can be preempted, all the children can be preempted, and the
 * GuC will always try to preempt the parent before the children. A handshake
 * between the parent / child breadcrumbs ensures the i915 holds up its end of
 * the deal, creating a window to preempt between each set of BBs.
3415  */
3416 static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
3417 						     u64 offset, u32 len,
3418 						     const unsigned int flags);
3419 static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
3420 						    u64 offset, u32 len,
3421 						    const unsigned int flags);
3422 static u32 *
3423 emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
3424 						 u32 *cs);
3425 static u32 *
3426 emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
3427 						u32 *cs);
3428 
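/*
 * Create the contexts for a parallel (multi-LRC) submission: one virtual
 * context per slot of @width, each spanning @num_siblings physical engines.
 * The first context becomes the parent, the rest are bound to it as
 * children, and the breadcrumb / BB start vfuncs are overridden as described
 * above.
 */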
3429 static struct intel_context *
3430 guc_create_parallel(struct intel_engine_cs **engines,
3431 		    unsigned int num_siblings,
3432 		    unsigned int width)
3433 {
3434 	struct intel_engine_cs **siblings = NULL;
3435 	struct intel_context *parent = NULL, *ce, *err;
3436 	int i, j;
3437 
3438 	siblings = kmalloc_array(num_siblings,
3439 				 sizeof(*siblings),
3440 				 GFP_KERNEL);
3441 	if (!siblings)
3442 		return ERR_PTR(-ENOMEM);
3443 
3444 	for (i = 0; i < width; ++i) {
3445 		for (j = 0; j < num_siblings; ++j)
3446 			siblings[j] = engines[i * num_siblings + j];
3447 
3448 		ce = intel_engine_create_virtual(siblings, num_siblings,
3449 						 FORCE_VIRTUAL);
3450 		if (IS_ERR(ce)) {
3451 			err = ERR_CAST(ce);
3452 			goto unwind;
3453 		}
3454 
3455 		if (i == 0) {
3456 			parent = ce;
3457 			parent->ops = &virtual_parent_context_ops;
3458 		} else {
3459 			ce->ops = &virtual_child_context_ops;
3460 			intel_context_bind_parent_child(parent, ce);
3461 		}
3462 	}
3463 
3464 	parent->parallel.fence_context = dma_fence_context_alloc(1);
3465 
3466 	parent->engine->emit_bb_start =
3467 		emit_bb_start_parent_no_preempt_mid_batch;
3468 	parent->engine->emit_fini_breadcrumb =
3469 		emit_fini_breadcrumb_parent_no_preempt_mid_batch;
3470 	parent->engine->emit_fini_breadcrumb_dw =
3471 		12 + 4 * parent->parallel.number_children;
3472 	for_each_child(parent, ce) {
3473 		ce->engine->emit_bb_start =
3474 			emit_bb_start_child_no_preempt_mid_batch;
3475 		ce->engine->emit_fini_breadcrumb =
3476 			emit_fini_breadcrumb_child_no_preempt_mid_batch;
3477 		ce->engine->emit_fini_breadcrumb_dw = 16;
3478 	}
3479 
3480 	kfree(siblings);
3481 	return parent;
3482 
3483 unwind:
3484 	if (parent)
3485 		intel_context_put(parent);
3486 	kfree(siblings);
3487 	return err;
3488 }
3489 
3490 static bool
3491 guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b)
3492 {
3493 	struct intel_engine_cs *sibling;
3494 	intel_engine_mask_t tmp, mask = b->engine_mask;
3495 	bool result = false;
3496 
3497 	for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
3498 		result |= intel_engine_irq_enable(sibling);
3499 
3500 	return result;
3501 }
3502 
3503 static void
3504 guc_irq_disable_breadcrumbs(struct intel_breadcrumbs *b)
3505 {
3506 	struct intel_engine_cs *sibling;
3507 	intel_engine_mask_t tmp, mask = b->engine_mask;
3508 
3509 	for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
3510 		intel_engine_irq_disable(sibling);
3511 }
3512 
3513 static void guc_init_breadcrumbs(struct intel_engine_cs *engine)
3514 {
3515 	int i;
3516 
3517 	/*
3518 	 * In GuC submission mode we do not know which physical engine a request
3519 	 * will be scheduled on; this is a problem because the breadcrumb
3520 	 * interrupt is per physical engine. To work around this we attach
3521 	 * requests to, and direct all breadcrumb interrupts at, the first
3522 	 * instance of an engine per class. In addition, all breadcrumb
3523 	 * interrupts are enabled / disabled across an engine class in unison.
3524 	 */
3525 	for (i = 0; i < MAX_ENGINE_INSTANCE; ++i) {
3526 		struct intel_engine_cs *sibling =
3527 			engine->gt->engine_class[engine->class][i];
3528 
3529 		if (sibling) {
3530 			if (engine->breadcrumbs != sibling->breadcrumbs) {
3531 				intel_breadcrumbs_put(engine->breadcrumbs);
3532 				engine->breadcrumbs =
3533 					intel_breadcrumbs_get(sibling->breadcrumbs);
3534 			}
3535 			break;
3536 		}
3537 	}
3538 
3539 	if (engine->breadcrumbs) {
3540 		engine->breadcrumbs->engine_mask |= engine->mask;
3541 		engine->breadcrumbs->irq_enable = guc_irq_enable_breadcrumbs;
3542 		engine->breadcrumbs->irq_disable = guc_irq_disable_breadcrumbs;
3543 	}
3544 }
3545 
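/*
 * Raise (never lower) the GuC priority of an in-flight request: the old
 * priority is removed from the context's inflight priority counts, the new
 * one added and the context priority recomputed, all under the guc_state
 * lock.
 */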
3546 static void guc_bump_inflight_request_prio(struct i915_request *rq,
3547 					   int prio)
3548 {
3549 	struct intel_context *ce = request_to_scheduling_context(rq);
3550 	u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
3551 
3552 	/* Short circuit if bumping the priority would have no effect */
3553 	if (prio < I915_PRIORITY_NORMAL ||
3554 	    rq->guc_prio == GUC_PRIO_FINI ||
3555 	    (rq->guc_prio != GUC_PRIO_INIT &&
3556 	     !new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
3557 		return;
3558 
3559 	spin_lock(&ce->guc_state.lock);
3560 	if (rq->guc_prio != GUC_PRIO_FINI) {
3561 		if (rq->guc_prio != GUC_PRIO_INIT)
3562 			sub_context_inflight_prio(ce, rq->guc_prio);
3563 		rq->guc_prio = new_guc_prio;
3564 		add_context_inflight_prio(ce, rq->guc_prio);
3565 		update_context_prio(ce);
3566 	}
3567 	spin_unlock(&ce->guc_state.lock);
3568 }
3569 
3570 static void guc_retire_inflight_request_prio(struct i915_request *rq)
3571 {
3572 	struct intel_context *ce = request_to_scheduling_context(rq);
3573 
3574 	spin_lock(&ce->guc_state.lock);
3575 	guc_prio_fini(rq, ce);
3576 	spin_unlock(&ce->guc_state.lock);
3577 }
3578 
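/* Reset the seqno of every timeline still tracked by this engine's HWSP. */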
3579 static void sanitize_hwsp(struct intel_engine_cs *engine)
3580 {
3581 	struct intel_timeline *tl;
3582 
3583 	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
3584 		intel_timeline_reset_seqno(tl);
3585 }
3586 
3587 static void guc_sanitize(struct intel_engine_cs *engine)
3588 {
3589 	/*
3590 	 * Poison residual state on resume, in case the suspend didn't!
3591 	 *
3592 	 * We have to assume that across suspend/resume (or other loss
3593 	 * of control) the contents of our pinned buffers have been
3594 	 * lost, replaced by garbage. Since this doesn't always happen,
3595 	 * let's poison such state so that we more quickly spot when
3596 	 * we falsely assume it has been preserved.
3597 	 */
3598 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
3599 		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
3600 
3601 	/*
3602 	 * The kernel_context HWSP is stored in the status_page. As above,
3603 	 * that may be lost on resume/initialisation, and so we need to
3604 	 * reset the value in the HWSP.
3605 	 */
3606 	sanitize_hwsp(engine);
3607 
3608 	/* And scrub the dirty cachelines for the HWSP */
3609 	clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
3610 
3611 	intel_engine_reset_pinned_contexts(engine);
3612 }
3613 
3614 static void setup_hwsp(struct intel_engine_cs *engine)
3615 {
3616 	intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
3617 
3618 	ENGINE_WRITE_FW(engine,
3619 			RING_HWS_PGA,
3620 			i915_ggtt_offset(engine->status_page.vma));
3621 }
3622 
3623 static void start_engine(struct intel_engine_cs *engine)
3624 {
3625 	ENGINE_WRITE_FW(engine,
3626 			RING_MODE_GEN7,
3627 			_MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
3628 
3629 	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
3630 	ENGINE_POSTING_READ(engine, RING_MI_MODE);
3631 }
3632 
3633 static int guc_resume(struct intel_engine_cs *engine)
3634 {
3635 	assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
3636 
3637 	intel_mocs_init_engine(engine);
3638 
3639 	intel_breadcrumbs_reset(engine->breadcrumbs);
3640 
3641 	setup_hwsp(engine);
3642 	start_engine(engine);
3643 
3644 	return 0;
3645 }
3646 
3647 static bool guc_sched_engine_disabled(struct i915_sched_engine *sched_engine)
3648 {
3649 	return !sched_engine->tasklet.callback;
3650 }
3651 
3652 static void guc_set_default_submission(struct intel_engine_cs *engine)
3653 {
3654 	engine->submit_request = guc_submit_request;
3655 }
3656 
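/*
 * Pin a guc_id (if the context does not already hold a valid one) and
 * register the LRC descriptor of an already-pinned kernel context with the
 * GuC.
 */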
3657 static inline void guc_kernel_context_pin(struct intel_guc *guc,
3658 					  struct intel_context *ce)
3659 {
3660 	if (context_guc_id_invalid(ce))
3661 		pin_guc_id(guc, ce);
3662 	guc_lrc_desc_pin(ce, true);
3663 }
3664 
3665 static inline void guc_init_lrc_mapping(struct intel_guc *guc)
3666 {
3667 	struct intel_gt *gt = guc_to_gt(guc);
3668 	struct intel_engine_cs *engine;
3669 	enum intel_engine_id id;
3670 
3671 	/* make sure all descriptors are clean... */
3672 	xa_destroy(&guc->context_lookup);
3673 
3674 	/*
3675 	 * Some contexts might have been pinned before we enabled GuC
3676 	 * submission, so we need to add them to the GuC bookkeeping.
3677 	 * Also, after a reset of the GuC we want to make sure that the
3678 	 * information shared with GuC is properly reset. The kernel LRCs are
3679 	 * not attached to the gem_context, so they need to be added separately.
3680 	 *
3681 	 * Note: we purposefully do not check the return of guc_lrc_desc_pin,
3682 	 * because that function can only fail if a reset is just starting. This
3683 	 * is at the end of reset so presumably another reset isn't happening
3684 	 * and even if it did this code would be run again.
3685 	 */
3686 
3687 	for_each_engine(engine, gt, id) {
3688 		struct intel_context *ce;
3689 
3690 		list_for_each_entry(ce, &engine->pinned_contexts_list,
3691 				    pinned_contexts_link)
3692 			guc_kernel_context_pin(guc, ce);
3693 	}
3694 }
3695 
3696 static void guc_release(struct intel_engine_cs *engine)
3697 {
3698 	engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
3699 
3700 	intel_engine_cleanup_common(engine);
3701 	lrc_fini_wa_ctx(engine);
3702 }
3703 
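/*
 * A request on a virtual engine may be executed by any of its physical
 * siblings under GuC submission, so bump the serial of every sibling.
 */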
3704 static void virtual_guc_bump_serial(struct intel_engine_cs *engine)
3705 {
3706 	struct intel_engine_cs *e;
3707 	intel_engine_mask_t tmp, mask = engine->mask;
3708 
3709 	for_each_engine_masked(e, engine->gt, mask, tmp)
3710 		e->serial++;
3711 }
3712 
3713 static void guc_default_vfuncs(struct intel_engine_cs *engine)
3714 {
3715 	/* Default vfuncs which can be overridden by each engine. */
3716 
3717 	engine->resume = guc_resume;
3718 
3719 	engine->cops = &guc_context_ops;
3720 	engine->request_alloc = guc_request_alloc;
3721 	engine->add_active_request = add_to_context;
3722 	engine->remove_active_request = remove_from_context;
3723 
3724 	engine->sched_engine->schedule = i915_schedule;
3725 
3726 	engine->reset.prepare = guc_reset_nop;
3727 	engine->reset.rewind = guc_rewind_nop;
3728 	engine->reset.cancel = guc_reset_nop;
3729 	engine->reset.finish = guc_reset_nop;
3730 
3731 	engine->emit_flush = gen8_emit_flush_xcs;
3732 	engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
3733 	engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
3734 	if (GRAPHICS_VER(engine->i915) >= 12) {
3735 		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
3736 		engine->emit_flush = gen12_emit_flush_xcs;
3737 	}
3738 	engine->set_default_submission = guc_set_default_submission;
3739 	engine->busyness = guc_engine_busyness;
3740 
3741 	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
3742 	engine->flags |= I915_ENGINE_HAS_PREEMPTION;
3743 	engine->flags |= I915_ENGINE_HAS_TIMESLICES;
3744 
3745 	/*
3746 	 * TODO: GuC supports timeslicing and semaphores as well, but they're
3747 	 * handled by the firmware so some minor tweaks are required before
3748 	 * enabling.
3749 	 *
3750 	 * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
3751 	 */
3752 
3753 	engine->emit_bb_start = gen8_emit_bb_start;
3754 }
3755 
3756 static void rcs_submission_override(struct intel_engine_cs *engine)
3757 {
3758 	switch (GRAPHICS_VER(engine->i915)) {
3759 	case 12:
3760 		engine->emit_flush = gen12_emit_flush_rcs;
3761 		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
3762 		break;
3763 	case 11:
3764 		engine->emit_flush = gen11_emit_flush_rcs;
3765 		engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
3766 		break;
3767 	default:
3768 		engine->emit_flush = gen8_emit_flush_rcs;
3769 		engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
3770 		break;
3771 	}
3772 }
3773 
3774 static inline void guc_default_irqs(struct intel_engine_cs *engine)
3775 {
3776 	engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
3777 	intel_engine_set_irq_handler(engine, cs_irq_handler);
3778 }
3779 
3780 static void guc_sched_engine_destroy(struct kref *kref)
3781 {
3782 	struct i915_sched_engine *sched_engine =
3783 		container_of(kref, typeof(*sched_engine), ref);
3784 	struct intel_guc *guc = sched_engine->private_data;
3785 
3786 	guc->sched_engine = NULL;
3787 	tasklet_kill(&sched_engine->tasklet); /* flush the callback */
3788 	kfree(sched_engine);
3789 }
3790 
3791 int intel_guc_submission_setup(struct intel_engine_cs *engine)
3792 {
3793 	struct drm_i915_private *i915 = engine->i915;
3794 	struct intel_guc *guc = &engine->gt->uc.guc;
3795 
3796 	/*
3797 	 * The setup relies on several assumptions (e.g. irqs always enabled)
3798 	 * that are only valid on gen11+
3799 	 */
3800 	GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
3801 
3802 	if (!guc->sched_engine) {
3803 		guc->sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
3804 		if (!guc->sched_engine)
3805 			return -ENOMEM;
3806 
3807 		guc->sched_engine->schedule = i915_schedule;
3808 		guc->sched_engine->disabled = guc_sched_engine_disabled;
3809 		guc->sched_engine->private_data = guc;
3810 		guc->sched_engine->destroy = guc_sched_engine_destroy;
3811 		guc->sched_engine->bump_inflight_request_prio =
3812 			guc_bump_inflight_request_prio;
3813 		guc->sched_engine->retire_inflight_request_prio =
3814 			guc_retire_inflight_request_prio;
3815 		tasklet_setup(&guc->sched_engine->tasklet,
3816 			      guc_submission_tasklet);
3817 	}
3818 	i915_sched_engine_put(engine->sched_engine);
3819 	engine->sched_engine = i915_sched_engine_get(guc->sched_engine);
3820 
3821 	guc_default_vfuncs(engine);
3822 	guc_default_irqs(engine);
3823 	guc_init_breadcrumbs(engine);
3824 
3825 	if (engine->class == RENDER_CLASS)
3826 		rcs_submission_override(engine);
3827 
3828 	lrc_init_wa_ctx(engine);
3829 
3830 	/* Finally, take ownership and responsibility for cleanup! */
3831 	engine->sanitize = guc_sanitize;
3832 	engine->release = guc_release;
3833 
3834 	return 0;
3835 }
3836 
3837 void intel_guc_submission_enable(struct intel_guc *guc)
3838 {
3839 	guc_init_lrc_mapping(guc);
3840 	guc_init_engine_stats(guc);
3841 }
3842 
3843 void intel_guc_submission_disable(struct intel_guc *guc)
3844 {
3845 	/* Note: By the time we're here, GuC may have already been reset */
3846 }
3847 
3848 static bool __guc_submission_supported(struct intel_guc *guc)
3849 {
3850 	/* GuC submission is unavailable for pre-Gen11 */
3851 	return intel_guc_is_supported(guc) &&
3852 	       GRAPHICS_VER(guc_to_gt(guc)->i915) >= 11;
3853 }
3854 
3855 static bool __guc_submission_selected(struct intel_guc *guc)
3856 {
3857 	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
3858 
3859 	if (!intel_guc_submission_is_supported(guc))
3860 		return false;
3861 
3862 	return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
3863 }
3864 
3865 void intel_guc_submission_init_early(struct intel_guc *guc)
3866 {
3867 	guc->submission_state.num_guc_ids = GUC_MAX_LRC_DESCRIPTORS;
3868 	guc->submission_supported = __guc_submission_supported(guc);
3869 	guc->submission_selected = __guc_submission_selected(guc);
3870 }
3871 
3872 static inline struct intel_context *
3873 g2h_context_lookup(struct intel_guc *guc, u32 desc_idx)
3874 {
3875 	struct intel_context *ce;
3876 
3877 	if (unlikely(desc_idx >= GUC_MAX_LRC_DESCRIPTORS)) {
3878 		drm_err(&guc_to_gt(guc)->i915->drm,
3879 			"Invalid desc_idx %u", desc_idx);
3880 		return NULL;
3881 	}
3882 
3883 	ce = __get_context(guc, desc_idx);
3884 	if (unlikely(!ce)) {
3885 		drm_err(&guc_to_gt(guc)->i915->drm,
3886 			"Context is NULL, desc_idx %u", desc_idx);
3887 		return NULL;
3888 	}
3889 
3890 	if (unlikely(intel_context_is_child(ce))) {
3891 		drm_err(&guc_to_gt(guc)->i915->drm,
3892 			"Context is child, desc_idx %u", desc_idx);
3893 		return NULL;
3894 	}
3895 
3896 	return ce;
3897 }
3898 
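/*
 * G2H handler for a completed context deregistration. Either the guc_id was
 * stolen and a new owner is waiting, in which case the new context is
 * registered and its fence signalled, or the context was being destroyed, in
 * which case the guc_id is released and the destruction is completed.
 */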
3899 int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
3900 					  const u32 *msg,
3901 					  u32 len)
3902 {
3903 	struct intel_context *ce;
3904 	u32 desc_idx = msg[0];
3905 
3906 	if (unlikely(len < 1)) {
3907 		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
3908 		return -EPROTO;
3909 	}
3910 
3911 	ce = g2h_context_lookup(guc, desc_idx);
3912 	if (unlikely(!ce))
3913 		return -EPROTO;
3914 
3915 	trace_intel_context_deregister_done(ce);
3916 
3917 #ifdef CONFIG_DRM_I915_SELFTEST
3918 	if (unlikely(ce->drop_deregister)) {
3919 		ce->drop_deregister = false;
3920 		return 0;
3921 	}
3922 #endif
3923 
3924 	if (context_wait_for_deregister_to_register(ce)) {
3925 		struct intel_runtime_pm *runtime_pm =
3926 			&ce->engine->gt->i915->runtime_pm;
3927 		intel_wakeref_t wakeref;
3928 
3929 		/*
3930 		 * Previous owner of this guc_id has been deregistered, now it is
3931 		 * safe to register this context.
3932 		 */
3933 		with_intel_runtime_pm(runtime_pm, wakeref)
3934 			register_context(ce, true);
3935 		guc_signal_context_fence(ce);
3936 		intel_context_put(ce);
3937 	} else if (context_destroyed(ce)) {
3938 		/* Context has been destroyed */
3939 		intel_gt_pm_put_async(guc_to_gt(guc));
3940 		release_guc_id(guc, ce);
3941 		__guc_context_destroy(ce);
3942 	}
3943 
3944 	decr_outstanding_submission_g2h(guc);
3945 
3946 	return 0;
3947 }
3948 
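/*
 * G2H handler for a completed schedule enable / disable. For an enable the
 * pending flag is simply cleared; for a disable the context is unpinned, the
 * blocked / submission fences are signalled and, if the context was banned,
 * its remaining requests are cancelled.
 */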
3949 int intel_guc_sched_done_process_msg(struct intel_guc *guc,
3950 				     const u32 *msg,
3951 				     u32 len)
3952 {
3953 	struct intel_context *ce;
3954 	unsigned long flags;
3955 	u32 desc_idx = msg[0];
3956 
3957 	if (unlikely(len < 2)) {
3958 		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
3959 		return -EPROTO;
3960 	}
3961 
3962 	ce = g2h_context_lookup(guc, desc_idx);
3963 	if (unlikely(!ce))
3964 		return -EPROTO;
3965 
3966 	if (unlikely(context_destroyed(ce) ||
3967 		     (!context_pending_enable(ce) &&
3968 		     !context_pending_disable(ce)))) {
3969 		drm_err(&guc_to_gt(guc)->i915->drm,
3970 			"Bad context sched_state 0x%x, desc_idx %u",
3971 			ce->guc_state.sched_state, desc_idx);
3972 		return -EPROTO;
3973 	}
3974 
3975 	trace_intel_context_sched_done(ce);
3976 
3977 	if (context_pending_enable(ce)) {
3978 #ifdef CONFIG_DRM_I915_SELFTEST
3979 		if (unlikely(ce->drop_schedule_enable)) {
3980 			ce->drop_schedule_enable = false;
3981 			return 0;
3982 		}
3983 #endif
3984 
3985 		spin_lock_irqsave(&ce->guc_state.lock, flags);
3986 		clr_context_pending_enable(ce);
3987 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3988 	} else if (context_pending_disable(ce)) {
3989 		bool banned;
3990 
3991 #ifdef CONFIG_DRM_I915_SELFTEST
3992 		if (unlikely(ce->drop_schedule_disable)) {
3993 			ce->drop_schedule_disable = false;
3994 			return 0;
3995 		}
3996 #endif
3997 
3998 		/*
3999 		 * Unpin must be done before __guc_signal_context_fence,
4000 		 * otherwise a race exists where requests could be submitted and
4001 		 * retired before this unpin completes, resulting in the
4002 		 * pin_count dropping to zero while the context is still
4003 		 * enabled.
4004 		 */
4005 		intel_context_sched_disable_unpin(ce);
4006 
4007 		spin_lock_irqsave(&ce->guc_state.lock, flags);
4008 		banned = context_banned(ce);
4009 		clr_context_banned(ce);
4010 		clr_context_pending_disable(ce);
4011 		__guc_signal_context_fence(ce);
4012 		guc_blocked_fence_complete(ce);
4013 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
4014 
4015 		if (banned) {
4016 			guc_cancel_context_requests(ce);
4017 			intel_engine_signal_breadcrumbs(ce->engine);
4018 		}
4019 	}
4020 
4021 	decr_outstanding_submission_g2h(guc);
4022 	intel_context_put(ce);
4023 
4024 	return 0;
4025 }
4026 
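/*
 * Mark the context as hung on its physical engine, capture an error state for
 * that engine and bump the per-class engine reset count.
 */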
4027 static void capture_error_state(struct intel_guc *guc,
4028 				struct intel_context *ce)
4029 {
4030 	struct intel_gt *gt = guc_to_gt(guc);
4031 	struct drm_i915_private *i915 = gt->i915;
4032 	struct intel_engine_cs *engine = __context_to_physical_engine(ce);
4033 	intel_wakeref_t wakeref;
4034 
4035 	intel_engine_set_hung_context(engine, ce);
4036 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
4037 		i915_capture_error_state(gt, engine->mask);
4038 	atomic_inc(&i915->gpu_error.reset_engine_count[engine->uabi_class]);
4039 }
4040 
4041 static void guc_context_replay(struct intel_context *ce)
4042 {
4043 	struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
4044 
4045 	__guc_reset_context(ce, true);
4046 	tasklet_hi_schedule(&sched_engine->tasklet);
4047 }
4048 
4049 static void guc_handle_context_reset(struct intel_guc *guc,
4050 				     struct intel_context *ce)
4051 {
4052 	trace_intel_context_reset(ce);
4053 
4054 	/*
4055 	 * XXX: Racey if request cancellation has occurred, see comment in
4056 	 * __guc_reset_context().
4057 	 */
4058 	if (likely(!intel_context_is_banned(ce) &&
4059 		   !context_blocked(ce))) {
4060 		capture_error_state(guc, ce);
4061 		guc_context_replay(ce);
4062 	}
4063 }
4064 
4065 int intel_guc_context_reset_process_msg(struct intel_guc *guc,
4066 					const u32 *msg, u32 len)
4067 {
4068 	struct intel_context *ce;
4069 	unsigned long flags;
4070 	int desc_idx;
4071 
4072 	if (unlikely(len != 1)) {
4073 		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
4074 		return -EPROTO;
4075 	}
4076 
4077 	desc_idx = msg[0];
4078 
4079 	/*
4080 	 * The context lookup uses the xarray, but lookups only require an RCU
4081 	 * lock, not the full spinlock. So take the lock explicitly and keep it
4082 	 * until a reference to the context has been taken, to ensure it can't be
4083 	 * destroyed asynchronously before the reset is done.
4084 	 */
4085 	xa_lock_irqsave(&guc->context_lookup, flags);
4086 	ce = g2h_context_lookup(guc, desc_idx);
4087 	if (ce)
4088 		intel_context_get(ce);
4089 	xa_unlock_irqrestore(&guc->context_lookup, flags);
4090 
4091 	if (unlikely(!ce))
4092 		return -EPROTO;
4093 
4094 	guc_handle_context_reset(guc, ce);
4095 	intel_context_put(ce);
4096 
4097 	return 0;
4098 }
4099 
4100 static struct intel_engine_cs *
4101 guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
4102 {
4103 	struct intel_gt *gt = guc_to_gt(guc);
4104 	u8 engine_class = guc_class_to_engine_class(guc_class);
4105 
4106 	/* Class index is checked in class converter */
4107 	GEM_BUG_ON(instance > MAX_ENGINE_INSTANCE);
4108 
4109 	return gt->engine_class[engine_class][instance];
4110 }
4111 
4112 int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
4113 					 const u32 *msg, u32 len)
4114 {
4115 	struct intel_engine_cs *engine;
4116 	struct intel_gt *gt = guc_to_gt(guc);
4117 	u8 guc_class, instance;
4118 	u32 reason;
4119 
4120 	if (unlikely(len != 3)) {
4121 		drm_err(&gt->i915->drm, "Invalid length %u", len);
4122 		return -EPROTO;
4123 	}
4124 
4125 	guc_class = msg[0];
4126 	instance = msg[1];
4127 	reason = msg[2];
4128 
4129 	engine = guc_lookup_engine(guc, guc_class, instance);
4130 	if (unlikely(!engine)) {
4131 		drm_err(&gt->i915->drm,
4132 			"Invalid engine %d:%d", guc_class, instance);
4133 		return -EPROTO;
4134 	}
4135 
4136 	/*
4137 	 * This is an unexpected failure of a hardware feature. So, log a real
4138 	 * error message, not just the informational one that comes with the reset.
4139 	 */
4140 	drm_err(&gt->i915->drm, "GuC engine reset request failed on %d:%d (%s) because 0x%08X",
4141 		guc_class, instance, engine->name, reason);
4142 
4143 	intel_gt_handle_error(gt, engine->mask,
4144 			      I915_ERROR_CAPTURE,
4145 			      "GuC failed to reset %s (reason=0x%08x)\n",
4146 			      engine->name, reason);
4147 
4148 	return 0;
4149 }
4150 
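/*
 * Walk the GuC context lookup table and mark the first pinned context on this
 * engine that still has an active request as the engine's hung context.
 */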
4151 void intel_guc_find_hung_context(struct intel_engine_cs *engine)
4152 {
4153 	struct intel_guc *guc = &engine->gt->uc.guc;
4154 	struct intel_context *ce;
4155 	struct i915_request *rq;
4156 	unsigned long index;
4157 	unsigned long flags;
4158 
4159 	/* Reset called during driver load? GuC not yet initialised! */
4160 	if (unlikely(!guc_submission_initialized(guc)))
4161 		return;
4162 
4163 	xa_lock_irqsave(&guc->context_lookup, flags);
4164 	xa_for_each(&guc->context_lookup, index, ce) {
4165 		if (!kref_get_unless_zero(&ce->ref))
4166 			continue;
4167 
4168 		xa_unlock(&guc->context_lookup);
4169 
4170 		if (!intel_context_is_pinned(ce))
4171 			goto next;
4172 
4173 		if (intel_engine_is_virtual(ce->engine)) {
4174 			if (!(ce->engine->mask & engine->mask))
4175 				goto next;
4176 		} else {
4177 			if (ce->engine != engine)
4178 				goto next;
4179 		}
4180 
4181 		list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
4182 			if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
4183 				continue;
4184 
4185 			intel_engine_set_hung_context(engine, ce);
4186 
4187 			/* Can only cope with one hang at a time... */
4188 			intel_context_put(ce);
4189 			xa_lock(&guc->context_lookup);
4190 			goto done;
4191 		}
4192 next:
4193 		intel_context_put(ce);
4194 		xa_lock(&guc->context_lookup);
4195 	}
4196 done:
4197 	xa_unlock_irqrestore(&guc->context_lookup, flags);
4198 }
4199 
4200 void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
4201 				    struct i915_request *hung_rq,
4202 				    struct drm_printer *m)
4203 {
4204 	struct intel_guc *guc = &engine->gt->uc.guc;
4205 	struct intel_context *ce;
4206 	unsigned long index;
4207 	unsigned long flags;
4208 
4209 	/* Reset called during driver load? GuC not yet initialised! */
4210 	if (unlikely(!guc_submission_initialized(guc)))
4211 		return;
4212 
4213 	xa_lock_irqsave(&guc->context_lookup, flags);
4214 	xa_for_each(&guc->context_lookup, index, ce) {
4215 		if (!kref_get_unless_zero(&ce->ref))
4216 			continue;
4217 
4218 		xa_unlock(&guc->context_lookup);
4219 
4220 		if (!intel_context_is_pinned(ce))
4221 			goto next;
4222 
4223 		if (intel_engine_is_virtual(ce->engine)) {
4224 			if (!(ce->engine->mask & engine->mask))
4225 				goto next;
4226 		} else {
4227 			if (ce->engine != engine)
4228 				goto next;
4229 		}
4230 
4231 		spin_lock(&ce->guc_state.lock);
4232 		intel_engine_dump_active_requests(&ce->guc_state.requests,
4233 						  hung_rq, m);
4234 		spin_unlock(&ce->guc_state.lock);
4235 
4236 next:
4237 		intel_context_put(ce);
4238 		xa_lock(&guc->context_lookup);
4239 	}
4240 	xa_unlock_irqrestore(&guc->context_lookup, flags);
4241 }
4242 
4243 void intel_guc_submission_print_info(struct intel_guc *guc,
4244 				     struct drm_printer *p)
4245 {
4246 	struct i915_sched_engine *sched_engine = guc->sched_engine;
4247 	struct rb_node *rb;
4248 	unsigned long flags;
4249 
4250 	if (!sched_engine)
4251 		return;
4252 
4253 	drm_printf(p, "GuC Number Outstanding Submission G2H: %u\n",
4254 		   atomic_read(&guc->outstanding_submission_g2h));
4255 	drm_printf(p, "GuC tasklet count: %u\n\n",
4256 		   atomic_read(&sched_engine->tasklet.count));
4257 
4258 	spin_lock_irqsave(&sched_engine->lock, flags);
4259 	drm_printf(p, "Requests in GuC submit tasklet:\n");
4260 	for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
4261 		struct i915_priolist *pl = to_priolist(rb);
4262 		struct i915_request *rq;
4263 
4264 		priolist_for_each_request(rq, pl)
4265 			drm_printf(p, "guc_id=%u, seqno=%llu\n",
4266 				   rq->context->guc_id.id,
4267 				   rq->fence.seqno);
4268 	}
4269 	spin_unlock_irqrestore(&sched_engine->lock, flags);
4270 	drm_printf(p, "\n");
4271 }
4272 
4273 static inline void guc_log_context_priority(struct drm_printer *p,
4274 					    struct intel_context *ce)
4275 {
4276 	int i;
4277 
4278 	drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio);
4279 	drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
4280 	for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
4281 	     i < GUC_CLIENT_PRIORITY_NUM; ++i) {
4282 		drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
4283 			   i, ce->guc_state.prio_count[i]);
4284 	}
4285 	drm_printf(p, "\n");
4286 }
4287 
4288 static inline void guc_log_context(struct drm_printer *p,
4289 				   struct intel_context *ce)
4290 {
4291 	drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
4292 	drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
4293 	drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
4294 		   ce->ring->head,
4295 		   ce->lrc_reg_state[CTX_RING_HEAD]);
4296 	drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
4297 		   ce->ring->tail,
4298 		   ce->lrc_reg_state[CTX_RING_TAIL]);
4299 	drm_printf(p, "\t\tContext Pin Count: %u\n",
4300 		   atomic_read(&ce->pin_count));
4301 	drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
4302 		   atomic_read(&ce->guc_id.ref));
4303 	drm_printf(p, "\t\tSchedule State: 0x%x\n\n",
4304 		   ce->guc_state.sched_state);
4305 }
4306 
4307 void intel_guc_submission_print_context_info(struct intel_guc *guc,
4308 					     struct drm_printer *p)
4309 {
4310 	struct intel_context *ce;
4311 	unsigned long index;
4312 	unsigned long flags;
4313 
4314 	xa_lock_irqsave(&guc->context_lookup, flags);
4315 	xa_for_each(&guc->context_lookup, index, ce) {
4316 		GEM_BUG_ON(intel_context_is_child(ce));
4317 
4318 		guc_log_context(p, ce);
4319 		guc_log_context_priority(p, ce);
4320 
4321 		if (intel_context_is_parent(ce)) {
4322 			struct guc_process_desc *desc = __get_process_desc(ce);
4323 			struct intel_context *child;
4324 
4325 			drm_printf(p, "\t\tNumber children: %u\n",
4326 				   ce->parallel.number_children);
4327 			drm_printf(p, "\t\tWQI Head: %u\n",
4328 				   READ_ONCE(desc->head));
4329 			drm_printf(p, "\t\tWQI Tail: %u\n",
4330 				   READ_ONCE(desc->tail));
4331 			drm_printf(p, "\t\tWQI Status: %u\n\n",
4332 				   READ_ONCE(desc->wq_status));
4333 
4334 			if (ce->engine->emit_bb_start ==
4335 			    emit_bb_start_parent_no_preempt_mid_batch) {
4336 				u8 i;
4337 
4338 				drm_printf(p, "\t\tChildren Go: %u\n\n",
4339 					   get_children_go_value(ce));
4340 				for (i = 0; i < ce->parallel.number_children; ++i)
4341 					drm_printf(p, "\t\tChildren Join: %u\n",
4342 						   get_children_join_value(ce, i));
4343 			}
4344 
4345 			for_each_child(ce, child)
4346 				guc_log_context(p, child);
4347 		}
4348 	}
4349 	xa_unlock_irqrestore(&guc->context_lookup, flags);
4350 }
4351 
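/*
 * The go / join semaphores used for the parent-child handshake live in the
 * parent context's scratch area (struct parent_scratch); the helpers below
 * return their GGTT addresses.
 */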
4352 static inline u32 get_children_go_addr(struct intel_context *ce)
4353 {
4354 	GEM_BUG_ON(!intel_context_is_parent(ce));
4355 
4356 	return i915_ggtt_offset(ce->state) +
4357 		__get_parent_scratch_offset(ce) +
4358 		offsetof(struct parent_scratch, go.semaphore);
4359 }
4360 
4361 static inline u32 get_children_join_addr(struct intel_context *ce,
4362 					 u8 child_index)
4363 {
4364 	GEM_BUG_ON(!intel_context_is_parent(ce));
4365 
4366 	return i915_ggtt_offset(ce->state) +
4367 		__get_parent_scratch_offset(ce) +
4368 		offsetof(struct parent_scratch, join[child_index].semaphore);
4369 }
4370 
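/*
 * Semaphore values for the parent-child handshake emitted below. Before the
 * batch each child writes PARENT_GO_BB to its join semaphore and waits for
 * the parent to write CHILD_GO_BB to the go semaphore; the parent waits for
 * all the joins, disables preemption and only then releases the children.
 * The fini breadcrumbs repeat the same dance with the *_FINI_BREADCRUMB
 * values, this time re-enabling preemption.
 */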
4371 #define PARENT_GO_BB			1
4372 #define PARENT_GO_FINI_BREADCRUMB	0
4373 #define CHILD_GO_BB			1
4374 #define CHILD_GO_FINI_BREADCRUMB	0
4375 static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
4376 						     u64 offset, u32 len,
4377 						     const unsigned int flags)
4378 {
4379 	struct intel_context *ce = rq->context;
4380 	u32 *cs;
4381 	u8 i;
4382 
4383 	GEM_BUG_ON(!intel_context_is_parent(ce));
4384 
4385 	cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children);
4386 	if (IS_ERR(cs))
4387 		return PTR_ERR(cs);
4388 
4389 	/* Wait on children */
4390 	for (i = 0; i < ce->parallel.number_children; ++i) {
4391 		*cs++ = (MI_SEMAPHORE_WAIT |
4392 			 MI_SEMAPHORE_GLOBAL_GTT |
4393 			 MI_SEMAPHORE_POLL |
4394 			 MI_SEMAPHORE_SAD_EQ_SDD);
4395 		*cs++ = PARENT_GO_BB;
4396 		*cs++ = get_children_join_addr(ce, i);
4397 		*cs++ = 0;
4398 	}
4399 
4400 	/* Turn off preemption */
4401 	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
4402 	*cs++ = MI_NOOP;
4403 
4404 	/* Tell children go */
4405 	cs = gen8_emit_ggtt_write(cs,
4406 				  CHILD_GO_BB,
4407 				  get_children_go_addr(ce),
4408 				  0);
4409 
4410 	/* Jump to batch */
4411 	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
4412 		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
4413 	*cs++ = lower_32_bits(offset);
4414 	*cs++ = upper_32_bits(offset);
4415 	*cs++ = MI_NOOP;
4416 
4417 	intel_ring_advance(rq, cs);
4418 
4419 	return 0;
4420 }
4421 
4422 static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
4423 						    u64 offset, u32 len,
4424 						    const unsigned int flags)
4425 {
4426 	struct intel_context *ce = rq->context;
4427 	struct intel_context *parent = intel_context_to_parent(ce);
4428 	u32 *cs;
4429 
4430 	GEM_BUG_ON(!intel_context_is_child(ce));
4431 
4432 	cs = intel_ring_begin(rq, 12);
4433 	if (IS_ERR(cs))
4434 		return PTR_ERR(cs);
4435 
4436 	/* Signal parent */
4437 	cs = gen8_emit_ggtt_write(cs,
4438 				  PARENT_GO_BB,
4439 				  get_children_join_addr(parent,
4440 							 ce->parallel.child_index),
4441 				  0);
4442 
4443 	/* Wait on parent for go */
4444 	*cs++ = (MI_SEMAPHORE_WAIT |
4445 		 MI_SEMAPHORE_GLOBAL_GTT |
4446 		 MI_SEMAPHORE_POLL |
4447 		 MI_SEMAPHORE_SAD_EQ_SDD);
4448 	*cs++ = CHILD_GO_BB;
4449 	*cs++ = get_children_go_addr(parent);
4450 	*cs++ = 0;
4451 
4452 	/* Turn off preemption */
4453 	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
4454 
4455 	/* Jump to batch */
4456 	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
4457 		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
4458 	*cs++ = lower_32_bits(offset);
4459 	*cs++ = upper_32_bits(offset);
4460 
4461 	intel_ring_advance(rq, cs);
4462 
4463 	return 0;
4464 }
4465 
4466 static u32 *
4467 __emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
4468 						   u32 *cs)
4469 {
4470 	struct intel_context *ce = rq->context;
4471 	u8 i;
4472 
4473 	GEM_BUG_ON(!intel_context_is_parent(ce));
4474 
4475 	/* Wait on children */
4476 	for (i = 0; i < ce->parallel.number_children; ++i) {
4477 		*cs++ = (MI_SEMAPHORE_WAIT |
4478 			 MI_SEMAPHORE_GLOBAL_GTT |
4479 			 MI_SEMAPHORE_POLL |
4480 			 MI_SEMAPHORE_SAD_EQ_SDD);
4481 		*cs++ = PARENT_GO_FINI_BREADCRUMB;
4482 		*cs++ = get_children_join_addr(ce, i);
4483 		*cs++ = 0;
4484 	}
4485 
4486 	/* Turn on preemption */
4487 	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
4488 	*cs++ = MI_NOOP;
4489 
4490 	/* Tell children go */
4491 	cs = gen8_emit_ggtt_write(cs,
4492 				  CHILD_GO_FINI_BREADCRUMB,
4493 				  get_children_go_addr(ce),
4494 				  0);
4495 
4496 	return cs;
4497 }
4498 
4499 /*
4500  * If this returns true, a submission of multi-lrc requests had an error and
4501  * the requests need to be skipped. The front end (execbuf IOCTL) should've
4502  * called i915_request_skip, which squashes the BB, but we still need to emit
4503  * the fini breadcrumb seqno write. At this point we don't know how many of the
4504  * requests in the multi-lrc submission were generated so we can't do the
4505  * handshake between the parent and children (e.g. if 4 requests should be
4506  * generated but the 2nd hit an error, only 1 would be seen by the GuC backend).
4507  * Simply skip the handshake, but still emit the breadcrumb seqno, if an error
4508  * has occurred on any of the requests in the submission / relationship.
4509  */
4510 static inline bool skip_handshake(struct i915_request *rq)
4511 {
4512 	return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
4513 }
4514 
4515 static u32 *
4516 emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
4517 						 u32 *cs)
4518 {
4519 	struct intel_context *ce = rq->context;
4520 
4521 	GEM_BUG_ON(!intel_context_is_parent(ce));
4522 
4523 	if (unlikely(skip_handshake(rq))) {
4524 		/*
4525 		 * NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch;
4526 		 * the -6 comes from the length of the emits below.
4527 		 */
4528 		memset(cs, 0, sizeof(u32) *
4529 		       (ce->engine->emit_fini_breadcrumb_dw - 6));
4530 		cs += ce->engine->emit_fini_breadcrumb_dw - 6;
4531 	} else {
4532 		cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
4533 	}
4534 
4535 	/* Emit fini breadcrumb */
4536 	cs = gen8_emit_ggtt_write(cs,
4537 				  rq->fence.seqno,
4538 				  i915_request_active_timeline(rq)->hwsp_offset,
4539 				  0);
4540 
4541 	/* User interrupt */
4542 	*cs++ = MI_USER_INTERRUPT;
4543 	*cs++ = MI_NOOP;
4544 
4545 	rq->tail = intel_ring_offset(rq, cs);
4546 
4547 	return cs;
4548 }
4549 
4550 static u32 *
4551 __emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
4552 						  u32 *cs)
4553 {
4554 	struct intel_context *ce = rq->context;
4555 	struct intel_context *parent = intel_context_to_parent(ce);
4556 
4557 	GEM_BUG_ON(!intel_context_is_child(ce));
4558 
4559 	/* Turn on preemption */
4560 	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
4561 	*cs++ = MI_NOOP;
4562 
4563 	/* Signal parent */
4564 	cs = gen8_emit_ggtt_write(cs,
4565 				  PARENT_GO_FINI_BREADCRUMB,
4566 				  get_children_join_addr(parent,
4567 							 ce->parallel.child_index),
4568 				  0);
4569 
4570 	/* Wait on parent for go */
4571 	*cs++ = (MI_SEMAPHORE_WAIT |
4572 		 MI_SEMAPHORE_GLOBAL_GTT |
4573 		 MI_SEMAPHORE_POLL |
4574 		 MI_SEMAPHORE_SAD_EQ_SDD);
4575 	*cs++ = CHILD_GO_FINI_BREADCRUMB;
4576 	*cs++ = get_children_go_addr(parent);
4577 	*cs++ = 0;
4578 
4579 	return cs;
4580 }
4581 
4582 static u32 *
4583 emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
4584 						u32 *cs)
4585 {
4586 	struct intel_context *ce = rq->context;
4587 
4588 	GEM_BUG_ON(!intel_context_is_child(ce));
4589 
4590 	if (unlikely(skip_handshake(rq))) {
4591 		/*
4592 		 * NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch;
4593 		 * the -6 comes from the length of the emits below.
4594 		 */
4595 		memset(cs, 0, sizeof(u32) *
4596 		       (ce->engine->emit_fini_breadcrumb_dw - 6));
4597 		cs += ce->engine->emit_fini_breadcrumb_dw - 6;
4598 	} else {
4599 		cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
4600 	}
4601 
4602 	/* Emit fini breadcrumb */
4603 	cs = gen8_emit_ggtt_write(cs,
4604 				  rq->fence.seqno,
4605 				  i915_request_active_timeline(rq)->hwsp_offset,
4606 				  0);
4607 
4608 	/* User interrupt */
4609 	*cs++ = MI_USER_INTERRUPT;
4610 	*cs++ = MI_NOOP;
4611 
4612 	rq->tail = intel_ring_offset(rq, cs);
4613 
4614 	return cs;
4615 }
4616 
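/*
 * Create a GuC virtual engine: a lightweight intel_engine_cs whose mask is
 * the union of its siblings' masks and whose submission vfuncs route into the
 * common GuC scheduler. The emit / breadcrumb vfuncs are inherited from the
 * first sibling, as all siblings must be of the same class.
 */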
4617 static struct intel_context *
4618 guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
4619 		   unsigned long flags)
4620 {
4621 	struct guc_virtual_engine *ve;
4622 	struct intel_guc *guc;
4623 	unsigned int n;
4624 	int err;
4625 
4626 	ve = kzalloc(sizeof(*ve), GFP_KERNEL);
4627 	if (!ve)
4628 		return ERR_PTR(-ENOMEM);
4629 
4630 	guc = &siblings[0]->gt->uc.guc;
4631 
4632 	ve->base.i915 = siblings[0]->i915;
4633 	ve->base.gt = siblings[0]->gt;
4634 	ve->base.uncore = siblings[0]->uncore;
4635 	ve->base.id = -1;
4636 
4637 	ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
4638 	ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
4639 	ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
4640 	ve->base.saturated = ALL_ENGINES;
4641 
4642 	snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
4643 
4644 	ve->base.sched_engine = i915_sched_engine_get(guc->sched_engine);
4645 
4646 	ve->base.cops = &virtual_guc_context_ops;
4647 	ve->base.request_alloc = guc_request_alloc;
4648 	ve->base.bump_serial = virtual_guc_bump_serial;
4649 
4650 	ve->base.submit_request = guc_submit_request;
4651 
4652 	ve->base.flags = I915_ENGINE_IS_VIRTUAL;
4653 
4654 	intel_context_init(&ve->context, &ve->base);
4655 
4656 	for (n = 0; n < count; n++) {
4657 		struct intel_engine_cs *sibling = siblings[n];
4658 
4659 		GEM_BUG_ON(!is_power_of_2(sibling->mask));
4660 		if (sibling->mask & ve->base.mask) {
4661 			DRM_DEBUG("duplicate %s entry in load balancer\n",
4662 				  sibling->name);
4663 			err = -EINVAL;
4664 			goto err_put;
4665 		}
4666 
4667 		ve->base.mask |= sibling->mask;
4668 		ve->base.logical_mask |= sibling->logical_mask;
4669 
4670 		if (n != 0 && ve->base.class != sibling->class) {
4671 			DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
4672 				  sibling->class, ve->base.class);
4673 			err = -EINVAL;
4674 			goto err_put;
4675 		} else if (n == 0) {
4676 			ve->base.class = sibling->class;
4677 			ve->base.uabi_class = sibling->uabi_class;
4678 			snprintf(ve->base.name, sizeof(ve->base.name),
4679 				 "v%dx%d", ve->base.class, count);
4680 			ve->base.context_size = sibling->context_size;
4681 
4682 			ve->base.add_active_request =
4683 				sibling->add_active_request;
4684 			ve->base.remove_active_request =
4685 				sibling->remove_active_request;
4686 			ve->base.emit_bb_start = sibling->emit_bb_start;
4687 			ve->base.emit_flush = sibling->emit_flush;
4688 			ve->base.emit_init_breadcrumb =
4689 				sibling->emit_init_breadcrumb;
4690 			ve->base.emit_fini_breadcrumb =
4691 				sibling->emit_fini_breadcrumb;
4692 			ve->base.emit_fini_breadcrumb_dw =
4693 				sibling->emit_fini_breadcrumb_dw;
4694 			ve->base.breadcrumbs =
4695 				intel_breadcrumbs_get(sibling->breadcrumbs);
4696 
4697 			ve->base.flags |= sibling->flags;
4698 
4699 			ve->base.props.timeslice_duration_ms =
4700 				sibling->props.timeslice_duration_ms;
4701 			ve->base.props.preempt_timeout_ms =
4702 				sibling->props.preempt_timeout_ms;
4703 		}
4704 	}
4705 
4706 	return &ve->context;
4707 
4708 err_put:
4709 	intel_context_put(&ve->context);
4710 	return ERR_PTR(err);
4711 }
4712 
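/*
 * A virtual engine is considered to have a heartbeat if any of its underlying
 * physical engines has a non-zero heartbeat interval.
 */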
4713 bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
4714 {
4715 	struct intel_engine_cs *engine;
4716 	intel_engine_mask_t tmp, mask = ve->mask;
4717 
4718 	for_each_engine_masked(engine, ve->gt, mask, tmp)
4719 		if (READ_ONCE(engine->props.heartbeat_interval_ms))
4720 			return true;
4721 
4722 	return false;
4723 }
4724 
4725 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4726 #include "selftest_guc.c"
4727 #include "selftest_guc_multi_lrc.c"
4728 #endif
4729