1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2014 Intel Corporation
4  */
5 
6 #include <linux/circ_buf.h>
7 
8 #include "gem/i915_gem_context.h"
9 #include "gt/gen8_engine_cs.h"
10 #include "gt/intel_breadcrumbs.h"
11 #include "gt/intel_context.h"
12 #include "gt/intel_engine_pm.h"
13 #include "gt/intel_engine_heartbeat.h"
14 #include "gt/intel_gpu_commands.h"
15 #include "gt/intel_gt.h"
16 #include "gt/intel_gt_clock_utils.h"
17 #include "gt/intel_gt_irq.h"
18 #include "gt/intel_gt_pm.h"
19 #include "gt/intel_gt_requests.h"
20 #include "gt/intel_lrc.h"
21 #include "gt/intel_lrc_reg.h"
22 #include "gt/intel_mocs.h"
23 #include "gt/intel_ring.h"
24 
25 #include "intel_guc_ads.h"
26 #include "intel_guc_submission.h"
27 
28 #include "i915_drv.h"
29 #include "i915_trace.h"
30 
31 /**
32  * DOC: GuC-based command submission
33  *
34  * The Scratch registers:
 * There are 16 MMIO-based registers starting from 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any data.
 * It then triggers an interrupt on the GuC via another register write (0xC4C8).
 * Firmware writes a success/fail code back to the action register after
 * processing the request. The kernel driver polls waiting for this update and
40  * then proceeds.
41  *
42  * Command Transport buffers (CTBs):
 * Covered in detail in other sections, but CTBs (Host to GuC - H2G, GuC to
 * Host - G2H) are a message interface between the i915 and the GuC.
45  *
46  * Context registration:
47  * Before a context can be submitted it must be registered with the GuC via a
48  * H2G. A unique guc_id is associated with each context. The context is either
49  * registered at request creation time (normal operation) or at submission time
50  * (abnormal operation, e.g. after a reset).
51  *
52  * Context submission:
53  * The i915 updates the LRC tail value in memory. The i915 must enable the
54  * scheduling of the context within the GuC for the GuC to actually consider it.
55  * Therefore, the first time a disabled context is submitted we use a schedule
56  * enable H2G, while follow up submissions are done via the context submit H2G,
57  * which informs the GuC that a previously enabled context has new work
58  * available.
59  *
60  * Context unpin:
61  * To unpin a context a H2G is used to disable scheduling. When the
62  * corresponding G2H returns indicating the scheduling disable operation has
63  * completed it is safe to unpin the context. While a disable is in flight it
64  * isn't safe to resubmit the context so a fence is used to stall all future
65  * requests of that context until the G2H is returned.
66  *
67  * Context deregistration:
68  * Before a context can be destroyed or if we steal its guc_id we must
69  * deregister the context with the GuC via H2G. If stealing the guc_id it isn't
70  * safe to submit anything to this guc_id until the deregister completes so a
71  * fence is used to stall all requests associated with this guc_id until the
72  * corresponding G2H returns indicating the guc_id has been deregistered.
73  *
74  * submission_state.guc_ids:
75  * Unique number associated with private GuC context data passed in during
76  * context registration / submission / deregistration. 64k available. Simple ida
77  * is used for allocation.
78  *
79  * Stealing guc_ids:
80  * If no guc_ids are available they can be stolen from another context at
81  * request creation time if that context is unpinned. If a guc_id can't be found
82  * we punt this problem to the user as we believe this is near impossible to hit
83  * during normal use cases.
84  *
85  * Locking:
86  * In the GuC submission code we have 3 basic spin locks which protect
87  * everything. Details about each below.
88  *
89  * sched_engine->lock
 * This is the submission lock for all contexts that share an i915 scheduling
 * engine (sched_engine), thus only one of the contexts which share a
92  * sched_engine can be submitting at a time. Currently only one sched_engine is
93  * used for all of GuC submission but that could change in the future.
94  *
95  * guc->submission_state.lock
96  * Global lock for GuC submission state. Protects guc_ids and destroyed contexts
97  * list.
98  *
99  * ce->guc_state.lock
100  * Protects everything under ce->guc_state. Ensures that a context is in the
101  * correct state before issuing a H2G. e.g. We don't issue a schedule disable
102  * on a disabled context (bad idea), we don't issue a schedule enable when a
103  * schedule disable is in flight, etc... Also protects list of inflight requests
104  * on the context and the priority management state. Lock is individual to each
105  * context.
106  *
107  * Lock ordering rules:
108  * sched_engine->lock -> ce->guc_state.lock
109  * guc->submission_state.lock -> ce->guc_state.lock
110  *
111  * Reset races:
112  * When a full GT reset is triggered it is assumed that some G2H responses to
113  * H2Gs can be lost as the GuC is also reset. Losing these G2H can prove to be
114  * fatal as we do certain operations upon receiving a G2H (e.g. destroy
 * contexts, release guc_ids, etc...). When this occurs we can scrub the
 * context state and clean up appropriately, however this is quite racy.
117  * To avoid races, the reset code must disable submission before scrubbing for
118  * the missing G2H, while the submission code must check for submission being
119  * disabled and skip sending H2Gs and updating context states when it is. Both
120  * sides must also make sure to hold the relevant locks.
121  */
122 
123 /* GuC Virtual Engine */
124 struct guc_virtual_engine {
125 	struct intel_engine_cs base;
126 	struct intel_context context;
127 };
128 
129 static struct intel_context *
130 guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
131 		   unsigned long flags);
132 
133 static struct intel_context *
134 guc_create_parallel(struct intel_engine_cs **engines,
135 		    unsigned int num_siblings,
136 		    unsigned int width);
137 
138 #define GUC_REQUEST_SIZE 64 /* bytes */
139 
140 /*
141  * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous
142  * per the GuC submission interface. A different allocation algorithm is used
 * (bitmap vs. ida) between multi-lrc and single-lrc, hence the need to
 * partition the guc_id space. We believe the number of multi-lrc contexts in
145  * use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for
146  * multi-lrc.
147  */
148 #define NUMBER_MULTI_LRC_GUC_ID(guc)	\
149 	((guc)->submission_state.num_guc_ids / 16)
150 
151 /*
152  * Below is a set of functions which control the GuC scheduling state which
153  * require a lock.
154  */
155 #define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER	BIT(0)
156 #define SCHED_STATE_DESTROYED				BIT(1)
157 #define SCHED_STATE_PENDING_DISABLE			BIT(2)
158 #define SCHED_STATE_BANNED				BIT(3)
159 #define SCHED_STATE_ENABLED				BIT(4)
160 #define SCHED_STATE_PENDING_ENABLE			BIT(5)
161 #define SCHED_STATE_REGISTERED				BIT(6)
162 #define SCHED_STATE_BLOCKED_SHIFT			7
163 #define SCHED_STATE_BLOCKED		BIT(SCHED_STATE_BLOCKED_SHIFT)
164 #define SCHED_STATE_BLOCKED_MASK	(0xfff << SCHED_STATE_BLOCKED_SHIFT)
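/*
 * SCHED_STATE_BLOCKED is a counter rather than a single flag: each block adds
 * SCHED_STATE_BLOCKED to sched_state and each unblock subtracts it, so nested
 * blocks are tracked in the bits above SCHED_STATE_BLOCKED_SHIFT (see
 * incr_context_blocked() / decr_context_blocked() below).
 */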
165 
166 static inline void init_sched_state(struct intel_context *ce)
167 {
168 	lockdep_assert_held(&ce->guc_state.lock);
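	/* Preserve only the blocked count, clear all other flags. */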
169 	ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
170 }
171 
172 __maybe_unused
173 static bool sched_state_is_init(struct intel_context *ce)
174 {
175 	/*
	 * XXX: Kernel contexts can have SCHED_STATE_REGISTERED after
	 * suspend.
178 	 */
	return !(ce->guc_state.sched_state &
180 		 ~(SCHED_STATE_BLOCKED_MASK | SCHED_STATE_REGISTERED));
181 }
182 
183 static inline bool
184 context_wait_for_deregister_to_register(struct intel_context *ce)
185 {
186 	return ce->guc_state.sched_state &
187 		SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
188 }
189 
190 static inline void
191 set_context_wait_for_deregister_to_register(struct intel_context *ce)
192 {
193 	lockdep_assert_held(&ce->guc_state.lock);
194 	ce->guc_state.sched_state |=
195 		SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
196 }
197 
198 static inline void
199 clr_context_wait_for_deregister_to_register(struct intel_context *ce)
200 {
201 	lockdep_assert_held(&ce->guc_state.lock);
202 	ce->guc_state.sched_state &=
203 		~SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
204 }
205 
206 static inline bool
207 context_destroyed(struct intel_context *ce)
208 {
209 	return ce->guc_state.sched_state & SCHED_STATE_DESTROYED;
210 }
211 
212 static inline void
213 set_context_destroyed(struct intel_context *ce)
214 {
215 	lockdep_assert_held(&ce->guc_state.lock);
216 	ce->guc_state.sched_state |= SCHED_STATE_DESTROYED;
217 }
218 
219 static inline bool context_pending_disable(struct intel_context *ce)
220 {
221 	return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE;
222 }
223 
224 static inline void set_context_pending_disable(struct intel_context *ce)
225 {
226 	lockdep_assert_held(&ce->guc_state.lock);
227 	ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE;
228 }
229 
230 static inline void clr_context_pending_disable(struct intel_context *ce)
231 {
232 	lockdep_assert_held(&ce->guc_state.lock);
233 	ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE;
234 }
235 
236 static inline bool context_banned(struct intel_context *ce)
237 {
238 	return ce->guc_state.sched_state & SCHED_STATE_BANNED;
239 }
240 
241 static inline void set_context_banned(struct intel_context *ce)
242 {
243 	lockdep_assert_held(&ce->guc_state.lock);
244 	ce->guc_state.sched_state |= SCHED_STATE_BANNED;
245 }
246 
247 static inline void clr_context_banned(struct intel_context *ce)
248 {
249 	lockdep_assert_held(&ce->guc_state.lock);
250 	ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
251 }
252 
253 static inline bool context_enabled(struct intel_context *ce)
254 {
255 	return ce->guc_state.sched_state & SCHED_STATE_ENABLED;
256 }
257 
258 static inline void set_context_enabled(struct intel_context *ce)
259 {
260 	lockdep_assert_held(&ce->guc_state.lock);
261 	ce->guc_state.sched_state |= SCHED_STATE_ENABLED;
262 }
263 
264 static inline void clr_context_enabled(struct intel_context *ce)
265 {
266 	lockdep_assert_held(&ce->guc_state.lock);
267 	ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED;
268 }
269 
270 static inline bool context_pending_enable(struct intel_context *ce)
271 {
272 	return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE;
273 }
274 
275 static inline void set_context_pending_enable(struct intel_context *ce)
276 {
277 	lockdep_assert_held(&ce->guc_state.lock);
278 	ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE;
279 }
280 
281 static inline void clr_context_pending_enable(struct intel_context *ce)
282 {
283 	lockdep_assert_held(&ce->guc_state.lock);
284 	ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE;
285 }
286 
287 static inline bool context_registered(struct intel_context *ce)
288 {
289 	return ce->guc_state.sched_state & SCHED_STATE_REGISTERED;
290 }
291 
292 static inline void set_context_registered(struct intel_context *ce)
293 {
294 	lockdep_assert_held(&ce->guc_state.lock);
295 	ce->guc_state.sched_state |= SCHED_STATE_REGISTERED;
296 }
297 
298 static inline void clr_context_registered(struct intel_context *ce)
299 {
300 	lockdep_assert_held(&ce->guc_state.lock);
301 	ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
302 }
303 
304 static inline u32 context_blocked(struct intel_context *ce)
305 {
306 	return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
307 		SCHED_STATE_BLOCKED_SHIFT;
308 }
309 
310 static inline void incr_context_blocked(struct intel_context *ce)
311 {
312 	lockdep_assert_held(&ce->guc_state.lock);
313 
314 	ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
315 
316 	GEM_BUG_ON(!context_blocked(ce));	/* Overflow check */
317 }
318 
319 static inline void decr_context_blocked(struct intel_context *ce)
320 {
321 	lockdep_assert_held(&ce->guc_state.lock);
322 
323 	GEM_BUG_ON(!context_blocked(ce));	/* Underflow check */
324 
325 	ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
326 }
327 
328 static inline bool context_has_committed_requests(struct intel_context *ce)
329 {
330 	return !!ce->guc_state.number_committed_requests;
331 }
332 
333 static inline void incr_context_committed_requests(struct intel_context *ce)
334 {
335 	lockdep_assert_held(&ce->guc_state.lock);
336 	++ce->guc_state.number_committed_requests;
337 	GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
338 }
339 
340 static inline void decr_context_committed_requests(struct intel_context *ce)
341 {
342 	lockdep_assert_held(&ce->guc_state.lock);
343 	--ce->guc_state.number_committed_requests;
344 	GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
345 }
346 
347 static struct intel_context *
348 request_to_scheduling_context(struct i915_request *rq)
349 {
350 	return intel_context_to_parent(rq->context);
351 }
352 
353 static inline bool context_guc_id_invalid(struct intel_context *ce)
354 {
355 	return ce->guc_id.id == GUC_INVALID_LRC_ID;
356 }
357 
358 static inline void set_context_guc_id_invalid(struct intel_context *ce)
359 {
360 	ce->guc_id.id = GUC_INVALID_LRC_ID;
361 }
362 
363 static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
364 {
365 	return &ce->engine->gt->uc.guc;
366 }
367 
368 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
369 {
370 	return rb_entry(rb, struct i915_priolist, node);
371 }
372 
373 /*
374  * When using multi-lrc submission a scratch memory area is reserved in the
375  * parent's context state for the process descriptor, work queue, and handshake
376  * between the parent + children contexts to insert safe preemption points
377  * between each of the BBs. Currently the scratch area is sized to a page.
378  *
379  * The layout of this scratch area is below:
380  * 0						guc_process_desc
381  * + sizeof(struct guc_process_desc)		child go
382  * + CACHELINE_BYTES				child join[0]
383  * ...
384  * + CACHELINE_BYTES				child join[n - 1]
385  * ...						unused
386  * PARENT_SCRATCH_SIZE / 2			work queue start
387  * ...						work queue
388  * PARENT_SCRATCH_SIZE - 1			work queue end
389  */
390 #define WQ_SIZE			(PARENT_SCRATCH_SIZE / 2)
391 #define WQ_OFFSET		(PARENT_SCRATCH_SIZE - WQ_SIZE)
392 
393 struct sync_semaphore {
394 	u32 semaphore;
395 	u8 unused[CACHELINE_BYTES - sizeof(u32)];
396 };
397 
398 struct parent_scratch {
399 	struct guc_process_desc pdesc;
400 
401 	struct sync_semaphore go;
402 	struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
403 
404 	u8 unused[WQ_OFFSET - sizeof(struct guc_process_desc) -
405 		sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
406 
407 	u32 wq[WQ_SIZE / sizeof(u32)];
408 };
409 
410 static u32 __get_parent_scratch_offset(struct intel_context *ce)
411 {
412 	GEM_BUG_ON(!ce->parallel.guc.parent_page);
413 
414 	return ce->parallel.guc.parent_page * PAGE_SIZE;
415 }
416 
417 static u32 __get_wq_offset(struct intel_context *ce)
418 {
419 	BUILD_BUG_ON(offsetof(struct parent_scratch, wq) != WQ_OFFSET);
420 
421 	return __get_parent_scratch_offset(ce) + WQ_OFFSET;
422 }
423 
424 static struct parent_scratch *
425 __get_parent_scratch(struct intel_context *ce)
426 {
427 	BUILD_BUG_ON(sizeof(struct parent_scratch) != PARENT_SCRATCH_SIZE);
428 	BUILD_BUG_ON(sizeof(struct sync_semaphore) != CACHELINE_BYTES);
429 
430 	/*
431 	 * Need to subtract LRC_STATE_OFFSET here as the
432 	 * parallel.guc.parent_page is the offset into ce->state while
	 * ce->lrc_reg_state is ce->state + LRC_STATE_OFFSET.
434 	 */
435 	return (struct parent_scratch *)
436 		(ce->lrc_reg_state +
437 		 ((__get_parent_scratch_offset(ce) -
438 		   LRC_STATE_OFFSET) / sizeof(u32)));
439 }
440 
441 static struct guc_process_desc *
442 __get_process_desc(struct intel_context *ce)
443 {
444 	struct parent_scratch *ps = __get_parent_scratch(ce);
445 
446 	return &ps->pdesc;
447 }
448 
449 static u32 *get_wq_pointer(struct guc_process_desc *desc,
450 			   struct intel_context *ce,
451 			   u32 wqi_size)
452 {
453 	/*
	 * Check for space in the work queue. We cache the head pointer in the
	 * intel_context structure in order to reduce the number of accesses to
	 * shared GPU memory, which may be across a PCIe bus.
457 	 */
458 #define AVAILABLE_SPACE	\
459 	CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
460 	if (wqi_size > AVAILABLE_SPACE) {
461 		ce->parallel.guc.wqi_head = READ_ONCE(desc->head);
462 
463 		if (wqi_size > AVAILABLE_SPACE)
464 			return NULL;
465 	}
466 #undef AVAILABLE_SPACE
467 
468 	return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)];
469 }
470 
471 static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index)
472 {
473 	struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr;
474 
475 	GEM_BUG_ON(index >= GUC_MAX_LRC_DESCRIPTORS);
476 
477 	return &base[index];
478 }
479 
480 static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
481 {
482 	struct intel_context *ce = xa_load(&guc->context_lookup, id);
483 
484 	GEM_BUG_ON(id >= GUC_MAX_LRC_DESCRIPTORS);
485 
486 	return ce;
487 }
488 
489 static int guc_lrc_desc_pool_create(struct intel_guc *guc)
490 {
491 	u32 size;
492 	int ret;
493 
494 	size = PAGE_ALIGN(sizeof(struct guc_lrc_desc) *
495 			  GUC_MAX_LRC_DESCRIPTORS);
496 	ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool,
497 					     (void **)&guc->lrc_desc_pool_vaddr);
498 	if (ret)
499 		return ret;
500 
501 	return 0;
502 }
503 
504 static void guc_lrc_desc_pool_destroy(struct intel_guc *guc)
505 {
506 	guc->lrc_desc_pool_vaddr = NULL;
507 	i915_vma_unpin_and_release(&guc->lrc_desc_pool, I915_VMA_RELEASE_MAP);
508 }
509 
510 static inline bool guc_submission_initialized(struct intel_guc *guc)
511 {
512 	return !!guc->lrc_desc_pool_vaddr;
513 }
514 
515 static inline void reset_lrc_desc(struct intel_guc *guc, u32 id)
516 {
517 	if (likely(guc_submission_initialized(guc))) {
518 		struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
519 		unsigned long flags;
520 
521 		memset(desc, 0, sizeof(*desc));
522 
523 		/*
524 		 * xarray API doesn't have xa_erase_irqsave wrapper, so calling
525 		 * the lower level functions directly.
526 		 */
527 		xa_lock_irqsave(&guc->context_lookup, flags);
528 		__xa_erase(&guc->context_lookup, id);
529 		xa_unlock_irqrestore(&guc->context_lookup, flags);
530 	}
531 }
532 
533 static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id)
534 {
535 	return __get_context(guc, id);
536 }
537 
538 static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
539 					   struct intel_context *ce)
540 {
541 	unsigned long flags;
542 
543 	/*
544 	 * xarray API doesn't have xa_save_irqsave wrapper, so calling the
545 	 * lower level functions directly.
546 	 */
547 	xa_lock_irqsave(&guc->context_lookup, flags);
548 	__xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC);
549 	xa_unlock_irqrestore(&guc->context_lookup, flags);
550 }
551 
552 static void decr_outstanding_submission_g2h(struct intel_guc *guc)
553 {
554 	if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
555 		wake_up_all(&guc->ct.wq);
556 }
557 
558 static int guc_submission_send_busy_loop(struct intel_guc *guc,
559 					 const u32 *action,
560 					 u32 len,
561 					 u32 g2h_len_dw,
562 					 bool loop)
563 {
564 	/*
565 	 * We always loop when a send requires a reply (i.e. g2h_len_dw > 0),
566 	 * so we don't handle the case where we don't get a reply because we
567 	 * aborted the send due to the channel being busy.
568 	 */
569 	GEM_BUG_ON(g2h_len_dw && !loop);
570 
571 	if (g2h_len_dw)
572 		atomic_inc(&guc->outstanding_submission_g2h);
573 
574 	return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
575 }
576 
577 int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
578 				   atomic_t *wait_var,
579 				   bool interruptible,
580 				   long timeout)
581 {
582 	const int state = interruptible ?
583 		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
584 	DEFINE_WAIT(wait);
585 
586 	might_sleep();
587 	GEM_BUG_ON(timeout < 0);
588 
589 	if (!atomic_read(wait_var))
590 		return 0;
591 
592 	if (!timeout)
593 		return -ETIME;
594 
595 	for (;;) {
596 		prepare_to_wait(&guc->ct.wq, &wait, state);
597 
598 		if (!atomic_read(wait_var))
599 			break;
600 
601 		if (signal_pending_state(state, current)) {
602 			timeout = -EINTR;
603 			break;
604 		}
605 
606 		if (!timeout) {
607 			timeout = -ETIME;
608 			break;
609 		}
610 
611 		timeout = io_schedule_timeout(timeout);
612 	}
613 	finish_wait(&guc->ct.wq, &wait);
614 
615 	return (timeout < 0) ? timeout : 0;
616 }
617 
618 int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
619 {
620 	if (!intel_uc_uses_guc_submission(&guc_to_gt(guc)->uc))
621 		return 0;
622 
623 	return intel_guc_wait_for_pending_msg(guc,
624 					      &guc->outstanding_submission_g2h,
625 					      true, timeout);
626 }
627 
628 static int guc_lrc_desc_pin(struct intel_context *ce, bool loop);
629 
630 static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
631 {
632 	int err = 0;
633 	struct intel_context *ce = request_to_scheduling_context(rq);
634 	u32 action[3];
635 	int len = 0;
636 	u32 g2h_len_dw = 0;
637 	bool enabled;
638 
639 	lockdep_assert_held(&rq->engine->sched_engine->lock);
640 
641 	/*
	 * Corner case where requests were sitting in the priority list or a
	 * request was resubmitted after the context was banned.
644 	 */
645 	if (unlikely(intel_context_is_banned(ce))) {
646 		i915_request_put(i915_request_mark_eio(rq));
647 		intel_engine_signal_breadcrumbs(ce->engine);
648 		return 0;
649 	}
650 
651 	GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
652 	GEM_BUG_ON(context_guc_id_invalid(ce));
653 
654 	spin_lock(&ce->guc_state.lock);
655 
656 	/*
657 	 * The request / context will be run on the hardware when scheduling
658 	 * gets enabled in the unblock. For multi-lrc we still submit the
659 	 * context to move the LRC tails.
660 	 */
661 	if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce)))
662 		goto out;
663 
664 	enabled = context_enabled(ce) || context_blocked(ce);
665 
666 	if (!enabled) {
667 		action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
668 		action[len++] = ce->guc_id.id;
669 		action[len++] = GUC_CONTEXT_ENABLE;
670 		set_context_pending_enable(ce);
671 		intel_context_get(ce);
672 		g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
673 	} else {
674 		action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT;
675 		action[len++] = ce->guc_id.id;
676 	}
677 
678 	err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
679 	if (!enabled && !err) {
680 		trace_intel_context_sched_enable(ce);
681 		atomic_inc(&guc->outstanding_submission_g2h);
682 		set_context_enabled(ce);
683 
684 		/*
685 		 * Without multi-lrc KMD does the submission step (moving the
686 		 * lrc tail) so enabling scheduling is sufficient to submit the
687 		 * context. This isn't the case in multi-lrc submission as the
688 		 * GuC needs to move the tails, hence the need for another H2G
689 		 * to submit a multi-lrc context after enabling scheduling.
690 		 */
691 		if (intel_context_is_parent(ce)) {
692 			action[0] = INTEL_GUC_ACTION_SCHED_CONTEXT;
693 			err = intel_guc_send_nb(guc, action, len - 1, 0);
694 		}
695 	} else if (!enabled) {
696 		clr_context_pending_enable(ce);
697 		intel_context_put(ce);
698 	}
699 	if (likely(!err))
700 		trace_i915_request_guc_submit(rq);
701 
702 out:
703 	spin_unlock(&ce->guc_state.lock);
704 	return err;
705 }
706 
707 static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
708 {
709 	int ret = __guc_add_request(guc, rq);
710 
711 	if (unlikely(ret == -EBUSY)) {
712 		guc->stalled_request = rq;
713 		guc->submission_stall_reason = STALL_ADD_REQUEST;
714 	}
715 
716 	return ret;
717 }
718 
719 static inline void guc_set_lrc_tail(struct i915_request *rq)
720 {
721 	rq->context->lrc_reg_state[CTX_RING_TAIL] =
722 		intel_ring_set_tail(rq->ring, rq->tail);
723 }
724 
725 static inline int rq_prio(const struct i915_request *rq)
726 {
727 	return rq->sched.attr.priority;
728 }
729 
730 static bool is_multi_lrc_rq(struct i915_request *rq)
731 {
732 	return intel_context_is_parallel(rq->context);
733 }
734 
735 static bool can_merge_rq(struct i915_request *rq,
736 			 struct i915_request *last)
737 {
738 	return request_to_scheduling_context(rq) ==
739 		request_to_scheduling_context(last);
740 }
741 
742 static u32 wq_space_until_wrap(struct intel_context *ce)
743 {
744 	return (WQ_SIZE - ce->parallel.guc.wqi_tail);
745 }
746 
747 static void write_wqi(struct guc_process_desc *desc,
748 		      struct intel_context *ce,
749 		      u32 wqi_size)
750 {
751 	BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
752 
753 	/*
754 	 * Ensure WQI are visible before updating tail
755 	 */
756 	intel_guc_write_barrier(ce_to_guc(ce));
757 
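	/* Advance the tail modulo WQ_SIZE (a power of two, hence the mask). */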
758 	ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
759 		(WQ_SIZE - 1);
760 	WRITE_ONCE(desc->tail, ce->parallel.guc.wqi_tail);
761 }
762 
763 static int guc_wq_noop_append(struct intel_context *ce)
764 {
765 	struct guc_process_desc *desc = __get_process_desc(ce);
766 	u32 *wqi = get_wq_pointer(desc, ce, wq_space_until_wrap(ce));
767 	u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
768 
769 	if (!wqi)
770 		return -EBUSY;
771 
772 	GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
773 
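	/*
	 * Fill the space up to the wrap point with a single NOOP WQI and
	 * restart at the top of the work queue.
	 */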
774 	*wqi = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
775 		FIELD_PREP(WQ_LEN_MASK, len_dw);
776 	ce->parallel.guc.wqi_tail = 0;
777 
778 	return 0;
779 }
780 
781 static int __guc_wq_item_append(struct i915_request *rq)
782 {
783 	struct intel_context *ce = request_to_scheduling_context(rq);
784 	struct intel_context *child;
785 	struct guc_process_desc *desc = __get_process_desc(ce);
786 	unsigned int wqi_size = (ce->parallel.number_children + 4) *
787 		sizeof(u32);
788 	u32 *wqi;
789 	u32 len_dw = (wqi_size / sizeof(u32)) - 1;
790 	int ret;
791 
	/* Ensure context is in correct state before updating work queue */
793 	GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
794 	GEM_BUG_ON(context_guc_id_invalid(ce));
795 	GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
796 	GEM_BUG_ON(!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id));
797 
798 	/* Insert NOOP if this work queue item will wrap the tail pointer. */
799 	if (wqi_size > wq_space_until_wrap(ce)) {
800 		ret = guc_wq_noop_append(ce);
801 		if (ret)
802 			return ret;
803 	}
804 
805 	wqi = get_wq_pointer(desc, ce, wqi_size);
806 	if (!wqi)
807 		return -EBUSY;
808 
809 	GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
810 
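	/*
	 * Multi-LRC WQI layout: header (type + length), parent LRCA, guc_id
	 * plus parent ring tail, fence_id, then one ring tail per child.
	 */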
811 	*wqi++ = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
812 		FIELD_PREP(WQ_LEN_MASK, len_dw);
813 	*wqi++ = ce->lrc.lrca;
814 	*wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) |
815 	       FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64));
816 	*wqi++ = 0;	/* fence_id */
817 	for_each_child(ce, child)
818 		*wqi++ = child->ring->tail / sizeof(u64);
819 
820 	write_wqi(desc, ce, wqi_size);
821 
822 	return 0;
823 }
824 
825 static int guc_wq_item_append(struct intel_guc *guc,
826 			      struct i915_request *rq)
827 {
828 	struct intel_context *ce = request_to_scheduling_context(rq);
829 	int ret = 0;
830 
831 	if (likely(!intel_context_is_banned(ce))) {
832 		ret = __guc_wq_item_append(rq);
833 
834 		if (unlikely(ret == -EBUSY)) {
835 			guc->stalled_request = rq;
836 			guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
837 		}
838 	}
839 
840 	return ret;
841 }
842 
843 static bool multi_lrc_submit(struct i915_request *rq)
844 {
845 	struct intel_context *ce = request_to_scheduling_context(rq);
846 
847 	intel_ring_set_tail(rq->ring, rq->tail);
848 
849 	/*
850 	 * We expect the front end (execbuf IOCTL) to set this flag on the last
851 	 * request generated from a multi-BB submission. This indicates to the
	 * backend (GuC interface) that we should submit this context, thus
853 	 * submitting all the requests generated in parallel.
854 	 */
855 	return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
856 		intel_context_is_banned(ce);
857 }
858 
859 static int guc_dequeue_one_context(struct intel_guc *guc)
860 {
861 	struct i915_sched_engine * const sched_engine = guc->sched_engine;
862 	struct i915_request *last = NULL;
863 	bool submit = false;
864 	struct rb_node *rb;
865 	int ret;
866 
867 	lockdep_assert_held(&sched_engine->lock);
868 
869 	if (guc->stalled_request) {
870 		submit = true;
871 		last = guc->stalled_request;
872 
873 		switch (guc->submission_stall_reason) {
874 		case STALL_REGISTER_CONTEXT:
875 			goto register_context;
876 		case STALL_MOVE_LRC_TAIL:
877 			goto move_lrc_tail;
878 		case STALL_ADD_REQUEST:
879 			goto add_request;
880 		default:
881 			MISSING_CASE(guc->submission_stall_reason);
882 		}
883 	}
884 
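	/*
	 * Walk the priority tree in order, coalescing consecutive requests
	 * that belong to the same scheduling context into a single submission.
	 */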
885 	while ((rb = rb_first_cached(&sched_engine->queue))) {
886 		struct i915_priolist *p = to_priolist(rb);
887 		struct i915_request *rq, *rn;
888 
889 		priolist_for_each_request_consume(rq, rn, p) {
890 			if (last && !can_merge_rq(rq, last))
891 				goto register_context;
892 
893 			list_del_init(&rq->sched.link);
894 
895 			__i915_request_submit(rq);
896 
897 			trace_i915_request_in(rq, 0);
898 			last = rq;
899 
900 			if (is_multi_lrc_rq(rq)) {
901 				/*
902 				 * We need to coalesce all multi-lrc requests in
903 				 * a relationship into a single H2G. We are
904 				 * guaranteed that all of these requests will be
905 				 * submitted sequentially.
906 				 */
907 				if (multi_lrc_submit(rq)) {
908 					submit = true;
909 					goto register_context;
910 				}
911 			} else {
912 				submit = true;
913 			}
914 		}
915 
916 		rb_erase_cached(&p->node, &sched_engine->queue);
917 		i915_priolist_free(p);
918 	}
919 
920 register_context:
921 	if (submit) {
922 		struct intel_context *ce = request_to_scheduling_context(last);
923 
924 		if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id) &&
925 			     !intel_context_is_banned(ce))) {
926 			ret = guc_lrc_desc_pin(ce, false);
927 			if (unlikely(ret == -EPIPE)) {
928 				goto deadlk;
929 			} else if (ret == -EBUSY) {
930 				guc->stalled_request = last;
931 				guc->submission_stall_reason =
932 					STALL_REGISTER_CONTEXT;
933 				goto schedule_tasklet;
934 			} else if (ret != 0) {
935 				GEM_WARN_ON(ret);	/* Unexpected */
936 				goto deadlk;
937 			}
938 		}
939 
940 move_lrc_tail:
941 		if (is_multi_lrc_rq(last)) {
942 			ret = guc_wq_item_append(guc, last);
943 			if (ret == -EBUSY) {
944 				goto schedule_tasklet;
945 			} else if (ret != 0) {
946 				GEM_WARN_ON(ret);	/* Unexpected */
947 				goto deadlk;
948 			}
949 		} else {
950 			guc_set_lrc_tail(last);
951 		}
952 
953 add_request:
954 		ret = guc_add_request(guc, last);
955 		if (unlikely(ret == -EPIPE)) {
956 			goto deadlk;
957 		} else if (ret == -EBUSY) {
958 			goto schedule_tasklet;
959 		} else if (ret != 0) {
960 			GEM_WARN_ON(ret);	/* Unexpected */
961 			goto deadlk;
962 		}
963 	}
964 
965 	guc->stalled_request = NULL;
966 	guc->submission_stall_reason = STALL_NONE;
967 	return submit;
968 
969 deadlk:
970 	sched_engine->tasklet.callback = NULL;
971 	tasklet_disable_nosync(&sched_engine->tasklet);
972 	return false;
973 
974 schedule_tasklet:
975 	tasklet_schedule(&sched_engine->tasklet);
976 	return false;
977 }
978 
979 static void guc_submission_tasklet(struct tasklet_struct *t)
980 {
981 	struct i915_sched_engine *sched_engine =
982 		from_tasklet(sched_engine, t, tasklet);
983 	unsigned long flags;
984 	bool loop;
985 
986 	spin_lock_irqsave(&sched_engine->lock, flags);
987 
988 	do {
989 		loop = guc_dequeue_one_context(sched_engine->private_data);
990 	} while (loop);
991 
992 	i915_sched_engine_reset_on_empty(sched_engine);
993 
994 	spin_unlock_irqrestore(&sched_engine->lock, flags);
995 }
996 
997 static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
998 {
999 	if (iir & GT_RENDER_USER_INTERRUPT)
1000 		intel_engine_signal_breadcrumbs(engine);
1001 }
1002 
1003 static void __guc_context_destroy(struct intel_context *ce);
1004 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
1005 static void guc_signal_context_fence(struct intel_context *ce);
1006 static void guc_cancel_context_requests(struct intel_context *ce);
1007 static void guc_blocked_fence_complete(struct intel_context *ce);
1008 
1009 static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
1010 {
1011 	struct intel_context *ce;
1012 	unsigned long index, flags;
1013 	bool pending_disable, pending_enable, deregister, destroyed, banned;
1014 
1015 	xa_lock_irqsave(&guc->context_lookup, flags);
1016 	xa_for_each(&guc->context_lookup, index, ce) {
1017 		/*
		 * Corner case where the ref count on the object is zero but a
		 * deregister G2H was lost. In this case we don't touch the ref
1020 		 * count and finish the destroy of the context.
1021 		 */
1022 		bool do_put = kref_get_unless_zero(&ce->ref);
1023 
1024 		xa_unlock(&guc->context_lookup);
1025 
1026 		spin_lock(&ce->guc_state.lock);
1027 
1028 		/*
1029 		 * Once we are at this point submission_disabled() is guaranteed
1030 		 * to be visible to all callers who set the below flags (see above
1031 		 * flush and flushes in reset_prepare). If submission_disabled()
1032 		 * is set, the caller shouldn't set these flags.
1033 		 */
1034 
1035 		destroyed = context_destroyed(ce);
1036 		pending_enable = context_pending_enable(ce);
1037 		pending_disable = context_pending_disable(ce);
1038 		deregister = context_wait_for_deregister_to_register(ce);
1039 		banned = context_banned(ce);
1040 		init_sched_state(ce);
1041 
1042 		spin_unlock(&ce->guc_state.lock);
1043 
1044 		if (pending_enable || destroyed || deregister) {
1045 			decr_outstanding_submission_g2h(guc);
1046 			if (deregister)
1047 				guc_signal_context_fence(ce);
1048 			if (destroyed) {
1049 				intel_gt_pm_put_async(guc_to_gt(guc));
1050 				release_guc_id(guc, ce);
1051 				__guc_context_destroy(ce);
1052 			}
1053 			if (pending_enable || deregister)
1054 				intel_context_put(ce);
1055 		}
1056 
		/* Not mutually exclusive with above if statement. */
1058 		if (pending_disable) {
1059 			guc_signal_context_fence(ce);
1060 			if (banned) {
1061 				guc_cancel_context_requests(ce);
1062 				intel_engine_signal_breadcrumbs(ce->engine);
1063 			}
1064 			intel_context_sched_disable_unpin(ce);
1065 			decr_outstanding_submission_g2h(guc);
1066 
1067 			spin_lock(&ce->guc_state.lock);
1068 			guc_blocked_fence_complete(ce);
1069 			spin_unlock(&ce->guc_state.lock);
1070 
1071 			intel_context_put(ce);
1072 		}
1073 
1074 		if (do_put)
1075 			intel_context_put(ce);
1076 		xa_lock(&guc->context_lookup);
1077 	}
1078 	xa_unlock_irqrestore(&guc->context_lookup, flags);
1079 }
1080 
1081 /*
1082  * GuC stores busyness stats for each engine at context in/out boundaries. A
1083  * context 'in' logs execution start time, 'out' adds in -> out delta to total.
1084  * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
1085  * GuC.
1086  *
1087  * __i915_pmu_event_read samples engine busyness. When sampling, if context id
1088  * is valid (!= ~0) and start is non-zero, the engine is considered to be
1089  * active. For an active engine total busyness = total + (now - start), where
1090  * 'now' is the time at which the busyness is sampled. For inactive engine,
1091  * total busyness = total.
1092  *
1093  * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
1094  *
1095  * The start and total values provided by GuC are 32 bits and wrap around in a
1096  * few minutes. Since perf pmu provides busyness as 64 bit monotonically
1097  * increasing ns values, there is a need for this implementation to account for
1098  * overflows and extend the GuC provided values to 64 bits before returning
1099  * busyness to the user. In order to do that, a worker runs periodically at
1100  * frequency = 1/8th the time it takes for the timestamp to wrap (i.e. once in
1101  * 27 seconds for a gt clock frequency of 19.2 MHz).
1102  */
1103 
1104 #define WRAP_TIME_CLKS U32_MAX
1105 #define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3)
1106 
1107 static void
1108 __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
1109 {
1110 	u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
1111 	u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp);
1112 
1113 	if (new_start == lower_32_bits(*prev_start))
1114 		return;
1115 
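	/*
	 * The 32-bit GuC start timestamp and the cached gt_stamp can wrap
	 * independently. The unsigned differences below detect which of the
	 * two has wrapped relative to the other within one poll period:
	 * new_start being in the next epoch bumps the high word, while
	 * gt_stamp having wrapped ahead of new_start decrements it.
	 */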
1116 	if (new_start < gt_stamp_last &&
1117 	    (new_start - gt_stamp_last) <= POLL_TIME_CLKS)
1118 		gt_stamp_hi++;
1119 
1120 	if (new_start > gt_stamp_last &&
1121 	    (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi)
1122 		gt_stamp_hi--;
1123 
1124 	*prev_start = ((u64)gt_stamp_hi << 32) | new_start;
1125 }
1126 
1127 static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
1128 {
1129 	struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
1130 	struct intel_engine_guc_stats *stats = &engine->stats.guc;
1131 	struct intel_guc *guc = &engine->gt->uc.guc;
1132 	u32 last_switch = rec->last_switch_in_stamp;
1133 	u32 ctx_id = rec->current_context_index;
1134 	u32 total = rec->total_runtime;
1135 
1136 	lockdep_assert_held(&guc->timestamp.lock);
1137 
1138 	stats->running = ctx_id != ~0U && last_switch;
1139 	if (stats->running)
1140 		__extend_last_switch(guc, &stats->start_gt_clk, last_switch);
1141 
1142 	/*
1143 	 * Instead of adjusting the total for overflow, just add the
	 * difference from the previous sample to stats->total_gt_clks
1145 	 */
1146 	if (total && total != ~0U) {
1147 		stats->total_gt_clks += (u32)(total - stats->prev_total);
1148 		stats->prev_total = total;
1149 	}
1150 }
1151 
1152 static void guc_update_pm_timestamp(struct intel_guc *guc,
1153 				    struct intel_engine_cs *engine,
1154 				    ktime_t *now)
1155 {
1156 	u32 gt_stamp_now, gt_stamp_hi;
1157 
1158 	lockdep_assert_held(&guc->timestamp.lock);
1159 
1160 	gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
1161 	gt_stamp_now = intel_uncore_read(engine->uncore,
1162 					 RING_TIMESTAMP(engine->mmio_base));
1163 	*now = ktime_get();
1164 
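	/*
	 * A smaller reading than the cached low word means the 32-bit
	 * GUCPMTIMESTAMP has wrapped since the last sample; carry into the
	 * high word.
	 */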
1165 	if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp))
1166 		gt_stamp_hi++;
1167 
1168 	guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now;
1169 }
1170 
1171 /*
 * Unlike the execlist mode of submission, total and active times are in terms
 * of gt clocks. The *now parameter is retained to return the cpu time at which
 * the busyness was sampled.
1175  */
1176 static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
1177 {
1178 	struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
1179 	struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
1180 	struct intel_gt *gt = engine->gt;
1181 	struct intel_guc *guc = &gt->uc.guc;
1182 	u64 total, gt_stamp_saved;
1183 	unsigned long flags;
1184 	u32 reset_count;
1185 	bool in_reset;
1186 
1187 	spin_lock_irqsave(&guc->timestamp.lock, flags);
1188 
1189 	/*
1190 	 * If a reset happened, we risk reading partially updated engine
1191 	 * busyness from GuC, so we just use the driver stored copy of busyness.
1192 	 * Synchronize with gt reset using reset_count and the
1193 	 * I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count
1194 	 * after I915_RESET_BACKOFF flag, so ensure that the reset_count is
1195 	 * usable by checking the flag afterwards.
1196 	 */
1197 	reset_count = i915_reset_count(gpu_error);
1198 	in_reset = test_bit(I915_RESET_BACKOFF, &gt->reset.flags);
1199 
1200 	*now = ktime_get();
1201 
1202 	/*
1203 	 * The active busyness depends on start_gt_clk and gt_stamp.
1204 	 * gt_stamp is updated by i915 only when gt is awake and the
1205 	 * start_gt_clk is derived from GuC state. To get a consistent
1206 	 * view of activity, we query the GuC state only if gt is awake.
1207 	 */
1208 	if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
1209 		stats_saved = *stats;
1210 		gt_stamp_saved = guc->timestamp.gt_stamp;
1211 		guc_update_engine_gt_clks(engine);
1212 		guc_update_pm_timestamp(guc, engine, now);
1213 		intel_gt_pm_put_async(gt);
1214 		if (i915_reset_count(gpu_error) != reset_count) {
1215 			*stats = stats_saved;
1216 			guc->timestamp.gt_stamp = gt_stamp_saved;
1217 		}
1218 	}
1219 
1220 	total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
1221 	if (stats->running) {
1222 		u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
1223 
1224 		total += intel_gt_clock_interval_to_ns(gt, clk);
1225 	}
1226 
1227 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1228 
1229 	return ns_to_ktime(total);
1230 }
1231 
1232 static void __reset_guc_busyness_stats(struct intel_guc *guc)
1233 {
1234 	struct intel_gt *gt = guc_to_gt(guc);
1235 	struct intel_engine_cs *engine;
1236 	enum intel_engine_id id;
1237 	unsigned long flags;
1238 	ktime_t unused;
1239 
1240 	cancel_delayed_work_sync(&guc->timestamp.work);
1241 
1242 	spin_lock_irqsave(&guc->timestamp.lock, flags);
1243 
1244 	for_each_engine(engine, gt, id) {
1245 		guc_update_pm_timestamp(guc, engine, &unused);
1246 		guc_update_engine_gt_clks(engine);
1247 		engine->stats.guc.prev_total = 0;
1248 	}
1249 
1250 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1251 }
1252 
1253 static void __update_guc_busyness_stats(struct intel_guc *guc)
1254 {
1255 	struct intel_gt *gt = guc_to_gt(guc);
1256 	struct intel_engine_cs *engine;
1257 	enum intel_engine_id id;
1258 	unsigned long flags;
1259 	ktime_t unused;
1260 
1261 	spin_lock_irqsave(&guc->timestamp.lock, flags);
1262 	for_each_engine(engine, gt, id) {
1263 		guc_update_pm_timestamp(guc, engine, &unused);
1264 		guc_update_engine_gt_clks(engine);
1265 	}
1266 	spin_unlock_irqrestore(&guc->timestamp.lock, flags);
1267 }
1268 
1269 static void guc_timestamp_ping(struct work_struct *wrk)
1270 {
1271 	struct intel_guc *guc = container_of(wrk, typeof(*guc),
1272 					     timestamp.work.work);
1273 	struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
1274 	struct intel_gt *gt = guc_to_gt(guc);
1275 	intel_wakeref_t wakeref;
1276 	int srcu, ret;
1277 
1278 	/*
1279 	 * Synchronize with gt reset to make sure the worker does not
1280 	 * corrupt the engine/guc stats.
1281 	 */
1282 	ret = intel_gt_reset_trylock(gt, &srcu);
1283 	if (ret)
1284 		return;
1285 
1286 	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
1287 		__update_guc_busyness_stats(guc);
1288 
1289 	intel_gt_reset_unlock(gt, srcu);
1290 
1291 	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
1292 			 guc->timestamp.ping_delay);
1293 }
1294 
1295 static int guc_action_enable_usage_stats(struct intel_guc *guc)
1296 {
1297 	u32 offset = intel_guc_engine_usage_offset(guc);
1298 	u32 action[] = {
1299 		INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
1300 		offset,
1301 		0,
1302 	};
1303 
1304 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
1305 }
1306 
1307 static void guc_init_engine_stats(struct intel_guc *guc)
1308 {
1309 	struct intel_gt *gt = guc_to_gt(guc);
1310 	intel_wakeref_t wakeref;
1311 
1312 	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
1313 			 guc->timestamp.ping_delay);
1314 
1315 	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
1316 		int ret = guc_action_enable_usage_stats(guc);
1317 
1318 		if (ret)
1319 			drm_err(&gt->i915->drm,
1320 				"Failed to enable usage stats: %d!\n", ret);
1321 	}
1322 }
1323 
1324 void intel_guc_busyness_park(struct intel_gt *gt)
1325 {
1326 	struct intel_guc *guc = &gt->uc.guc;
1327 
1328 	if (!guc_submission_initialized(guc))
1329 		return;
1330 
1331 	cancel_delayed_work(&guc->timestamp.work);
1332 	__update_guc_busyness_stats(guc);
1333 }
1334 
1335 void intel_guc_busyness_unpark(struct intel_gt *gt)
1336 {
1337 	struct intel_guc *guc = &gt->uc.guc;
1338 
1339 	if (!guc_submission_initialized(guc))
1340 		return;
1341 
1342 	mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
1343 			 guc->timestamp.ping_delay);
1344 }
1345 
1346 static inline bool
1347 submission_disabled(struct intel_guc *guc)
1348 {
1349 	struct i915_sched_engine * const sched_engine = guc->sched_engine;
1350 
1351 	return unlikely(!sched_engine ||
1352 			!__tasklet_is_enabled(&sched_engine->tasklet));
1353 }
1354 
1355 static void disable_submission(struct intel_guc *guc)
1356 {
1357 	struct i915_sched_engine * const sched_engine = guc->sched_engine;
1358 
1359 	if (__tasklet_is_enabled(&sched_engine->tasklet)) {
1360 		GEM_BUG_ON(!guc->ct.enabled);
1361 		__tasklet_disable_sync_once(&sched_engine->tasklet);
1362 		sched_engine->tasklet.callback = NULL;
1363 	}
1364 }
1365 
1366 static void enable_submission(struct intel_guc *guc)
1367 {
1368 	struct i915_sched_engine * const sched_engine = guc->sched_engine;
1369 	unsigned long flags;
1370 
1371 	spin_lock_irqsave(&guc->sched_engine->lock, flags);
1372 	sched_engine->tasklet.callback = guc_submission_tasklet;
1373 	wmb();	/* Make sure callback visible */
1374 	if (!__tasklet_is_enabled(&sched_engine->tasklet) &&
1375 	    __tasklet_enable(&sched_engine->tasklet)) {
1376 		GEM_BUG_ON(!guc->ct.enabled);
1377 
1378 		/* And kick in case we missed a new request submission. */
1379 		tasklet_hi_schedule(&sched_engine->tasklet);
1380 	}
1381 	spin_unlock_irqrestore(&guc->sched_engine->lock, flags);
1382 }
1383 
1384 static void guc_flush_submissions(struct intel_guc *guc)
1385 {
1386 	struct i915_sched_engine * const sched_engine = guc->sched_engine;
1387 	unsigned long flags;
1388 
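	/*
	 * Taking and releasing the submission lock acts as a barrier: any
	 * submission running under sched_engine->lock has completed by the
	 * time this returns.
	 */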
1389 	spin_lock_irqsave(&sched_engine->lock, flags);
1390 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1391 }
1392 
1393 static void guc_flush_destroyed_contexts(struct intel_guc *guc);
1394 
1395 void intel_guc_submission_reset_prepare(struct intel_guc *guc)
1396 {
1397 	int i;
1398 
1399 	if (unlikely(!guc_submission_initialized(guc))) {
1400 		/* Reset called during driver load? GuC not yet initialised! */
1401 		return;
1402 	}
1403 
1404 	intel_gt_park_heartbeats(guc_to_gt(guc));
1405 	disable_submission(guc);
1406 	guc->interrupts.disable(guc);
1407 	__reset_guc_busyness_stats(guc);
1408 
1409 	/* Flush IRQ handler */
1410 	spin_lock_irq(&guc_to_gt(guc)->irq_lock);
1411 	spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
1412 
1413 	guc_flush_submissions(guc);
1414 	guc_flush_destroyed_contexts(guc);
1415 
1416 	/*
1417 	 * Handle any outstanding G2Hs before reset. Call IRQ handler directly
	 * each pass as interrupts have been disabled. We always scrub for
1419 	 * outstanding G2H as it is possible for outstanding_submission_g2h to
1420 	 * be incremented after the context state update.
1421 	 */
1422 	for (i = 0; i < 4 && atomic_read(&guc->outstanding_submission_g2h); ++i) {
1423 		intel_guc_to_host_event_handler(guc);
1424 #define wait_for_reset(guc, wait_var) \
1425 		intel_guc_wait_for_pending_msg(guc, wait_var, false, (HZ / 20))
1426 		do {
1427 			wait_for_reset(guc, &guc->outstanding_submission_g2h);
1428 		} while (!list_empty(&guc->ct.requests.incoming));
1429 	}
1430 
1431 	scrub_guc_desc_for_outstanding_g2h(guc);
1432 }
1433 
1434 static struct intel_engine_cs *
1435 guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling)
1436 {
1437 	struct intel_engine_cs *engine;
1438 	intel_engine_mask_t tmp, mask = ve->mask;
1439 	unsigned int num_siblings = 0;
1440 
1441 	for_each_engine_masked(engine, ve->gt, mask, tmp)
1442 		if (num_siblings++ == sibling)
1443 			return engine;
1444 
1445 	return NULL;
1446 }
1447 
1448 static inline struct intel_engine_cs *
1449 __context_to_physical_engine(struct intel_context *ce)
1450 {
1451 	struct intel_engine_cs *engine = ce->engine;
1452 
1453 	if (intel_engine_is_virtual(engine))
1454 		engine = guc_virtual_get_sibling(engine, 0);
1455 
1456 	return engine;
1457 }
1458 
1459 static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
1460 {
1461 	struct intel_engine_cs *engine = __context_to_physical_engine(ce);
1462 
1463 	if (intel_context_is_banned(ce))
1464 		return;
1465 
1466 	GEM_BUG_ON(!intel_context_is_pinned(ce));
1467 
1468 	/*
1469 	 * We want a simple context + ring to execute the breadcrumb update.
1470 	 * We cannot rely on the context being intact across the GPU hang,
1471 	 * so clear it and rebuild just what we need for the breadcrumb.
1472 	 * All pending requests for this context will be zapped, and any
1473 	 * future request will be after userspace has had the opportunity
1474 	 * to recreate its own state.
1475 	 */
1476 	if (scrub)
1477 		lrc_init_regs(ce, engine, true);
1478 
1479 	/* Rerun the request; its payload has been neutered (if guilty). */
1480 	lrc_update_regs(ce, engine, head);
1481 }
1482 
1483 static void guc_reset_nop(struct intel_engine_cs *engine)
1484 {
1485 }
1486 
1487 static void guc_rewind_nop(struct intel_engine_cs *engine, bool stalled)
1488 {
1489 }
1490 
1491 static void
1492 __unwind_incomplete_requests(struct intel_context *ce)
1493 {
1494 	struct i915_request *rq, *rn;
1495 	struct list_head *pl;
1496 	int prio = I915_PRIORITY_INVALID;
1497 	struct i915_sched_engine * const sched_engine =
1498 		ce->engine->sched_engine;
1499 	unsigned long flags;
1500 
1501 	spin_lock_irqsave(&sched_engine->lock, flags);
1502 	spin_lock(&ce->guc_state.lock);
1503 	list_for_each_entry_safe_reverse(rq, rn,
1504 					 &ce->guc_state.requests,
1505 					 sched.link) {
1506 		if (i915_request_completed(rq))
1507 			continue;
1508 
1509 		list_del_init(&rq->sched.link);
1510 		__i915_request_unsubmit(rq);
1511 
1512 		/* Push the request back into the queue for later resubmission. */
1513 		GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
1514 		if (rq_prio(rq) != prio) {
1515 			prio = rq_prio(rq);
1516 			pl = i915_sched_lookup_priolist(sched_engine, prio);
1517 		}
1518 		GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
1519 
1520 		list_add(&rq->sched.link, pl);
1521 		set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
1522 	}
1523 	spin_unlock(&ce->guc_state.lock);
1524 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1525 }
1526 
1527 static void __guc_reset_context(struct intel_context *ce, bool stalled)
1528 {
1529 	bool local_stalled;
1530 	struct i915_request *rq;
1531 	unsigned long flags;
1532 	u32 head;
1533 	int i, number_children = ce->parallel.number_children;
1534 	bool skip = false;
1535 	struct intel_context *parent = ce;
1536 
1537 	GEM_BUG_ON(intel_context_is_child(ce));
1538 
1539 	intel_context_get(ce);
1540 
1541 	/*
1542 	 * GuC will implicitly mark the context as non-schedulable when it sends
1543 	 * the reset notification. Make sure our state reflects this change. The
1544 	 * context will be marked enabled on resubmission.
1545 	 *
	 * XXX: If the context is reset as a result of request cancellation,
	 * this G2H is received after the schedule disable complete G2H, which
	 * is wrong as it creates a race between the request cancellation code
	 * re-submitting the context and this G2H handler. This is a bug in the
	 * GuC, but it can be worked around in the meantime by converting this
	 * to a NOP if a pending enable is in flight, as that indicates a
	 * request cancellation has occurred.
1553 	 */
1554 	spin_lock_irqsave(&ce->guc_state.lock, flags);
1555 	if (likely(!context_pending_enable(ce)))
1556 		clr_context_enabled(ce);
1557 	else
1558 		skip = true;
1559 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1560 	if (unlikely(skip))
1561 		goto out_put;
1562 
1563 	/*
1564 	 * For each context in the relationship find the hanging request
1565 	 * resetting each context / request as needed
1566 	 */
1567 	for (i = 0; i < number_children + 1; ++i) {
1568 		if (!intel_context_is_pinned(ce))
1569 			goto next_context;
1570 
1571 		local_stalled = false;
1572 		rq = intel_context_find_active_request(ce);
1573 		if (!rq) {
1574 			head = ce->ring->tail;
1575 			goto out_replay;
1576 		}
1577 
1578 		if (i915_request_started(rq))
1579 			local_stalled = true;
1580 
1581 		GEM_BUG_ON(i915_active_is_idle(&ce->active));
1582 		head = intel_ring_wrap(ce->ring, rq->head);
1583 
1584 		__i915_request_reset(rq, local_stalled && stalled);
1585 out_replay:
1586 		guc_reset_state(ce, head, local_stalled && stalled);
1587 next_context:
1588 		if (i != number_children)
1589 			ce = list_next_entry(ce, parallel.child_link);
1590 	}
1591 
1592 	__unwind_incomplete_requests(parent);
1593 out_put:
1594 	intel_context_put(parent);
1595 }
1596 
1597 void intel_guc_submission_reset(struct intel_guc *guc, bool stalled)
1598 {
1599 	struct intel_context *ce;
1600 	unsigned long index;
1601 	unsigned long flags;
1602 
1603 	if (unlikely(!guc_submission_initialized(guc))) {
1604 		/* Reset called during driver load? GuC not yet initialised! */
1605 		return;
1606 	}
1607 
1608 	xa_lock_irqsave(&guc->context_lookup, flags);
1609 	xa_for_each(&guc->context_lookup, index, ce) {
1610 		if (!kref_get_unless_zero(&ce->ref))
1611 			continue;
1612 
1613 		xa_unlock(&guc->context_lookup);
1614 
1615 		if (intel_context_is_pinned(ce) &&
1616 		    !intel_context_is_child(ce))
1617 			__guc_reset_context(ce, stalled);
1618 
1619 		intel_context_put(ce);
1620 
1621 		xa_lock(&guc->context_lookup);
1622 	}
1623 	xa_unlock_irqrestore(&guc->context_lookup, flags);
1624 
1625 	/* GuC is blown away, drop all references to contexts */
1626 	xa_destroy(&guc->context_lookup);
1627 }
1628 
1629 static void guc_cancel_context_requests(struct intel_context *ce)
1630 {
1631 	struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine;
1632 	struct i915_request *rq;
1633 	unsigned long flags;
1634 
1635 	/* Mark all executing requests as skipped. */
1636 	spin_lock_irqsave(&sched_engine->lock, flags);
1637 	spin_lock(&ce->guc_state.lock);
1638 	list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
1639 		i915_request_put(i915_request_mark_eio(rq));
1640 	spin_unlock(&ce->guc_state.lock);
1641 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1642 }
1643 
1644 static void
1645 guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine)
1646 {
1647 	struct i915_request *rq, *rn;
1648 	struct rb_node *rb;
1649 	unsigned long flags;
1650 
1651 	/* Can be called during boot if GuC fails to load */
1652 	if (!sched_engine)
1653 		return;
1654 
1655 	/*
1656 	 * Before we call engine->cancel_requests(), we should have exclusive
1657 	 * access to the submission state. This is arranged for us by the
1658 	 * caller disabling the interrupt generation, the tasklet and other
1659 	 * threads that may then access the same state, giving us a free hand
1660 	 * to reset state. However, we still need to let lockdep be aware that
1661 	 * we know this state may be accessed in hardirq context, so we
1662 	 * disable the irq around this manipulation and we want to keep
1663 	 * the spinlock focused on its duties and not accidentally conflate
1664 	 * coverage to the submission's irq state. (Similarly, although we
1665 	 * shouldn't need to disable irq around the manipulation of the
1666 	 * submission's irq state, we also wish to remind ourselves that
1667 	 * it is irq state.)
1668 	 */
1669 	spin_lock_irqsave(&sched_engine->lock, flags);
1670 
1671 	/* Flush the queued requests to the timeline list (for retiring). */
1672 	while ((rb = rb_first_cached(&sched_engine->queue))) {
1673 		struct i915_priolist *p = to_priolist(rb);
1674 
1675 		priolist_for_each_request_consume(rq, rn, p) {
1676 			list_del_init(&rq->sched.link);
1677 
1678 			__i915_request_submit(rq);
1679 
1680 			i915_request_put(i915_request_mark_eio(rq));
1681 		}
1682 
1683 		rb_erase_cached(&p->node, &sched_engine->queue);
1684 		i915_priolist_free(p);
1685 	}
1686 
1687 	/* Remaining _unready_ requests will be nop'ed when submitted */
1688 
1689 	sched_engine->queue_priority_hint = INT_MIN;
1690 	sched_engine->queue = RB_ROOT_CACHED;
1691 
1692 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1693 }
1694 
1695 void intel_guc_submission_cancel_requests(struct intel_guc *guc)
1696 {
1697 	struct intel_context *ce;
1698 	unsigned long index;
1699 	unsigned long flags;
1700 
1701 	xa_lock_irqsave(&guc->context_lookup, flags);
1702 	xa_for_each(&guc->context_lookup, index, ce) {
1703 		if (!kref_get_unless_zero(&ce->ref))
1704 			continue;
1705 
1706 		xa_unlock(&guc->context_lookup);
1707 
1708 		if (intel_context_is_pinned(ce) &&
1709 		    !intel_context_is_child(ce))
1710 			guc_cancel_context_requests(ce);
1711 
1712 		intel_context_put(ce);
1713 
1714 		xa_lock(&guc->context_lookup);
1715 	}
1716 	xa_unlock_irqrestore(&guc->context_lookup, flags);
1717 
1718 	guc_cancel_sched_engine_requests(guc->sched_engine);
1719 
1720 	/* GuC is blown away, drop all references to contexts */
1721 	xa_destroy(&guc->context_lookup);
1722 }
1723 
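/*
 * Called once a GT reset has completed: re-apply the global scheduling
 * policies, re-enable submission and unpark the heartbeats.
 */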
1724 void intel_guc_submission_reset_finish(struct intel_guc *guc)
1725 {
1726 	/* Reset called during driver load or during wedge? */
1727 	if (unlikely(!guc_submission_initialized(guc) ||
1728 		     test_bit(I915_WEDGED, &guc_to_gt(guc)->reset.flags))) {
1729 		return;
1730 	}
1731 
1732 	/*
	 * Technically possible for this to be non-zero here, but very unlikely
	 * and harmless. Regardless, add a warn so we can see in CI if it
	 * happens frequently / is a precursor to taking down the machine.
1737 	 */
1738 	GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
1739 	atomic_set(&guc->outstanding_submission_g2h, 0);
1740 
1741 	intel_guc_global_policies_update(guc);
1742 	enable_submission(guc);
1743 	intel_gt_unpark_heartbeats(guc_to_gt(guc));
1744 }
1745 
1746 static void destroyed_worker_func(struct work_struct *w);
1747 
1748 /*
1749  * Set up the memory resources to be shared with the GuC (via the GGTT)
1750  * at firmware loading time.
1751  */
1752 int intel_guc_submission_init(struct intel_guc *guc)
1753 {
1754 	struct intel_gt *gt = guc_to_gt(guc);
1755 	int ret;
1756 
1757 	if (guc->lrc_desc_pool)
1758 		return 0;
1759 
1760 	ret = guc_lrc_desc_pool_create(guc);
1761 	if (ret)
1762 		return ret;
1763 	/*
	 * Keep static analysers happy: let them know that we allocated the
	 * vma after testing that it didn't exist earlier.
1766 	 */
1767 	GEM_BUG_ON(!guc->lrc_desc_pool);
1768 
1769 	xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
1770 
1771 	spin_lock_init(&guc->submission_state.lock);
1772 	INIT_LIST_HEAD(&guc->submission_state.guc_id_list);
1773 	ida_init(&guc->submission_state.guc_ids);
1774 	INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts);
1775 	INIT_WORK(&guc->submission_state.destroyed_worker,
1776 		  destroyed_worker_func);
1777 
1778 	guc->submission_state.guc_ids_bitmap =
1779 		bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
1780 	if (!guc->submission_state.guc_ids_bitmap)
1781 		return -ENOMEM;
1782 
1783 	spin_lock_init(&guc->timestamp.lock);
1784 	INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
1785 	guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
1786 
1787 	return 0;
1788 }
1789 
1790 void intel_guc_submission_fini(struct intel_guc *guc)
1791 {
1792 	if (!guc->lrc_desc_pool)
1793 		return;
1794 
1795 	guc_flush_destroyed_contexts(guc);
1796 	guc_lrc_desc_pool_destroy(guc);
1797 	i915_sched_engine_put(guc->sched_engine);
1798 	bitmap_free(guc->submission_state.guc_ids_bitmap);
1799 }
1800 
1801 static inline void queue_request(struct i915_sched_engine *sched_engine,
1802 				 struct i915_request *rq,
1803 				 int prio)
1804 {
1805 	GEM_BUG_ON(!list_empty(&rq->sched.link));
1806 	list_add_tail(&rq->sched.link,
1807 		      i915_sched_lookup_priolist(sched_engine, prio));
1808 	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
1809 	tasklet_hi_schedule(&sched_engine->tasklet);
1810 }
1811 
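/*
 * Fast path: hand a ready request straight to the GuC without going through
 * the tasklet. Multi-LRC requests only emit the work-queue item and H2G once
 * multi_lrc_submit() allows it.
 */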
1812 static int guc_bypass_tasklet_submit(struct intel_guc *guc,
1813 				     struct i915_request *rq)
1814 {
1815 	int ret = 0;
1816 
1817 	__i915_request_submit(rq);
1818 
1819 	trace_i915_request_in(rq, 0);
1820 
1821 	if (is_multi_lrc_rq(rq)) {
1822 		if (multi_lrc_submit(rq)) {
1823 			ret = guc_wq_item_append(guc, rq);
1824 			if (!ret)
1825 				ret = guc_add_request(guc, rq);
1826 		}
1827 	} else {
1828 		guc_set_lrc_tail(rq);
1829 		ret = guc_add_request(guc, rq);
1830 	}
1831 
1832 	if (unlikely(ret == -EPIPE))
1833 		disable_submission(guc);
1834 
1835 	return ret;
1836 }
1837 
1838 static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
1839 {
1840 	struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
1841 	struct intel_context *ce = request_to_scheduling_context(rq);
1842 
1843 	return submission_disabled(guc) || guc->stalled_request ||
1844 		!i915_sched_engine_is_empty(sched_engine) ||
1845 		!lrc_desc_registered(guc, ce->guc_id.id);
1846 }
1847 
1848 static void guc_submit_request(struct i915_request *rq)
1849 {
1850 	struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
1851 	struct intel_guc *guc = &rq->engine->gt->uc.guc;
1852 	unsigned long flags;
1853 
1854 	/* Will be called from irq-context when using foreign fences. */
1855 	spin_lock_irqsave(&sched_engine->lock, flags);
1856 
1857 	if (need_tasklet(guc, rq))
1858 		queue_request(sched_engine, rq, rq_prio(rq));
1859 	else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY)
1860 		tasklet_hi_schedule(&sched_engine->tasklet);
1861 
1862 	spin_unlock_irqrestore(&sched_engine->lock, flags);
1863 }
1864 
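/*
 * Allocate a fresh guc_id: parent (multi-LRC) contexts take a power-of-two
 * block of ids from the bitmap, sized for the parent plus all children, while
 * single-LRC contexts take a single id from the ida.
 */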
1865 static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
1866 {
1867 	int ret;
1868 
1869 	GEM_BUG_ON(intel_context_is_child(ce));
1870 
1871 	if (intel_context_is_parent(ce))
1872 		ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
1873 					      NUMBER_MULTI_LRC_GUC_ID(guc),
1874 					      order_base_2(ce->parallel.number_children
1875 							   + 1));
1876 	else
1877 		ret = ida_simple_get(&guc->submission_state.guc_ids,
1878 				     NUMBER_MULTI_LRC_GUC_ID(guc),
1879 				     guc->submission_state.num_guc_ids,
1880 				     GFP_KERNEL | __GFP_RETRY_MAYFAIL |
1881 				     __GFP_NOWARN);
1882 	if (unlikely(ret < 0))
1883 		return ret;
1884 
1885 	ce->guc_id.id = ret;
1886 	return 0;
1887 }
1888 
1889 static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
1890 {
1891 	GEM_BUG_ON(intel_context_is_child(ce));
1892 
1893 	if (!context_guc_id_invalid(ce)) {
1894 		if (intel_context_is_parent(ce))
1895 			bitmap_release_region(guc->submission_state.guc_ids_bitmap,
1896 					      ce->guc_id.id,
1897 					      order_base_2(ce->parallel.number_children
1898 							   + 1));
1899 		else
1900 			ida_simple_remove(&guc->submission_state.guc_ids,
1901 					  ce->guc_id.id);
1902 		reset_lrc_desc(guc, ce->guc_id.id);
1903 		set_context_guc_id_invalid(ce);
1904 	}
1905 	if (!list_empty(&ce->guc_id.link))
1906 		list_del_init(&ce->guc_id.link);
1907 }
1908 
1909 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
1910 {
1911 	unsigned long flags;
1912 
1913 	spin_lock_irqsave(&guc->submission_state.lock, flags);
1914 	__release_guc_id(guc, ce);
1915 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
1916 }
1917 
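/*
 * Steal a guc_id from the first idle context on the guc_id_list (no guc_id
 * references outstanding). Only single-LRC ids can be stolen; parent contexts
 * are never placed on the list.
 */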
1918 static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
1919 {
1920 	struct intel_context *cn;
1921 
1922 	lockdep_assert_held(&guc->submission_state.lock);
1923 	GEM_BUG_ON(intel_context_is_child(ce));
1924 	GEM_BUG_ON(intel_context_is_parent(ce));
1925 
1926 	if (!list_empty(&guc->submission_state.guc_id_list)) {
1927 		cn = list_first_entry(&guc->submission_state.guc_id_list,
1928 				      struct intel_context,
1929 				      guc_id.link);
1930 
1931 		GEM_BUG_ON(atomic_read(&cn->guc_id.ref));
1932 		GEM_BUG_ON(context_guc_id_invalid(cn));
1933 		GEM_BUG_ON(intel_context_is_child(cn));
1934 		GEM_BUG_ON(intel_context_is_parent(cn));
1935 
1936 		list_del_init(&cn->guc_id.link);
1937 		ce->guc_id.id = cn->guc_id.id;
1938 
1939 		spin_lock(&cn->guc_state.lock);
1940 		clr_context_registered(cn);
1941 		spin_unlock(&cn->guc_state.lock);
1942 
1943 		set_context_guc_id_invalid(cn);
1944 
1945 #ifdef CONFIG_DRM_I915_SELFTEST
1946 		guc->number_guc_id_stolen++;
1947 #endif
1948 
1949 		return 0;
1950 	} else {
1951 		return -EAGAIN;
1952 	}
1953 }
1954 
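/*
 * Assign a guc_id to the context, stealing one from an idle context if none
 * are free (parent contexts never steal). Children of a parallel context are
 * given the ids immediately following the parent's.
 */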
1955 static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce)
1956 {
1957 	int ret;
1958 
1959 	lockdep_assert_held(&guc->submission_state.lock);
1960 	GEM_BUG_ON(intel_context_is_child(ce));
1961 
1962 	ret = new_guc_id(guc, ce);
1963 	if (unlikely(ret < 0)) {
1964 		if (intel_context_is_parent(ce))
1965 			return -ENOSPC;
1966 
1967 		ret = steal_guc_id(guc, ce);
1968 		if (ret < 0)
1969 			return ret;
1970 	}
1971 
1972 	if (intel_context_is_parent(ce)) {
1973 		struct intel_context *child;
1974 		int i = 1;
1975 
1976 		for_each_child(ce, child)
1977 			child->guc_id.id = ce->guc_id.id + i++;
1978 	}
1979 
1980 	return 0;
1981 }
1982 
1983 #define PIN_GUC_ID_TRIES	4
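/*
 * Pin the context's guc_id: returns 1 if a new guc_id was assigned, 0 if the
 * existing one was reused, or a negative error code. -EAGAIN from the
 * allocator is retried a few times, retiring requests between attempts to
 * free up ids.
 */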
1984 static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
1985 {
1986 	int ret = 0;
1987 	unsigned long flags, tries = PIN_GUC_ID_TRIES;
1988 
1989 	GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
1990 
1991 try_again:
1992 	spin_lock_irqsave(&guc->submission_state.lock, flags);
1993 
1994 	might_lock(&ce->guc_state.lock);
1995 
1996 	if (context_guc_id_invalid(ce)) {
1997 		ret = assign_guc_id(guc, ce);
1998 		if (ret)
1999 			goto out_unlock;
		ret = 1;	/* Indicates newly assigned guc_id */
2001 	}
2002 	if (!list_empty(&ce->guc_id.link))
2003 		list_del_init(&ce->guc_id.link);
2004 	atomic_inc(&ce->guc_id.ref);
2005 
2006 out_unlock:
2007 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2008 
2009 	/*
	 * -EAGAIN indicates no guc_ids are available, let's retire any
	 * outstanding requests to see if that frees up a guc_id. If the first
	 * retire didn't help, insert a sleep of the timeslice duration before
	 * attempting to retire more requests. Double the sleep period each
	 * subsequent pass before finally giving up. The sleep period has a
	 * maximum of 100ms and a minimum of 1ms.
2016 	 */
2017 	if (ret == -EAGAIN && --tries) {
2018 		if (PIN_GUC_ID_TRIES - tries > 1) {
2019 			unsigned int timeslice_shifted =
2020 				ce->engine->props.timeslice_duration_ms <<
2021 				(PIN_GUC_ID_TRIES - tries - 2);
2022 			unsigned int max = min_t(unsigned int, 100,
2023 						 timeslice_shifted);
2024 
2025 			msleep(max_t(unsigned int, max, 1));
2026 		}
2027 		intel_gt_retire_requests(guc_to_gt(guc));
2028 		goto try_again;
2029 	}
2030 
2031 	return ret;
2032 }
2033 
2034 static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
2035 {
2036 	unsigned long flags;
2037 
2038 	GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
2039 	GEM_BUG_ON(intel_context_is_child(ce));
2040 
2041 	if (unlikely(context_guc_id_invalid(ce) ||
2042 		     intel_context_is_parent(ce)))
2043 		return;
2044 
2045 	spin_lock_irqsave(&guc->submission_state.lock, flags);
2046 	if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
2047 	    !atomic_read(&ce->guc_id.ref))
2048 		list_add_tail(&ce->guc_id.link,
2049 			      &guc->submission_state.guc_id_list);
2050 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2051 }
2052 
2053 static int __guc_action_register_multi_lrc(struct intel_guc *guc,
2054 					   struct intel_context *ce,
2055 					   u32 guc_id,
2056 					   u32 offset,
2057 					   bool loop)
2058 {
2059 	struct intel_context *child;
2060 	u32 action[4 + MAX_ENGINE_INSTANCE];
2061 	int len = 0;
2062 
2063 	GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
2064 
2065 	action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
2066 	action[len++] = guc_id;
2067 	action[len++] = ce->parallel.number_children + 1;
2068 	action[len++] = offset;
2069 	for_each_child(ce, child) {
2070 		offset += sizeof(struct guc_lrc_desc);
2071 		action[len++] = offset;
2072 	}
2073 
2074 	return guc_submission_send_busy_loop(guc, action, len, 0, loop);
2075 }
2076 
2077 static int __guc_action_register_context(struct intel_guc *guc,
2078 					 u32 guc_id,
2079 					 u32 offset,
2080 					 bool loop)
2081 {
2082 	u32 action[] = {
2083 		INTEL_GUC_ACTION_REGISTER_CONTEXT,
2084 		guc_id,
2085 		offset,
2086 	};
2087 
2088 	return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2089 					     0, loop);
2090 }
2091 
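/*
 * Register the context's LRC descriptor(s) with the GuC via H2G; a parent
 * registers itself and all of its children with a single multi-LRC action.
 */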
2092 static int register_context(struct intel_context *ce, bool loop)
2093 {
2094 	struct intel_guc *guc = ce_to_guc(ce);
2095 	u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) +
2096 		ce->guc_id.id * sizeof(struct guc_lrc_desc);
2097 	int ret;
2098 
2099 	GEM_BUG_ON(intel_context_is_child(ce));
2100 	trace_intel_context_register(ce);
2101 
2102 	if (intel_context_is_parent(ce))
2103 		ret = __guc_action_register_multi_lrc(guc, ce, ce->guc_id.id,
2104 						      offset, loop);
2105 	else
2106 		ret = __guc_action_register_context(guc, ce->guc_id.id, offset,
2107 						    loop);
2108 	if (likely(!ret)) {
2109 		unsigned long flags;
2110 
2111 		spin_lock_irqsave(&ce->guc_state.lock, flags);
2112 		set_context_registered(ce);
2113 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2114 	}
2115 
2116 	return ret;
2117 }
2118 
2119 static int __guc_action_deregister_context(struct intel_guc *guc,
2120 					   u32 guc_id)
2121 {
2122 	u32 action[] = {
2123 		INTEL_GUC_ACTION_DEREGISTER_CONTEXT,
2124 		guc_id,
2125 	};
2126 
2127 	return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2128 					     G2H_LEN_DW_DEREGISTER_CONTEXT,
2129 					     true);
2130 }
2131 
2132 static int deregister_context(struct intel_context *ce, u32 guc_id)
2133 {
2134 	struct intel_guc *guc = ce_to_guc(ce);
2135 
2136 	GEM_BUG_ON(intel_context_is_child(ce));
2137 	trace_intel_context_deregister(ce);
2138 
2139 	return __guc_action_deregister_context(guc, guc_id);
2140 }
2141 
2142 static inline void clear_children_join_go_memory(struct intel_context *ce)
2143 {
2144 	struct parent_scratch *ps = __get_parent_scratch(ce);
2145 	int i;
2146 
2147 	ps->go.semaphore = 0;
2148 	for (i = 0; i < ce->parallel.number_children + 1; ++i)
2149 		ps->join[i].semaphore = 0;
2150 }
2151 
2152 static inline u32 get_children_go_value(struct intel_context *ce)
2153 {
2154 	return __get_parent_scratch(ce)->go.semaphore;
2155 }
2156 
2157 static inline u32 get_children_join_value(struct intel_context *ce,
2158 					  u8 child_index)
2159 {
2160 	return __get_parent_scratch(ce)->join[child_index].semaphore;
2161 }
2162 
2163 static void guc_context_policy_init(struct intel_engine_cs *engine,
2164 				    struct guc_lrc_desc *desc)
2165 {
2166 	desc->policy_flags = 0;
2167 
2168 	if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
2169 		desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE;
2170 
2171 	/* NB: For both of these, zero means disabled. */
2172 	desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
2173 	desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
2174 }
2175 
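/*
 * Populate the LRC descriptor(s) for this context and register it with the
 * GuC. If the guc_id is already registered (stolen id or a stale descriptor),
 * the old registration is deregistered first and this context is registered
 * once the corresponding G2H arrives.
 */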
2176 static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
2177 {
2178 	struct intel_engine_cs *engine = ce->engine;
2179 	struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
2180 	struct intel_guc *guc = &engine->gt->uc.guc;
2181 	u32 desc_idx = ce->guc_id.id;
2182 	struct guc_lrc_desc *desc;
2183 	bool context_registered;
2184 	intel_wakeref_t wakeref;
2185 	struct intel_context *child;
2186 	int ret = 0;
2187 
2188 	GEM_BUG_ON(!engine->mask);
2189 	GEM_BUG_ON(!sched_state_is_init(ce));
2190 
2191 	/*
	 * Ensure the LRC and CT vmas are in the same region, as the write
	 * barrier is done based on the CT vma region.
2194 	 */
2195 	GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
2196 		   i915_gem_object_is_lmem(ce->ring->vma->obj));
2197 
2198 	context_registered = lrc_desc_registered(guc, desc_idx);
2199 
2200 	reset_lrc_desc(guc, desc_idx);
2201 	set_lrc_desc_registered(guc, desc_idx, ce);
2202 
2203 	desc = __get_lrc_desc(guc, desc_idx);
2204 	desc->engine_class = engine_class_to_guc_class(engine->class);
2205 	desc->engine_submit_mask = engine->logical_mask;
2206 	desc->hw_context_desc = ce->lrc.lrca;
2207 	desc->priority = ce->guc_state.prio;
2208 	desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
2209 	guc_context_policy_init(engine, desc);
2210 
2211 	/*
	 * If the context is a parent, we need to register a process descriptor
	 * describing its work queue and register all child contexts.
2214 	 */
2215 	if (intel_context_is_parent(ce)) {
2216 		struct guc_process_desc *pdesc;
2217 
2218 		ce->parallel.guc.wqi_tail = 0;
2219 		ce->parallel.guc.wqi_head = 0;
2220 
2221 		desc->process_desc = i915_ggtt_offset(ce->state) +
2222 			__get_parent_scratch_offset(ce);
2223 		desc->wq_addr = i915_ggtt_offset(ce->state) +
2224 			__get_wq_offset(ce);
2225 		desc->wq_size = WQ_SIZE;
2226 
2227 		pdesc = __get_process_desc(ce);
2228 		memset(pdesc, 0, sizeof(*(pdesc)));
2229 		pdesc->stage_id = ce->guc_id.id;
2230 		pdesc->wq_base_addr = desc->wq_addr;
2231 		pdesc->wq_size_bytes = desc->wq_size;
2232 		pdesc->wq_status = WQ_STATUS_ACTIVE;
2233 
2234 		for_each_child(ce, child) {
2235 			desc = __get_lrc_desc(guc, child->guc_id.id);
2236 
2237 			desc->engine_class =
2238 				engine_class_to_guc_class(engine->class);
2239 			desc->hw_context_desc = child->lrc.lrca;
2240 			desc->priority = ce->guc_state.prio;
2241 			desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
2242 			guc_context_policy_init(engine, desc);
2243 		}
2244 
2245 		clear_children_join_go_memory(ce);
2246 	}
2247 
2248 	/*
	 * The context_lookup xarray is used to determine if the hardware
	 * context is currently registered. There are two cases in which it
	 * could be registered: either the guc_id has been stolen from another
	 * context, or the LRC descriptor address of this context has changed.
	 * In either case the context needs to be deregistered with the GuC
	 * before registering this context.
2255 	 */
2256 	if (context_registered) {
2257 		bool disabled;
2258 		unsigned long flags;
2259 
2260 		trace_intel_context_steal_guc_id(ce);
2261 		GEM_BUG_ON(!loop);
2262 
2263 		/* Seal race with Reset */
2264 		spin_lock_irqsave(&ce->guc_state.lock, flags);
2265 		disabled = submission_disabled(guc);
2266 		if (likely(!disabled)) {
2267 			set_context_wait_for_deregister_to_register(ce);
2268 			intel_context_get(ce);
2269 		}
2270 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2271 		if (unlikely(disabled)) {
2272 			reset_lrc_desc(guc, desc_idx);
2273 			return 0;	/* Will get registered later */
2274 		}
2275 
2276 		/*
2277 		 * If stealing the guc_id, this ce has the same guc_id as the
2278 		 * context whose guc_id was stolen.
2279 		 */
2280 		with_intel_runtime_pm(runtime_pm, wakeref)
2281 			ret = deregister_context(ce, ce->guc_id.id);
2282 		if (unlikely(ret == -ENODEV))
2283 			ret = 0;	/* Will get registered later */
2284 	} else {
2285 		with_intel_runtime_pm(runtime_pm, wakeref)
2286 			ret = register_context(ce, loop);
2287 		if (unlikely(ret == -EBUSY)) {
2288 			reset_lrc_desc(guc, desc_idx);
2289 		} else if (unlikely(ret == -ENODEV)) {
2290 			reset_lrc_desc(guc, desc_idx);
2291 			ret = 0;	/* Will get registered later */
2292 		}
2293 	}
2294 
2295 	return ret;
2296 }
2297 
2298 static int __guc_context_pre_pin(struct intel_context *ce,
2299 				 struct intel_engine_cs *engine,
2300 				 struct i915_gem_ww_ctx *ww,
2301 				 void **vaddr)
2302 {
2303 	return lrc_pre_pin(ce, engine, ww, vaddr);
2304 }
2305 
2306 static int __guc_context_pin(struct intel_context *ce,
2307 			     struct intel_engine_cs *engine,
2308 			     void *vaddr)
2309 {
2310 	if (i915_ggtt_offset(ce->state) !=
2311 	    (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
2312 		set_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
2313 
2314 	/*
	 * The GuC context gets pinned in guc_request_alloc. See that function
	 * for an explanation of why.
2317 	 */
2318 
2319 	return lrc_pin(ce, engine, vaddr);
2320 }
2321 
2322 static int guc_context_pre_pin(struct intel_context *ce,
2323 			       struct i915_gem_ww_ctx *ww,
2324 			       void **vaddr)
2325 {
2326 	return __guc_context_pre_pin(ce, ce->engine, ww, vaddr);
2327 }
2328 
2329 static int guc_context_pin(struct intel_context *ce, void *vaddr)
2330 {
2331 	int ret = __guc_context_pin(ce, ce->engine, vaddr);
2332 
2333 	if (likely(!ret && !intel_context_is_barrier(ce)))
2334 		intel_engine_pm_get(ce->engine);
2335 
2336 	return ret;
2337 }
2338 
2339 static void guc_context_unpin(struct intel_context *ce)
2340 {
2341 	struct intel_guc *guc = ce_to_guc(ce);
2342 
2343 	unpin_guc_id(guc, ce);
2344 	lrc_unpin(ce);
2345 
2346 	if (likely(!intel_context_is_barrier(ce)))
2347 		intel_engine_pm_put_async(ce->engine);
2348 }
2349 
2350 static void guc_context_post_unpin(struct intel_context *ce)
2351 {
2352 	lrc_post_unpin(ce);
2353 }
2354 
2355 static void __guc_context_sched_enable(struct intel_guc *guc,
2356 				       struct intel_context *ce)
2357 {
2358 	u32 action[] = {
2359 		INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
2360 		ce->guc_id.id,
2361 		GUC_CONTEXT_ENABLE
2362 	};
2363 
2364 	trace_intel_context_sched_enable(ce);
2365 
2366 	guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2367 				      G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
2368 }
2369 
2370 static void __guc_context_sched_disable(struct intel_guc *guc,
2371 					struct intel_context *ce,
2372 					u16 guc_id)
2373 {
2374 	u32 action[] = {
2375 		INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
2376 		guc_id,	/* ce->guc_id.id not stable */
2377 		GUC_CONTEXT_DISABLE
2378 	};
2379 
2380 	GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID);
2381 
2382 	GEM_BUG_ON(intel_context_is_child(ce));
2383 	trace_intel_context_sched_disable(ce);
2384 
2385 	guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
2386 				      G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
2387 }
2388 
2389 static void guc_blocked_fence_complete(struct intel_context *ce)
2390 {
2391 	lockdep_assert_held(&ce->guc_state.lock);
2392 
2393 	if (!i915_sw_fence_done(&ce->guc_state.blocked))
2394 		i915_sw_fence_complete(&ce->guc_state.blocked);
2395 }
2396 
2397 static void guc_blocked_fence_reinit(struct intel_context *ce)
2398 {
2399 	lockdep_assert_held(&ce->guc_state.lock);
2400 	GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked));
2401 
2402 	/*
2403 	 * This fence is always complete unless a pending schedule disable is
2404 	 * outstanding. We arm the fence here and complete it when we receive
2405 	 * the pending schedule disable complete message.
2406 	 */
2407 	i915_sw_fence_fini(&ce->guc_state.blocked);
2408 	i915_sw_fence_reinit(&ce->guc_state.blocked);
2409 	i915_sw_fence_await(&ce->guc_state.blocked);
2410 	i915_sw_fence_commit(&ce->guc_state.blocked);
2411 }
2412 
2413 static u16 prep_context_pending_disable(struct intel_context *ce)
2414 {
2415 	lockdep_assert_held(&ce->guc_state.lock);
2416 
2417 	set_context_pending_disable(ce);
2418 	clr_context_enabled(ce);
2419 	guc_blocked_fence_reinit(ce);
2420 	intel_context_get(ce);
2421 
2422 	return ce->guc_id.id;
2423 }
2424 
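/*
 * Block submission on the context by disabling scheduling in the GuC. Returns
 * a fence that is signalled once the schedule-disable completion G2H has been
 * processed (or immediately if scheduling was never enabled).
 */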
2425 static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
2426 {
2427 	struct intel_guc *guc = ce_to_guc(ce);
2428 	unsigned long flags;
2429 	struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
2430 	intel_wakeref_t wakeref;
2431 	u16 guc_id;
2432 	bool enabled;
2433 
2434 	GEM_BUG_ON(intel_context_is_child(ce));
2435 
2436 	spin_lock_irqsave(&ce->guc_state.lock, flags);
2437 
2438 	incr_context_blocked(ce);
2439 
2440 	enabled = context_enabled(ce);
2441 	if (unlikely(!enabled || submission_disabled(guc))) {
2442 		if (enabled)
2443 			clr_context_enabled(ce);
2444 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2445 		return &ce->guc_state.blocked;
2446 	}
2447 
2448 	/*
2449 	 * We add +2 here as the schedule disable complete CTB handler calls
2450 	 * intel_context_sched_disable_unpin (-2 to pin_count).
2451 	 */
2452 	atomic_add(2, &ce->pin_count);
2453 
2454 	guc_id = prep_context_pending_disable(ce);
2455 
2456 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2457 
2458 	with_intel_runtime_pm(runtime_pm, wakeref)
2459 		__guc_context_sched_disable(guc, ce, guc_id);
2460 
2461 	return &ce->guc_state.blocked;
2462 }
2463 
2464 #define SCHED_STATE_MULTI_BLOCKED_MASK \
2465 	(SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED)
2466 #define SCHED_STATE_NO_UNBLOCK \
2467 	(SCHED_STATE_MULTI_BLOCKED_MASK | \
2468 	 SCHED_STATE_PENDING_DISABLE | \
2469 	 SCHED_STATE_BANNED)
2470 
2471 static bool context_cant_unblock(struct intel_context *ce)
2472 {
2473 	lockdep_assert_held(&ce->guc_state.lock);
2474 
2475 	return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
2476 		context_guc_id_invalid(ce) ||
2477 		!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) ||
2478 		!intel_context_is_pinned(ce);
2479 }
2480 
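/*
 * Undo guc_context_block(): re-enable scheduling unless other blocks are
 * still outstanding or the context can no longer be unblocked (e.g. banned,
 * pending disable or guc_id lost).
 */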
2481 static void guc_context_unblock(struct intel_context *ce)
2482 {
2483 	struct intel_guc *guc = ce_to_guc(ce);
2484 	unsigned long flags;
2485 	struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
2486 	intel_wakeref_t wakeref;
2487 	bool enable;
2488 
2489 	GEM_BUG_ON(context_enabled(ce));
2490 	GEM_BUG_ON(intel_context_is_child(ce));
2491 
2492 	spin_lock_irqsave(&ce->guc_state.lock, flags);
2493 
2494 	if (unlikely(submission_disabled(guc) ||
2495 		     context_cant_unblock(ce))) {
2496 		enable = false;
2497 	} else {
2498 		enable = true;
2499 		set_context_pending_enable(ce);
2500 		set_context_enabled(ce);
2501 		intel_context_get(ce);
2502 	}
2503 
2504 	decr_context_blocked(ce);
2505 
2506 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2507 
2508 	if (enable) {
2509 		with_intel_runtime_pm(runtime_pm, wakeref)
2510 			__guc_context_sched_enable(guc, ce);
2511 	}
2512 }
2513 
2514 static void guc_context_cancel_request(struct intel_context *ce,
2515 				       struct i915_request *rq)
2516 {
2517 	struct intel_context *block_context =
2518 		request_to_scheduling_context(rq);
2519 
2520 	if (i915_sw_fence_signaled(&rq->submit)) {
2521 		struct i915_sw_fence *fence;
2522 
2523 		intel_context_get(ce);
2524 		fence = guc_context_block(block_context);
2525 		i915_sw_fence_wait(fence);
2526 		if (!i915_request_completed(rq)) {
2527 			__i915_request_skip(rq);
2528 			guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
2529 					true);
2530 		}
2531 
2532 		/*
		 * XXX: Racy if the context is reset, see comment in
		 * __guc_reset_context().
2535 		 */
2536 		flush_work(&ce_to_guc(ce)->ct.requests.worker);
2537 
2538 		guc_context_unblock(block_context);
2539 		intel_context_put(ce);
2540 	}
2541 }
2542 
2543 static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
2544 						 u16 guc_id,
2545 						 u32 preemption_timeout)
2546 {
2547 	u32 action[] = {
2548 		INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
2549 		guc_id,
2550 		preemption_timeout
2551 	};
2552 
2553 	intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
2554 }
2555 
2556 static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
2557 {
2558 	struct intel_guc *guc = ce_to_guc(ce);
2559 	struct intel_runtime_pm *runtime_pm =
2560 		&ce->engine->gt->i915->runtime_pm;
2561 	intel_wakeref_t wakeref;
2562 	unsigned long flags;
2563 
2564 	GEM_BUG_ON(intel_context_is_child(ce));
2565 
2566 	guc_flush_submissions(guc);
2567 
2568 	spin_lock_irqsave(&ce->guc_state.lock, flags);
2569 	set_context_banned(ce);
2570 
2571 	if (submission_disabled(guc) ||
2572 	    (!context_enabled(ce) && !context_pending_disable(ce))) {
2573 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2574 
2575 		guc_cancel_context_requests(ce);
2576 		intel_engine_signal_breadcrumbs(ce->engine);
2577 	} else if (!context_pending_disable(ce)) {
2578 		u16 guc_id;
2579 
2580 		/*
2581 		 * We add +2 here as the schedule disable complete CTB handler
2582 		 * calls intel_context_sched_disable_unpin (-2 to pin_count).
2583 		 */
2584 		atomic_add(2, &ce->pin_count);
2585 
2586 		guc_id = prep_context_pending_disable(ce);
2587 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2588 
2589 		/*
2590 		 * In addition to disabling scheduling, set the preemption
2591 		 * timeout to the minimum value (1 us) so the banned context
2592 		 * gets kicked off the HW ASAP.
2593 		 */
2594 		with_intel_runtime_pm(runtime_pm, wakeref) {
2595 			__guc_context_set_preemption_timeout(guc, guc_id, 1);
2596 			__guc_context_sched_disable(guc, ce, guc_id);
2597 		}
2598 	} else {
2599 		if (!context_guc_id_invalid(ce))
2600 			with_intel_runtime_pm(runtime_pm, wakeref)
2601 				__guc_context_set_preemption_timeout(guc,
2602 								     ce->guc_id.id,
2603 								     1);
2604 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2605 	}
2606 }
2607 
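/*
 * Disable scheduling on an idle context so that it can be unpinned. If the
 * context picked up new requests, was already disabled, or submission is
 * disabled, simply drop the extra pin instead.
 */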
2608 static void guc_context_sched_disable(struct intel_context *ce)
2609 {
2610 	struct intel_guc *guc = ce_to_guc(ce);
2611 	unsigned long flags;
2612 	struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
2613 	intel_wakeref_t wakeref;
2614 	u16 guc_id;
2615 
2616 	GEM_BUG_ON(intel_context_is_child(ce));
2617 
2618 	spin_lock_irqsave(&ce->guc_state.lock, flags);
2619 
2620 	/*
	 * We have to check if the context has been disabled by another thread,
	 * check if submission has been disabled to seal a race with reset and
	 * finally check if any more requests have been committed to the
	 * context, ensuring that a request doesn't slip through the
	 * 'context_pending_disable' fence.
2626 	 */
2627 	if (unlikely(!context_enabled(ce) || submission_disabled(guc) ||
2628 		     context_has_committed_requests(ce))) {
2629 		clr_context_enabled(ce);
2630 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2631 		goto unpin;
2632 	}
2633 	guc_id = prep_context_pending_disable(ce);
2634 
2635 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2636 
2637 	with_intel_runtime_pm(runtime_pm, wakeref)
2638 		__guc_context_sched_disable(guc, ce, guc_id);
2639 
2640 	return;
2641 unpin:
2642 	intel_context_sched_disable_unpin(ce);
2643 }
2644 
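/*
 * Final step of destroying a registered context: mark it destroyed, take a GT
 * PM reference and send the deregister H2G. The remaining cleanup happens
 * when the deregister completion G2H is processed.
 */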
2645 static inline void guc_lrc_desc_unpin(struct intel_context *ce)
2646 {
2647 	struct intel_guc *guc = ce_to_guc(ce);
2648 	struct intel_gt *gt = guc_to_gt(guc);
2649 	unsigned long flags;
2650 	bool disabled;
2651 
2652 	GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
2653 	GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
2654 	GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
2655 	GEM_BUG_ON(context_enabled(ce));
2656 
2657 	/* Seal race with Reset */
2658 	spin_lock_irqsave(&ce->guc_state.lock, flags);
2659 	disabled = submission_disabled(guc);
2660 	if (likely(!disabled)) {
2661 		__intel_gt_pm_get(gt);
2662 		set_context_destroyed(ce);
2663 		clr_context_registered(ce);
2664 	}
2665 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2666 	if (unlikely(disabled)) {
2667 		release_guc_id(guc, ce);
2668 		__guc_context_destroy(ce);
2669 		return;
2670 	}
2671 
2672 	deregister_context(ce, ce->guc_id.id);
2673 }
2674 
2675 static void __guc_context_destroy(struct intel_context *ce)
2676 {
2677 	GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
2678 		   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
2679 		   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
2680 		   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
2681 	GEM_BUG_ON(ce->guc_state.number_committed_requests);
2682 
2683 	lrc_fini(ce);
2684 	intel_context_fini(ce);
2685 
2686 	if (intel_engine_is_virtual(ce->engine)) {
2687 		struct guc_virtual_engine *ve =
2688 			container_of(ce, typeof(*ve), context);
2689 
2690 		if (ve->base.breadcrumbs)
2691 			intel_breadcrumbs_put(ve->base.breadcrumbs);
2692 
2693 		kfree(ve);
2694 	} else {
2695 		intel_context_free(ce);
2696 	}
2697 }
2698 
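/*
 * Release guc_ids and free all contexts queued for destruction without
 * talking to the GuC; only used while submission is disabled or not yet
 * initialized, as asserted below.
 */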
2699 static void guc_flush_destroyed_contexts(struct intel_guc *guc)
2700 {
2701 	struct intel_context *ce;
2702 	unsigned long flags;
2703 
2704 	GEM_BUG_ON(!submission_disabled(guc) &&
2705 		   guc_submission_initialized(guc));
2706 
2707 	while (!list_empty(&guc->submission_state.destroyed_contexts)) {
2708 		spin_lock_irqsave(&guc->submission_state.lock, flags);
2709 		ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
2710 					      struct intel_context,
2711 					      destroyed_link);
2712 		if (ce)
2713 			list_del_init(&ce->destroyed_link);
2714 		spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2715 
2716 		if (!ce)
2717 			break;
2718 
2719 		release_guc_id(guc, ce);
2720 		__guc_context_destroy(ce);
2721 	}
2722 }
2723 
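/*
 * Pop contexts off the destroyed list and send a deregister H2G for each; the
 * final free happens once the corresponding G2H returns.
 */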
2724 static void deregister_destroyed_contexts(struct intel_guc *guc)
2725 {
2726 	struct intel_context *ce;
2727 	unsigned long flags;
2728 
2729 	while (!list_empty(&guc->submission_state.destroyed_contexts)) {
2730 		spin_lock_irqsave(&guc->submission_state.lock, flags);
2731 		ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
2732 					      struct intel_context,
2733 					      destroyed_link);
2734 		if (ce)
2735 			list_del_init(&ce->destroyed_link);
2736 		spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2737 
2738 		if (!ce)
2739 			break;
2740 
2741 		guc_lrc_desc_unpin(ce);
2742 	}
2743 }
2744 
2745 static void destroyed_worker_func(struct work_struct *w)
2746 {
2747 	struct intel_guc *guc = container_of(w, struct intel_guc,
2748 					     submission_state.destroyed_worker);
2749 	struct intel_gt *gt = guc_to_gt(guc);
2750 	int tmp;
2751 
2752 	with_intel_gt_pm(gt, tmp)
2753 		deregister_destroyed_contexts(guc);
2754 }
2755 
2756 static void guc_context_destroy(struct kref *kref)
2757 {
2758 	struct intel_context *ce = container_of(kref, typeof(*ce), ref);
2759 	struct intel_guc *guc = ce_to_guc(ce);
2760 	unsigned long flags;
2761 	bool destroy;
2762 
2763 	/*
	 * If the guc_id is invalid, this context's guc_id has been stolen and
	 * we can free the context immediately. It can also be freed immediately
	 * if the context is not registered with the GuC or the GuC is in the
	 * middle of a reset.
2767 	 */
2768 	spin_lock_irqsave(&guc->submission_state.lock, flags);
2769 	destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
2770 		!lrc_desc_registered(guc, ce->guc_id.id);
2771 	if (likely(!destroy)) {
2772 		if (!list_empty(&ce->guc_id.link))
2773 			list_del_init(&ce->guc_id.link);
2774 		list_add_tail(&ce->destroyed_link,
2775 			      &guc->submission_state.destroyed_contexts);
2776 	} else {
2777 		__release_guc_id(guc, ce);
2778 	}
2779 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
2780 	if (unlikely(destroy)) {
2781 		__guc_context_destroy(ce);
2782 		return;
2783 	}
2784 
2785 	/*
	 * We use a worker to issue the H2G to deregister the context as we may
	 * take a GT PM reference for the first time, which isn't allowed from
	 * an atomic context.
2789 	 */
2790 	queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker);
2791 }
2792 
2793 static int guc_context_alloc(struct intel_context *ce)
2794 {
2795 	return lrc_alloc(ce, ce->engine);
2796 }
2797 
2798 static void guc_context_set_prio(struct intel_guc *guc,
2799 				 struct intel_context *ce,
2800 				 u8 prio)
2801 {
2802 	u32 action[] = {
2803 		INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY,
2804 		ce->guc_id.id,
2805 		prio,
2806 	};
2807 
2808 	GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
2809 		   prio > GUC_CLIENT_PRIORITY_NORMAL);
2810 	lockdep_assert_held(&ce->guc_state.lock);
2811 
2812 	if (ce->guc_state.prio == prio || submission_disabled(guc) ||
2813 	    !context_registered(ce)) {
2814 		ce->guc_state.prio = prio;
2815 		return;
2816 	}
2817 
2818 	guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
2819 
2820 	ce->guc_state.prio = prio;
2821 	trace_intel_context_set_prio(ce);
2822 }
2823 
2824 static inline u8 map_i915_prio_to_guc_prio(int prio)
2825 {
2826 	if (prio == I915_PRIORITY_NORMAL)
2827 		return GUC_CLIENT_PRIORITY_KMD_NORMAL;
2828 	else if (prio < I915_PRIORITY_NORMAL)
2829 		return GUC_CLIENT_PRIORITY_NORMAL;
2830 	else if (prio < I915_PRIORITY_DISPLAY)
2831 		return GUC_CLIENT_PRIORITY_HIGH;
2832 	else
2833 		return GUC_CLIENT_PRIORITY_KMD_HIGH;
2834 }
2835 
2836 static inline void add_context_inflight_prio(struct intel_context *ce,
2837 					     u8 guc_prio)
2838 {
2839 	lockdep_assert_held(&ce->guc_state.lock);
2840 	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
2841 
2842 	++ce->guc_state.prio_count[guc_prio];
2843 
2844 	/* Overflow protection */
2845 	GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
2846 }
2847 
2848 static inline void sub_context_inflight_prio(struct intel_context *ce,
2849 					     u8 guc_prio)
2850 {
2851 	lockdep_assert_held(&ce->guc_state.lock);
2852 	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
2853 
2854 	/* Underflow protection */
2855 	GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
2856 
2857 	--ce->guc_state.prio_count[guc_prio];
2858 }
2859 
2860 static inline void update_context_prio(struct intel_context *ce)
2861 {
2862 	struct intel_guc *guc = &ce->engine->gt->uc.guc;
2863 	int i;
2864 
2865 	BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0);
2866 	BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);
2867 
2868 	lockdep_assert_held(&ce->guc_state.lock);
2869 
2870 	for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) {
2871 		if (ce->guc_state.prio_count[i]) {
2872 			guc_context_set_prio(guc, ce, i);
2873 			break;
2874 		}
2875 	}
2876 }
2877 
2878 static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio)
2879 {
2880 	/* Lower value is higher priority */
2881 	return new_guc_prio < old_guc_prio;
2882 }
2883 
2884 static void add_to_context(struct i915_request *rq)
2885 {
2886 	struct intel_context *ce = request_to_scheduling_context(rq);
2887 	u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq));
2888 
2889 	GEM_BUG_ON(intel_context_is_child(ce));
2890 	GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
2891 
2892 	spin_lock(&ce->guc_state.lock);
2893 	list_move_tail(&rq->sched.link, &ce->guc_state.requests);
2894 
2895 	if (rq->guc_prio == GUC_PRIO_INIT) {
2896 		rq->guc_prio = new_guc_prio;
2897 		add_context_inflight_prio(ce, rq->guc_prio);
2898 	} else if (new_guc_prio_higher(rq->guc_prio, new_guc_prio)) {
2899 		sub_context_inflight_prio(ce, rq->guc_prio);
2900 		rq->guc_prio = new_guc_prio;
2901 		add_context_inflight_prio(ce, rq->guc_prio);
2902 	}
2903 	update_context_prio(ce);
2904 
2905 	spin_unlock(&ce->guc_state.lock);
2906 }
2907 
2908 static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
2909 {
2910 	lockdep_assert_held(&ce->guc_state.lock);
2911 
2912 	if (rq->guc_prio != GUC_PRIO_INIT &&
2913 	    rq->guc_prio != GUC_PRIO_FINI) {
2914 		sub_context_inflight_prio(ce, rq->guc_prio);
2915 		update_context_prio(ce);
2916 	}
2917 	rq->guc_prio = GUC_PRIO_FINI;
2918 }
2919 
2920 static void remove_from_context(struct i915_request *rq)
2921 {
2922 	struct intel_context *ce = request_to_scheduling_context(rq);
2923 
2924 	GEM_BUG_ON(intel_context_is_child(ce));
2925 
2926 	spin_lock_irq(&ce->guc_state.lock);
2927 
2928 	list_del_init(&rq->sched.link);
2929 	clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
2930 
2931 	/* Prevent further __await_execution() registering a cb, then flush */
2932 	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
2933 
2934 	guc_prio_fini(rq, ce);
2935 
2936 	decr_context_committed_requests(ce);
2937 
2938 	spin_unlock_irq(&ce->guc_state.lock);
2939 
2940 	atomic_dec(&ce->guc_id.ref);
2941 	i915_request_notify_execute_cb_imm(rq);
2942 }
2943 
2944 static const struct intel_context_ops guc_context_ops = {
2945 	.alloc = guc_context_alloc,
2946 
2947 	.pre_pin = guc_context_pre_pin,
2948 	.pin = guc_context_pin,
2949 	.unpin = guc_context_unpin,
2950 	.post_unpin = guc_context_post_unpin,
2951 
2952 	.ban = guc_context_ban,
2953 
2954 	.cancel_request = guc_context_cancel_request,
2955 
2956 	.enter = intel_context_enter_engine,
2957 	.exit = intel_context_exit_engine,
2958 
2959 	.sched_disable = guc_context_sched_disable,
2960 
2961 	.reset = lrc_reset,
2962 	.destroy = guc_context_destroy,
2963 
2964 	.create_virtual = guc_create_virtual,
2965 	.create_parallel = guc_create_parallel,
2966 };
2967 
2968 static void submit_work_cb(struct irq_work *wrk)
2969 {
2970 	struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
2971 
2972 	might_lock(&rq->engine->sched_engine->lock);
2973 	i915_sw_fence_complete(&rq->submit);
2974 }
2975 
2976 static void __guc_signal_context_fence(struct intel_context *ce)
2977 {
2978 	struct i915_request *rq, *rn;
2979 
2980 	lockdep_assert_held(&ce->guc_state.lock);
2981 
2982 	if (!list_empty(&ce->guc_state.fences))
2983 		trace_intel_context_fence_release(ce);
2984 
2985 	/*
	 * Use irq_work to complete the submit fences so that the locking order
	 * of sched_engine->lock -> ce->guc_state.lock is preserved.
2988 	 */
2989 	list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
2990 				 guc_fence_link) {
2991 		list_del(&rq->guc_fence_link);
2992 		irq_work_queue(&rq->submit_work);
2993 	}
2994 
2995 	INIT_LIST_HEAD(&ce->guc_state.fences);
2996 }
2997 
2998 static void guc_signal_context_fence(struct intel_context *ce)
2999 {
3000 	unsigned long flags;
3001 
3002 	GEM_BUG_ON(intel_context_is_child(ce));
3003 
3004 	spin_lock_irqsave(&ce->guc_state.lock, flags);
3005 	clr_context_wait_for_deregister_to_register(ce);
3006 	__guc_signal_context_fence(ce);
3007 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3008 }
3009 
3010 static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
3011 {
3012 	return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
3013 		!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) &&
3014 		!submission_disabled(ce_to_guc(ce));
3015 }
3016 
3017 static void guc_context_init(struct intel_context *ce)
3018 {
3019 	const struct i915_gem_context *ctx;
3020 	int prio = I915_CONTEXT_DEFAULT_PRIORITY;
3021 
3022 	rcu_read_lock();
3023 	ctx = rcu_dereference(ce->gem_context);
3024 	if (ctx)
3025 		prio = ctx->sched.priority;
3026 	rcu_read_unlock();
3027 
3028 	ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
3029 	set_bit(CONTEXT_GUC_INIT, &ce->flags);
3030 }
3031 
3032 static int guc_request_alloc(struct i915_request *rq)
3033 {
3034 	struct intel_context *ce = request_to_scheduling_context(rq);
3035 	struct intel_guc *guc = ce_to_guc(ce);
3036 	unsigned long flags;
3037 	int ret;
3038 
3039 	GEM_BUG_ON(!intel_context_is_pinned(rq->context));
3040 
3041 	/*
3042 	 * Flush enough space to reduce the likelihood of waiting after
3043 	 * we start building the request - in which case we will just
3044 	 * have to repeat work.
3045 	 */
3046 	rq->reserved_space += GUC_REQUEST_SIZE;
3047 
3048 	/*
3049 	 * Note that after this point, we have committed to using
3050 	 * this request as it is being used to both track the
3051 	 * state of engine initialisation and liveness of the
3052 	 * golden renderstate above. Think twice before you try
3053 	 * to cancel/unwind this request now.
3054 	 */
3055 
3056 	/* Unconditionally invalidate GPU caches and TLBs. */
3057 	ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
3058 	if (ret)
3059 		return ret;
3060 
3061 	rq->reserved_space -= GUC_REQUEST_SIZE;
3062 
3063 	if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
3064 		guc_context_init(ce);
3065 
3066 	/*
3067 	 * Call pin_guc_id here rather than in the pinning step as with
	 * dma_resv, contexts can be repeatedly pinned / unpinned, thrashing the
	 * guc_id and creating horrible race conditions. This is especially bad
	 * when guc_ids are being stolen due to oversubscription. By the time
	 * this function is reached, it is guaranteed that the guc_id will be
	 * persistent until the generated request is retired, thus sealing these
	 * race conditions. It is still safe to fail here if guc_ids are
	 * exhausted and return -EAGAIN to the user, indicating that they can
	 * try again in the future.
3076 	 *
3077 	 * There is no need for a lock here as the timeline mutex ensures at
3078 	 * most one context can be executing this code path at once. The
3079 	 * guc_id_ref is incremented once for every request in flight and
3080 	 * decremented on each retire. When it is zero, a lock around the
3081 	 * increment (in pin_guc_id) is needed to seal a race with unpin_guc_id.
3082 	 */
3083 	if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
3084 		goto out;
3085 
3086 	ret = pin_guc_id(guc, ce);	/* returns 1 if new guc_id assigned */
3087 	if (unlikely(ret < 0))
3088 		return ret;
3089 	if (context_needs_register(ce, !!ret)) {
3090 		ret = guc_lrc_desc_pin(ce, true);
3091 		if (unlikely(ret)) {	/* unwind */
3092 			if (ret == -EPIPE) {
3093 				disable_submission(guc);
3094 				goto out;	/* GPU will be reset */
3095 			}
3096 			atomic_dec(&ce->guc_id.ref);
3097 			unpin_guc_id(guc, ce);
3098 			return ret;
3099 		}
3100 	}
3101 
3102 	clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
3103 
3104 out:
3105 	/*
	 * We block all requests on this context if a G2H is pending for a
	 * schedule disable or context deregistration, as the GuC will fail a
	 * schedule enable or context registration, respectively, while either
	 * G2H is pending. Once the G2H returns, the fence blocking these
	 * requests is released (see guc_signal_context_fence).
3111 	 */
3112 	spin_lock_irqsave(&ce->guc_state.lock, flags);
3113 	if (context_wait_for_deregister_to_register(ce) ||
3114 	    context_pending_disable(ce)) {
3115 		init_irq_work(&rq->submit_work, submit_work_cb);
3116 		i915_sw_fence_await(&rq->submit);
3117 
3118 		list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
3119 	}
3120 	incr_context_committed_requests(ce);
3121 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3122 
3123 	return 0;
3124 }
3125 
3126 static int guc_virtual_context_pre_pin(struct intel_context *ce,
3127 				       struct i915_gem_ww_ctx *ww,
3128 				       void **vaddr)
3129 {
3130 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3131 
3132 	return __guc_context_pre_pin(ce, engine, ww, vaddr);
3133 }
3134 
3135 static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
3136 {
3137 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3138 	int ret = __guc_context_pin(ce, engine, vaddr);
3139 	intel_engine_mask_t tmp, mask = ce->engine->mask;
3140 
3141 	if (likely(!ret))
3142 		for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3143 			intel_engine_pm_get(engine);
3144 
3145 	return ret;
3146 }
3147 
3148 static void guc_virtual_context_unpin(struct intel_context *ce)
3149 {
3150 	intel_engine_mask_t tmp, mask = ce->engine->mask;
3151 	struct intel_engine_cs *engine;
3152 	struct intel_guc *guc = ce_to_guc(ce);
3153 
3154 	GEM_BUG_ON(context_enabled(ce));
3155 	GEM_BUG_ON(intel_context_is_barrier(ce));
3156 
3157 	unpin_guc_id(guc, ce);
3158 	lrc_unpin(ce);
3159 
3160 	for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3161 		intel_engine_pm_put_async(engine);
3162 }
3163 
3164 static void guc_virtual_context_enter(struct intel_context *ce)
3165 {
3166 	intel_engine_mask_t tmp, mask = ce->engine->mask;
3167 	struct intel_engine_cs *engine;
3168 
3169 	for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3170 		intel_engine_pm_get(engine);
3171 
3172 	intel_timeline_enter(ce->timeline);
3173 }
3174 
3175 static void guc_virtual_context_exit(struct intel_context *ce)
3176 {
3177 	intel_engine_mask_t tmp, mask = ce->engine->mask;
3178 	struct intel_engine_cs *engine;
3179 
3180 	for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3181 		intel_engine_pm_put(engine);
3182 
3183 	intel_timeline_exit(ce->timeline);
3184 }
3185 
3186 static int guc_virtual_context_alloc(struct intel_context *ce)
3187 {
3188 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3189 
3190 	return lrc_alloc(ce, engine);
3191 }
3192 
3193 static const struct intel_context_ops virtual_guc_context_ops = {
3194 	.alloc = guc_virtual_context_alloc,
3195 
3196 	.pre_pin = guc_virtual_context_pre_pin,
3197 	.pin = guc_virtual_context_pin,
3198 	.unpin = guc_virtual_context_unpin,
3199 	.post_unpin = guc_context_post_unpin,
3200 
3201 	.ban = guc_context_ban,
3202 
3203 	.cancel_request = guc_context_cancel_request,
3204 
3205 	.enter = guc_virtual_context_enter,
3206 	.exit = guc_virtual_context_exit,
3207 
3208 	.sched_disable = guc_context_sched_disable,
3209 
3210 	.destroy = guc_context_destroy,
3211 
3212 	.get_sibling = guc_virtual_get_sibling,
3213 };
3214 
3215 static int guc_parent_context_pin(struct intel_context *ce, void *vaddr)
3216 {
3217 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3218 	struct intel_guc *guc = ce_to_guc(ce);
3219 	int ret;
3220 
3221 	GEM_BUG_ON(!intel_context_is_parent(ce));
3222 	GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3223 
3224 	ret = pin_guc_id(guc, ce);
3225 	if (unlikely(ret < 0))
3226 		return ret;
3227 
3228 	return __guc_context_pin(ce, engine, vaddr);
3229 }
3230 
3231 static int guc_child_context_pin(struct intel_context *ce, void *vaddr)
3232 {
3233 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3234 
3235 	GEM_BUG_ON(!intel_context_is_child(ce));
3236 	GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3237 
3238 	__intel_context_pin(ce->parallel.parent);
3239 	return __guc_context_pin(ce, engine, vaddr);
3240 }
3241 
3242 static void guc_parent_context_unpin(struct intel_context *ce)
3243 {
3244 	struct intel_guc *guc = ce_to_guc(ce);
3245 
3246 	GEM_BUG_ON(context_enabled(ce));
3247 	GEM_BUG_ON(intel_context_is_barrier(ce));
3248 	GEM_BUG_ON(!intel_context_is_parent(ce));
3249 	GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3250 
3251 	if (ce->parallel.last_rq)
3252 		i915_request_put(ce->parallel.last_rq);
3253 	unpin_guc_id(guc, ce);
3254 	lrc_unpin(ce);
3255 }
3256 
3257 static void guc_child_context_unpin(struct intel_context *ce)
3258 {
3259 	GEM_BUG_ON(context_enabled(ce));
3260 	GEM_BUG_ON(intel_context_is_barrier(ce));
3261 	GEM_BUG_ON(!intel_context_is_child(ce));
3262 	GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3263 
3264 	lrc_unpin(ce);
3265 }
3266 
3267 static void guc_child_context_post_unpin(struct intel_context *ce)
3268 {
3269 	GEM_BUG_ON(!intel_context_is_child(ce));
3270 	GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
3271 	GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
3272 
3273 	lrc_post_unpin(ce);
3274 	intel_context_unpin(ce->parallel.parent);
3275 }
3276 
3277 static void guc_child_context_destroy(struct kref *kref)
3278 {
3279 	struct intel_context *ce = container_of(kref, typeof(*ce), ref);
3280 
3281 	__guc_context_destroy(ce);
3282 }
3283 
3284 static const struct intel_context_ops virtual_parent_context_ops = {
3285 	.alloc = guc_virtual_context_alloc,
3286 
3287 	.pre_pin = guc_context_pre_pin,
3288 	.pin = guc_parent_context_pin,
3289 	.unpin = guc_parent_context_unpin,
3290 	.post_unpin = guc_context_post_unpin,
3291 
3292 	.ban = guc_context_ban,
3293 
3294 	.cancel_request = guc_context_cancel_request,
3295 
3296 	.enter = guc_virtual_context_enter,
3297 	.exit = guc_virtual_context_exit,
3298 
3299 	.sched_disable = guc_context_sched_disable,
3300 
3301 	.destroy = guc_context_destroy,
3302 
3303 	.get_sibling = guc_virtual_get_sibling,
3304 };
3305 
3306 static const struct intel_context_ops virtual_child_context_ops = {
3307 	.alloc = guc_virtual_context_alloc,
3308 
3309 	.pre_pin = guc_context_pre_pin,
3310 	.pin = guc_child_context_pin,
3311 	.unpin = guc_child_context_unpin,
3312 	.post_unpin = guc_child_context_post_unpin,
3313 
3314 	.cancel_request = guc_context_cancel_request,
3315 
3316 	.enter = guc_virtual_context_enter,
3317 	.exit = guc_virtual_context_exit,
3318 
3319 	.destroy = guc_child_context_destroy,
3320 
3321 	.get_sibling = guc_virtual_get_sibling,
3322 };
3323 
3324 /*
3325  * The below override of the breadcrumbs is enabled when the user configures a
3326  * context for parallel submission (multi-lrc, parent-child).
3327  *
 * The overridden breadcrumb routines implement an algorithm which allows the
 * GuC to safely preempt all the hw contexts configured for parallel submission
 * between each BB. The contract between the i915 and the GuC is that if the
 * parent context can be preempted, all the children can be preempted, and the
 * GuC will always try to preempt the parent before the children. A handshake
 * between the parent / children breadcrumbs ensures the i915 holds up its end
 * of the deal, creating a window to preempt between each set of BBs.
3335  */
3336 static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
3337 						     u64 offset, u32 len,
3338 						     const unsigned int flags);
3339 static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
3340 						    u64 offset, u32 len,
3341 						    const unsigned int flags);
3342 static u32 *
3343 emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
3344 						 u32 *cs);
3345 static u32 *
3346 emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
3347 						u32 *cs);
3348 
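/*
 * Create the contexts for a parallel (multi-LRC) submission: one virtual
 * engine context per slot of the width, with the first acting as the parent
 * and the rest bound to it as children. The BB-start and fini-breadcrumb
 * emitters are overridden with the no-preempt-mid-batch variants described
 * above.
 */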
3349 static struct intel_context *
3350 guc_create_parallel(struct intel_engine_cs **engines,
3351 		    unsigned int num_siblings,
3352 		    unsigned int width)
3353 {
3354 	struct intel_engine_cs **siblings = NULL;
3355 	struct intel_context *parent = NULL, *ce, *err;
3356 	int i, j;
3357 
3358 	siblings = kmalloc_array(num_siblings,
3359 				 sizeof(*siblings),
3360 				 GFP_KERNEL);
3361 	if (!siblings)
3362 		return ERR_PTR(-ENOMEM);
3363 
3364 	for (i = 0; i < width; ++i) {
3365 		for (j = 0; j < num_siblings; ++j)
3366 			siblings[j] = engines[i * num_siblings + j];
3367 
3368 		ce = intel_engine_create_virtual(siblings, num_siblings,
3369 						 FORCE_VIRTUAL);
3370 		if (IS_ERR(ce)) {
3371 			err = ERR_CAST(ce);
3372 			goto unwind;
3373 		}
3374 
3375 		if (i == 0) {
3376 			parent = ce;
3377 			parent->ops = &virtual_parent_context_ops;
3378 		} else {
3379 			ce->ops = &virtual_child_context_ops;
3380 			intel_context_bind_parent_child(parent, ce);
3381 		}
3382 	}
3383 
3384 	parent->parallel.fence_context = dma_fence_context_alloc(1);
3385 
3386 	parent->engine->emit_bb_start =
3387 		emit_bb_start_parent_no_preempt_mid_batch;
3388 	parent->engine->emit_fini_breadcrumb =
3389 		emit_fini_breadcrumb_parent_no_preempt_mid_batch;
3390 	parent->engine->emit_fini_breadcrumb_dw =
3391 		12 + 4 * parent->parallel.number_children;
3392 	for_each_child(parent, ce) {
3393 		ce->engine->emit_bb_start =
3394 			emit_bb_start_child_no_preempt_mid_batch;
3395 		ce->engine->emit_fini_breadcrumb =
3396 			emit_fini_breadcrumb_child_no_preempt_mid_batch;
3397 		ce->engine->emit_fini_breadcrumb_dw = 16;
3398 	}
3399 
3400 	kfree(siblings);
3401 	return parent;
3402 
3403 unwind:
3404 	if (parent)
3405 		intel_context_put(parent);
3406 	kfree(siblings);
3407 	return err;
3408 }
3409 
3410 static bool
3411 guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b)
3412 {
3413 	struct intel_engine_cs *sibling;
3414 	intel_engine_mask_t tmp, mask = b->engine_mask;
3415 	bool result = false;
3416 
3417 	for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
3418 		result |= intel_engine_irq_enable(sibling);
3419 
3420 	return result;
3421 }
3422 
3423 static void
3424 guc_irq_disable_breadcrumbs(struct intel_breadcrumbs *b)
3425 {
3426 	struct intel_engine_cs *sibling;
3427 	intel_engine_mask_t tmp, mask = b->engine_mask;
3428 
3429 	for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
3430 		intel_engine_irq_disable(sibling);
3431 }
3432 
3433 static void guc_init_breadcrumbs(struct intel_engine_cs *engine)
3434 {
3435 	int i;
3436 
3437 	/*
	 * In GuC submission mode we do not know which physical engine a request
	 * will be scheduled on; this creates a problem because the breadcrumb
3440 	 * interrupt is per physical engine. To work around this we attach
3441 	 * requests and direct all breadcrumb interrupts to the first instance
3442 	 * of an engine per class. In addition all breadcrumb interrupts are
3443 	 * enabled / disabled across an engine class in unison.
3444 	 */
3445 	for (i = 0; i < MAX_ENGINE_INSTANCE; ++i) {
3446 		struct intel_engine_cs *sibling =
3447 			engine->gt->engine_class[engine->class][i];
3448 
3449 		if (sibling) {
3450 			if (engine->breadcrumbs != sibling->breadcrumbs) {
3451 				intel_breadcrumbs_put(engine->breadcrumbs);
3452 				engine->breadcrumbs =
3453 					intel_breadcrumbs_get(sibling->breadcrumbs);
3454 			}
3455 			break;
3456 		}
3457 	}
3458 
3459 	if (engine->breadcrumbs) {
3460 		engine->breadcrumbs->engine_mask |= engine->mask;
3461 		engine->breadcrumbs->irq_enable = guc_irq_enable_breadcrumbs;
3462 		engine->breadcrumbs->irq_disable = guc_irq_disable_breadcrumbs;
3463 	}
3464 }
3465 
3466 static void guc_bump_inflight_request_prio(struct i915_request *rq,
3467 					   int prio)
3468 {
3469 	struct intel_context *ce = request_to_scheduling_context(rq);
3470 	u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
3471 
3472 	/* Short circuit function */
3473 	if (prio < I915_PRIORITY_NORMAL ||
3474 	    rq->guc_prio == GUC_PRIO_FINI ||
3475 	    (rq->guc_prio != GUC_PRIO_INIT &&
3476 	     !new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
3477 		return;
3478 
3479 	spin_lock(&ce->guc_state.lock);
3480 	if (rq->guc_prio != GUC_PRIO_FINI) {
3481 		if (rq->guc_prio != GUC_PRIO_INIT)
3482 			sub_context_inflight_prio(ce, rq->guc_prio);
3483 		rq->guc_prio = new_guc_prio;
3484 		add_context_inflight_prio(ce, rq->guc_prio);
3485 		update_context_prio(ce);
3486 	}
3487 	spin_unlock(&ce->guc_state.lock);
3488 }
3489 
3490 static void guc_retire_inflight_request_prio(struct i915_request *rq)
3491 {
3492 	struct intel_context *ce = request_to_scheduling_context(rq);
3493 
3494 	spin_lock(&ce->guc_state.lock);
3495 	guc_prio_fini(rq, ce);
3496 	spin_unlock(&ce->guc_state.lock);
3497 }
3498 
3499 static void sanitize_hwsp(struct intel_engine_cs *engine)
3500 {
3501 	struct intel_timeline *tl;
3502 
3503 	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
3504 		intel_timeline_reset_seqno(tl);
3505 }
3506 
3507 static void guc_sanitize(struct intel_engine_cs *engine)
3508 {
3509 	/*
3510 	 * Poison residual state on resume, in case the suspend didn't!
3511 	 *
3512 	 * We have to assume that across suspend/resume (or other loss
3513 	 * of control) the contents of our pinned buffers have been
3514 	 * lost, replaced by garbage. Since this doesn't always happen,
3515 	 * let's poison such state so that we more quickly spot when
3516 	 * we falsely assume it has been preserved.
3517 	 */
3518 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
3519 		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
3520 
3521 	/*
3522 	 * The kernel_context HWSP is stored in the status_page. As above,
3523 	 * that may be lost on resume/initialisation, and so we need to
3524 	 * reset the value in the HWSP.
3525 	 */
3526 	sanitize_hwsp(engine);
3527 
3528 	/* And scrub the dirty cachelines for the HWSP */
3529 	clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
3530 
3531 	intel_engine_reset_pinned_contexts(engine);
3532 }
3533 
3534 static void setup_hwsp(struct intel_engine_cs *engine)
3535 {
3536 	intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
3537 
3538 	ENGINE_WRITE_FW(engine,
3539 			RING_HWS_PGA,
3540 			i915_ggtt_offset(engine->status_page.vma));
3541 }
3542 
3543 static void start_engine(struct intel_engine_cs *engine)
3544 {
3545 	ENGINE_WRITE_FW(engine,
3546 			RING_MODE_GEN7,
3547 			_MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
3548 
3549 	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
3550 	ENGINE_POSTING_READ(engine, RING_MI_MODE);
3551 }
3552 
3553 static int guc_resume(struct intel_engine_cs *engine)
3554 {
3555 	assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
3556 
3557 	intel_mocs_init_engine(engine);
3558 
3559 	intel_breadcrumbs_reset(engine->breadcrumbs);
3560 
3561 	setup_hwsp(engine);
3562 	start_engine(engine);
3563 
3564 	return 0;
3565 }
3566 
3567 static bool guc_sched_engine_disabled(struct i915_sched_engine *sched_engine)
3568 {
3569 	return !sched_engine->tasklet.callback;
3570 }
3571 
3572 static void guc_set_default_submission(struct intel_engine_cs *engine)
3573 {
3574 	engine->submit_request = guc_submit_request;
3575 }
3576 
3577 static inline void guc_kernel_context_pin(struct intel_guc *guc,
3578 					  struct intel_context *ce)
3579 {
3580 	if (context_guc_id_invalid(ce))
3581 		pin_guc_id(guc, ce);
3582 	guc_lrc_desc_pin(ce, true);
3583 }
3584 
3585 static inline void guc_init_lrc_mapping(struct intel_guc *guc)
3586 {
3587 	struct intel_gt *gt = guc_to_gt(guc);
3588 	struct intel_engine_cs *engine;
3589 	enum intel_engine_id id;
3590 
3591 	/* make sure all descriptors are clean... */
3592 	xa_destroy(&guc->context_lookup);
3593 
3594 	/*
3595 	 * Some contexts might have been pinned before we enabled GuC
3596 	 * submission, so we need to add them to the GuC bookkeeping.
3597 	 * Also, after a reset of the GuC we want to make sure that the
3598 	 * information shared with the GuC is properly reset. The kernel LRCs are
3599 	 * not attached to the gem_context, so they need to be added separately.
3600 	 *
3601 	 * Note: we purposefully do not check the return of guc_lrc_desc_pin,
3602 	 * because that function can only fail if a reset is just starting. This
3603 	 * is at the end of reset, so presumably another reset isn't happening
3604 	 * and even if it did, this code would be run again.
3605 	 */
3606 
3607 	for_each_engine(engine, gt, id) {
3608 		struct intel_context *ce;
3609 
3610 		list_for_each_entry(ce, &engine->pinned_contexts_list,
3611 				    pinned_contexts_link)
3612 			guc_kernel_context_pin(guc, ce);
3613 	}
3614 }
3615 
3616 static void guc_release(struct intel_engine_cs *engine)
3617 {
3618 	engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
3619 
3620 	intel_engine_cleanup_common(engine);
3621 	lrc_fini_wa_ctx(engine);
3622 }
3623 
3624 static void virtual_guc_bump_serial(struct intel_engine_cs *engine)
3625 {
3626 	struct intel_engine_cs *e;
3627 	intel_engine_mask_t tmp, mask = engine->mask;
3628 
3629 	for_each_engine_masked(e, engine->gt, mask, tmp)
3630 		e->serial++;
3631 }
3632 
3633 static void guc_default_vfuncs(struct intel_engine_cs *engine)
3634 {
3635 	/* Default vfuncs which can be overridden by each engine. */
3636 
3637 	engine->resume = guc_resume;
3638 
3639 	engine->cops = &guc_context_ops;
3640 	engine->request_alloc = guc_request_alloc;
3641 	engine->add_active_request = add_to_context;
3642 	engine->remove_active_request = remove_from_context;
3643 
3644 	engine->sched_engine->schedule = i915_schedule;
3645 
3646 	engine->reset.prepare = guc_reset_nop;
3647 	engine->reset.rewind = guc_rewind_nop;
3648 	engine->reset.cancel = guc_reset_nop;
3649 	engine->reset.finish = guc_reset_nop;
3650 
3651 	engine->emit_flush = gen8_emit_flush_xcs;
3652 	engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
3653 	engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
3654 	if (GRAPHICS_VER(engine->i915) >= 12) {
3655 		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
3656 		engine->emit_flush = gen12_emit_flush_xcs;
3657 	}
3658 	engine->set_default_submission = guc_set_default_submission;
3659 	engine->busyness = guc_engine_busyness;
3660 
3661 	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
3662 	engine->flags |= I915_ENGINE_HAS_PREEMPTION;
3663 	engine->flags |= I915_ENGINE_HAS_TIMESLICES;
3664 
3665 	/*
3666 	 * TODO: GuC supports timeslicing and semaphores as well, but they're
3667 	 * handled by the firmware so some minor tweaks are required before
3668 	 * enabling.
3669 	 *
3670 	 * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
3671 	 */
3672 
3673 	engine->emit_bb_start = gen8_emit_bb_start;
3674 }
3675 
3676 static void rcs_submission_override(struct intel_engine_cs *engine)
3677 {
3678 	switch (GRAPHICS_VER(engine->i915)) {
3679 	case 12:
3680 		engine->emit_flush = gen12_emit_flush_rcs;
3681 		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
3682 		break;
3683 	case 11:
3684 		engine->emit_flush = gen11_emit_flush_rcs;
3685 		engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
3686 		break;
3687 	default:
3688 		engine->emit_flush = gen8_emit_flush_rcs;
3689 		engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
3690 		break;
3691 	}
3692 }
3693 
3694 static inline void guc_default_irqs(struct intel_engine_cs *engine)
3695 {
3696 	engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
3697 	intel_engine_set_irq_handler(engine, cs_irq_handler);
3698 }
3699 
3700 static void guc_sched_engine_destroy(struct kref *kref)
3701 {
3702 	struct i915_sched_engine *sched_engine =
3703 		container_of(kref, typeof(*sched_engine), ref);
3704 	struct intel_guc *guc = sched_engine->private_data;
3705 
3706 	guc->sched_engine = NULL;
3707 	tasklet_kill(&sched_engine->tasklet); /* flush the callback */
3708 	kfree(sched_engine);
3709 }
3710 
3711 int intel_guc_submission_setup(struct intel_engine_cs *engine)
3712 {
3713 	struct drm_i915_private *i915 = engine->i915;
3714 	struct intel_guc *guc = &engine->gt->uc.guc;
3715 
3716 	/*
3717 	 * The setup relies on several assumptions (e.g. irqs always enabled)
3718 	 * that are only valid on gen11+
3719 	 */
3720 	GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
3721 
3722 	if (!guc->sched_engine) {
3723 		guc->sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
3724 		if (!guc->sched_engine)
3725 			return -ENOMEM;
3726 
3727 		guc->sched_engine->schedule = i915_schedule;
3728 		guc->sched_engine->disabled = guc_sched_engine_disabled;
3729 		guc->sched_engine->private_data = guc;
3730 		guc->sched_engine->destroy = guc_sched_engine_destroy;
3731 		guc->sched_engine->bump_inflight_request_prio =
3732 			guc_bump_inflight_request_prio;
3733 		guc->sched_engine->retire_inflight_request_prio =
3734 			guc_retire_inflight_request_prio;
3735 		tasklet_setup(&guc->sched_engine->tasklet,
3736 			      guc_submission_tasklet);
3737 	}
3738 	i915_sched_engine_put(engine->sched_engine);
3739 	engine->sched_engine = i915_sched_engine_get(guc->sched_engine);
3740 
3741 	guc_default_vfuncs(engine);
3742 	guc_default_irqs(engine);
3743 	guc_init_breadcrumbs(engine);
3744 
3745 	if (engine->class == RENDER_CLASS)
3746 		rcs_submission_override(engine);
3747 
3748 	lrc_init_wa_ctx(engine);
3749 
3750 	/* Finally, take ownership and responsibility for cleanup! */
3751 	engine->sanitize = guc_sanitize;
3752 	engine->release = guc_release;
3753 
3754 	return 0;
3755 }
3756 
3757 void intel_guc_submission_enable(struct intel_guc *guc)
3758 {
3759 	guc_init_lrc_mapping(guc);
3760 	guc_init_engine_stats(guc);
3761 }
3762 
3763 void intel_guc_submission_disable(struct intel_guc *guc)
3764 {
3765 	/* Note: By the time we're here, GuC may have already been reset */
3766 }
3767 
3768 static bool __guc_submission_supported(struct intel_guc *guc)
3769 {
3770 	/* GuC submission is unavailable for pre-Gen11 */
3771 	return intel_guc_is_supported(guc) &&
3772 	       GRAPHICS_VER(guc_to_gt(guc)->i915) >= 11;
3773 }
3774 
3775 static bool __guc_submission_selected(struct intel_guc *guc)
3776 {
3777 	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
3778 
3779 	if (!intel_guc_submission_is_supported(guc))
3780 		return false;
3781 
3782 	return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
3783 }
3784 
3785 void intel_guc_submission_init_early(struct intel_guc *guc)
3786 {
3787 	guc->submission_state.num_guc_ids = GUC_MAX_LRC_DESCRIPTORS;
3788 	guc->submission_supported = __guc_submission_supported(guc);
3789 	guc->submission_selected = __guc_submission_selected(guc);
3790 }
3791 
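/*
 * Resolve the guc_id (desc_idx) carried in a G2H message back to its
 * intel_context, rejecting out-of-range ids, ids with no registered context
 * and child contexts, which are never expected in a G2H message.
 */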
3792 static inline struct intel_context *
3793 g2h_context_lookup(struct intel_guc *guc, u32 desc_idx)
3794 {
3795 	struct intel_context *ce;
3796 
3797 	if (unlikely(desc_idx >= GUC_MAX_LRC_DESCRIPTORS)) {
3798 		drm_err(&guc_to_gt(guc)->i915->drm,
3799 			"Invalid desc_idx %u", desc_idx);
3800 		return NULL;
3801 	}
3802 
3803 	ce = __get_context(guc, desc_idx);
3804 	if (unlikely(!ce)) {
3805 		drm_err(&guc_to_gt(guc)->i915->drm,
3806 			"Context is NULL, desc_idx %u", desc_idx);
3807 		return NULL;
3808 	}
3809 
3810 	if (unlikely(intel_context_is_child(ce))) {
3811 		drm_err(&guc_to_gt(guc)->i915->drm,
3812 			"Context is child, desc_idx %u", desc_idx);
3813 		return NULL;
3814 	}
3815 
3816 	return ce;
3817 }
3818 
3819 int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
3820 					  const u32 *msg,
3821 					  u32 len)
3822 {
3823 	struct intel_context *ce;
3824 	u32 desc_idx = msg[0];
3825 
3826 	if (unlikely(len < 1)) {
3827 		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
3828 		return -EPROTO;
3829 	}
3830 
3831 	ce = g2h_context_lookup(guc, desc_idx);
3832 	if (unlikely(!ce))
3833 		return -EPROTO;
3834 
3835 	trace_intel_context_deregister_done(ce);
3836 
3837 #ifdef CONFIG_DRM_I915_SELFTEST
3838 	if (unlikely(ce->drop_deregister)) {
3839 		ce->drop_deregister = false;
3840 		return 0;
3841 	}
3842 #endif
3843 
3844 	if (context_wait_for_deregister_to_register(ce)) {
3845 		struct intel_runtime_pm *runtime_pm =
3846 			&ce->engine->gt->i915->runtime_pm;
3847 		intel_wakeref_t wakeref;
3848 
3849 		/*
3850 		 * The previous owner of this guc_id has been deregistered, so it
3851 		 * is now safe to register this context.
3852 		 */
3853 		with_intel_runtime_pm(runtime_pm, wakeref)
3854 			register_context(ce, true);
3855 		guc_signal_context_fence(ce);
3856 		intel_context_put(ce);
3857 	} else if (context_destroyed(ce)) {
3858 		/* Context has been destroyed */
3859 		intel_gt_pm_put_async(guc_to_gt(guc));
3860 		release_guc_id(guc, ce);
3861 		__guc_context_destroy(ce);
3862 	}
3863 
3864 	decr_outstanding_submission_g2h(guc);
3865 
3866 	return 0;
3867 }
3868 
3869 int intel_guc_sched_done_process_msg(struct intel_guc *guc,
3870 				     const u32 *msg,
3871 				     u32 len)
3872 {
3873 	struct intel_context *ce;
3874 	unsigned long flags;
3875 	u32 desc_idx = msg[0];
3876 
3877 	if (unlikely(len < 2)) {
3878 		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
3879 		return -EPROTO;
3880 	}
3881 
3882 	ce = g2h_context_lookup(guc, desc_idx);
3883 	if (unlikely(!ce))
3884 		return -EPROTO;
3885 
3886 	if (unlikely(context_destroyed(ce) ||
3887 		     (!context_pending_enable(ce) &&
3888 		     !context_pending_disable(ce)))) {
3889 		drm_err(&guc_to_gt(guc)->i915->drm,
3890 			"Bad context sched_state 0x%x, desc_idx %u",
3891 			ce->guc_state.sched_state, desc_idx);
3892 		return -EPROTO;
3893 	}
3894 
3895 	trace_intel_context_sched_done(ce);
3896 
3897 	if (context_pending_enable(ce)) {
3898 #ifdef CONFIG_DRM_I915_SELFTEST
3899 		if (unlikely(ce->drop_schedule_enable)) {
3900 			ce->drop_schedule_enable = false;
3901 			return 0;
3902 		}
3903 #endif
3904 
3905 		spin_lock_irqsave(&ce->guc_state.lock, flags);
3906 		clr_context_pending_enable(ce);
3907 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3908 	} else if (context_pending_disable(ce)) {
3909 		bool banned;
3910 
3911 #ifdef CONFIG_DRM_I915_SELFTEST
3912 		if (unlikely(ce->drop_schedule_disable)) {
3913 			ce->drop_schedule_disable = false;
3914 			return 0;
3915 		}
3916 #endif
3917 
3918 		/*
3919 		 * Unpin must be done before __guc_signal_context_fence,
3920 		 * otherwise a race exists where requests could be submitted and
3921 		 * retired before this unpin completes, resulting in the
3922 		 * pin_count dropping to zero while the context is still
3923 		 * enabled.
3924 		 */
3925 		intel_context_sched_disable_unpin(ce);
3926 
3927 		spin_lock_irqsave(&ce->guc_state.lock, flags);
3928 		banned = context_banned(ce);
3929 		clr_context_banned(ce);
3930 		clr_context_pending_disable(ce);
3931 		__guc_signal_context_fence(ce);
3932 		guc_blocked_fence_complete(ce);
3933 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3934 
3935 		if (banned) {
3936 			guc_cancel_context_requests(ce);
3937 			intel_engine_signal_breadcrumbs(ce->engine);
3938 		}
3939 	}
3940 
3941 	decr_outstanding_submission_g2h(guc);
3942 	intel_context_put(ce);
3943 
3944 	return 0;
3945 }
3946 
3947 static void capture_error_state(struct intel_guc *guc,
3948 				struct intel_context *ce)
3949 {
3950 	struct intel_gt *gt = guc_to_gt(guc);
3951 	struct drm_i915_private *i915 = gt->i915;
3952 	struct intel_engine_cs *engine = __context_to_physical_engine(ce);
3953 	intel_wakeref_t wakeref;
3954 
3955 	intel_engine_set_hung_context(engine, ce);
3956 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
3957 		i915_capture_error_state(gt, engine->mask);
3958 	atomic_inc(&i915->gpu_error.reset_engine_count[engine->uabi_class]);
3959 }
3960 
3961 static void guc_context_replay(struct intel_context *ce)
3962 {
3963 	struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
3964 
3965 	__guc_reset_context(ce, true);
3966 	tasklet_hi_schedule(&sched_engine->tasklet);
3967 }
3968 
3969 static void guc_handle_context_reset(struct intel_guc *guc,
3970 				     struct intel_context *ce)
3971 {
3972 	trace_intel_context_reset(ce);
3973 
3974 	/*
3975 	 * XXX: Racey if request cancellation has occurred, see comment in
3976 	 * __guc_reset_context().
3977 	 */
3978 	if (likely(!intel_context_is_banned(ce) &&
3979 		   !context_blocked(ce))) {
3980 		capture_error_state(guc, ce);
3981 		guc_context_replay(ce);
3982 	}
3983 }
3984 
3985 int intel_guc_context_reset_process_msg(struct intel_guc *guc,
3986 					const u32 *msg, u32 len)
3987 {
3988 	struct intel_context *ce;
3989 	unsigned long flags;
3990 	int desc_idx;
3991 
3992 	if (unlikely(len != 1)) {
3993 		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
3994 		return -EPROTO;
3995 	}
3996 
3997 	desc_idx = msg[0];
3998 
3999 	/*
4000 	 * The context lookup uses the xarray but lookups only require the RCU
4001 	 * lock, not the full spinlock. So take the lock explicitly and hold it
4002 	 * until a reference to the context has been taken, to ensure it can't be
4003 	 * destroyed asynchronously before the reset handling is done.
4004 	 */
4005 	xa_lock_irqsave(&guc->context_lookup, flags);
4006 	ce = g2h_context_lookup(guc, desc_idx);
4007 	if (ce)
4008 		intel_context_get(ce);
4009 	xa_unlock_irqrestore(&guc->context_lookup, flags);
4010 
4011 	if (unlikely(!ce))
4012 		return -EPROTO;
4013 
4014 	guc_handle_context_reset(guc, ce);
4015 	intel_context_put(ce);
4016 
4017 	return 0;
4018 }
4019 
4020 static struct intel_engine_cs *
4021 guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
4022 {
4023 	struct intel_gt *gt = guc_to_gt(guc);
4024 	u8 engine_class = guc_class_to_engine_class(guc_class);
4025 
4026 	/* Class index is checked in class converter */
4027 	GEM_BUG_ON(instance > MAX_ENGINE_INSTANCE);
4028 
4029 	return gt->engine_class[engine_class][instance];
4030 }
4031 
4032 int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
4033 					 const u32 *msg, u32 len)
4034 {
4035 	struct intel_engine_cs *engine;
4036 	struct intel_gt *gt = guc_to_gt(guc);
4037 	u8 guc_class, instance;
4038 	u32 reason;
4039 
4040 	if (unlikely(len != 3)) {
4041 		drm_err(&gt->i915->drm, "Invalid length %u", len);
4042 		return -EPROTO;
4043 	}
4044 
4045 	guc_class = msg[0];
4046 	instance = msg[1];
4047 	reason = msg[2];
4048 
4049 	engine = guc_lookup_engine(guc, guc_class, instance);
4050 	if (unlikely(!engine)) {
4051 		drm_err(&gt->i915->drm,
4052 			"Invalid engine %d:%d", guc_class, instance);
4053 		return -EPROTO;
4054 	}
4055 
4056 	/*
4057 	 * This is an unexpected failure of a hardware feature. So, log a real
4058 	 * error message, not just the informational one that comes with the reset.
4059 	 */
4060 	drm_err(&gt->i915->drm, "GuC engine reset request failed on %d:%d (%s) because 0x%08X",
4061 		guc_class, instance, engine->name, reason);
4062 
4063 	intel_gt_handle_error(gt, engine->mask,
4064 			      I915_ERROR_CAPTURE,
4065 			      "GuC failed to reset %s (reason=0x%08x)\n",
4066 			      engine->name, reason);
4067 
4068 	return 0;
4069 }
4070 
4071 void intel_guc_find_hung_context(struct intel_engine_cs *engine)
4072 {
4073 	struct intel_guc *guc = &engine->gt->uc.guc;
4074 	struct intel_context *ce;
4075 	struct i915_request *rq;
4076 	unsigned long index;
4077 	unsigned long flags;
4078 
4079 	/* Reset called during driver load? GuC not yet initialised! */
4080 	if (unlikely(!guc_submission_initialized(guc)))
4081 		return;
4082 
4083 	xa_lock_irqsave(&guc->context_lookup, flags);
4084 	xa_for_each(&guc->context_lookup, index, ce) {
4085 		if (!kref_get_unless_zero(&ce->ref))
4086 			continue;
4087 
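		/*
		 * Drop the xarray lock while inspecting the context; the
		 * reference taken above keeps ce alive, and the lock is
		 * re-taken before continuing the iteration.
		 */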
4088 		xa_unlock(&guc->context_lookup);
4089 
4090 		if (!intel_context_is_pinned(ce))
4091 			goto next;
4092 
4093 		if (intel_engine_is_virtual(ce->engine)) {
4094 			if (!(ce->engine->mask & engine->mask))
4095 				goto next;
4096 		} else {
4097 			if (ce->engine != engine)
4098 				goto next;
4099 		}
4100 
4101 		list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
4102 			if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
4103 				continue;
4104 
4105 			intel_engine_set_hung_context(engine, ce);
4106 
4107 			/* Can only cope with one hang at a time... */
4108 			intel_context_put(ce);
4109 			xa_lock(&guc->context_lookup);
4110 			goto done;
4111 		}
4112 next:
4113 		intel_context_put(ce);
4114 		xa_lock(&guc->context_lookup);
4115 	}
4116 done:
4117 	xa_unlock_irqrestore(&guc->context_lookup, flags);
4118 }
4119 
4120 void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
4121 				    struct i915_request *hung_rq,
4122 				    struct drm_printer *m)
4123 {
4124 	struct intel_guc *guc = &engine->gt->uc.guc;
4125 	struct intel_context *ce;
4126 	unsigned long index;
4127 	unsigned long flags;
4128 
4129 	/* Reset called during driver load? GuC not yet initialised! */
4130 	if (unlikely(!guc_submission_initialized(guc)))
4131 		return;
4132 
4133 	xa_lock_irqsave(&guc->context_lookup, flags);
4134 	xa_for_each(&guc->context_lookup, index, ce) {
4135 		if (!kref_get_unless_zero(&ce->ref))
4136 			continue;
4137 
4138 		xa_unlock(&guc->context_lookup);
4139 
4140 		if (!intel_context_is_pinned(ce))
4141 			goto next;
4142 
4143 		if (intel_engine_is_virtual(ce->engine)) {
4144 			if (!(ce->engine->mask & engine->mask))
4145 				goto next;
4146 		} else {
4147 			if (ce->engine != engine)
4148 				goto next;
4149 		}
4150 
4151 		spin_lock(&ce->guc_state.lock);
4152 		intel_engine_dump_active_requests(&ce->guc_state.requests,
4153 						  hung_rq, m);
4154 		spin_unlock(&ce->guc_state.lock);
4155 
4156 next:
4157 		intel_context_put(ce);
4158 		xa_lock(&guc->context_lookup);
4159 	}
4160 	xa_unlock_irqrestore(&guc->context_lookup, flags);
4161 }
4162 
4163 void intel_guc_submission_print_info(struct intel_guc *guc,
4164 				     struct drm_printer *p)
4165 {
4166 	struct i915_sched_engine *sched_engine = guc->sched_engine;
4167 	struct rb_node *rb;
4168 	unsigned long flags;
4169 
4170 	if (!sched_engine)
4171 		return;
4172 
4173 	drm_printf(p, "GuC Number Outstanding Submission G2H: %u\n",
4174 		   atomic_read(&guc->outstanding_submission_g2h));
4175 	drm_printf(p, "GuC tasklet count: %u\n\n",
4176 		   atomic_read(&sched_engine->tasklet.count));
4177 
4178 	spin_lock_irqsave(&sched_engine->lock, flags);
4179 	drm_printf(p, "Requests in GuC submit tasklet:\n");
4180 	for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
4181 		struct i915_priolist *pl = to_priolist(rb);
4182 		struct i915_request *rq;
4183 
4184 		priolist_for_each_request(rq, pl)
4185 			drm_printf(p, "guc_id=%u, seqno=%llu\n",
4186 				   rq->context->guc_id.id,
4187 				   rq->fence.seqno);
4188 	}
4189 	spin_unlock_irqrestore(&sched_engine->lock, flags);
4190 	drm_printf(p, "\n");
4191 }
4192 
4193 static inline void guc_log_context_priority(struct drm_printer *p,
4194 					    struct intel_context *ce)
4195 {
4196 	int i;
4197 
4198 	drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio);
4199 	drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
4200 	for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
4201 	     i < GUC_CLIENT_PRIORITY_NUM; ++i) {
4202 		drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
4203 			   i, ce->guc_state.prio_count[i]);
4204 	}
4205 	drm_printf(p, "\n");
4206 }
4207 
4208 static inline void guc_log_context(struct drm_printer *p,
4209 				   struct intel_context *ce)
4210 {
4211 	drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
4212 	drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
4213 	drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
4214 		   ce->ring->head,
4215 		   ce->lrc_reg_state[CTX_RING_HEAD]);
4216 	drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
4217 		   ce->ring->tail,
4218 		   ce->lrc_reg_state[CTX_RING_TAIL]);
4219 	drm_printf(p, "\t\tContext Pin Count: %u\n",
4220 		   atomic_read(&ce->pin_count));
4221 	drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
4222 		   atomic_read(&ce->guc_id.ref));
4223 	drm_printf(p, "\t\tSchedule State: 0x%x\n\n",
4224 		   ce->guc_state.sched_state);
4225 }
4226 
4227 void intel_guc_submission_print_context_info(struct intel_guc *guc,
4228 					     struct drm_printer *p)
4229 {
4230 	struct intel_context *ce;
4231 	unsigned long index;
4232 	unsigned long flags;
4233 
4234 	xa_lock_irqsave(&guc->context_lookup, flags);
4235 	xa_for_each(&guc->context_lookup, index, ce) {
4236 		GEM_BUG_ON(intel_context_is_child(ce));
4237 
4238 		guc_log_context(p, ce);
4239 		guc_log_context_priority(p, ce);
4240 
4241 		if (intel_context_is_parent(ce)) {
4242 			struct guc_process_desc *desc = __get_process_desc(ce);
4243 			struct intel_context *child;
4244 
4245 			drm_printf(p, "\t\tNumber children: %u\n",
4246 				   ce->parallel.number_children);
4247 			drm_printf(p, "\t\tWQI Head: %u\n",
4248 				   READ_ONCE(desc->head));
4249 			drm_printf(p, "\t\tWQI Tail: %u\n",
4250 				   READ_ONCE(desc->tail));
4251 			drm_printf(p, "\t\tWQI Status: %u\n\n",
4252 				   READ_ONCE(desc->wq_status));
4253 
4254 			if (ce->engine->emit_bb_start ==
4255 			    emit_bb_start_parent_no_preempt_mid_batch) {
4256 				u8 i;
4257 
4258 				drm_printf(p, "\t\tChildren Go: %u\n\n",
4259 					   get_children_go_value(ce));
4260 				for (i = 0; i < ce->parallel.number_children; ++i)
4261 					drm_printf(p, "\t\tChildren Join: %u\n",
4262 						   get_children_join_value(ce, i));
4263 			}
4264 
4265 			for_each_child(ce, child)
4266 				guc_log_context(p, child);
4267 		}
4268 	}
4269 	xa_unlock_irqrestore(&guc->context_lookup, flags);
4270 }
4271 
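/*
 * Helpers returning the GGTT addresses of the parent/child handshake
 * semaphores, which live in the parent context's scratch area within its
 * context state.
 */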
4272 static inline u32 get_children_go_addr(struct intel_context *ce)
4273 {
4274 	GEM_BUG_ON(!intel_context_is_parent(ce));
4275 
4276 	return i915_ggtt_offset(ce->state) +
4277 		__get_parent_scratch_offset(ce) +
4278 		offsetof(struct parent_scratch, go.semaphore);
4279 }
4280 
4281 static inline u32 get_children_join_addr(struct intel_context *ce,
4282 					 u8 child_index)
4283 {
4284 	GEM_BUG_ON(!intel_context_is_parent(ce));
4285 
4286 	return i915_ggtt_offset(ce->state) +
4287 		__get_parent_scratch_offset(ce) +
4288 		offsetof(struct parent_scratch, join[child_index].semaphore);
4289 }
4290 
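/*
 * Token values written to / polled from the handshake semaphores: the
 * children signal the parent with PARENT_GO_* and wait for CHILD_GO_*,
 * once around the batch buffer start and once around the fini breadcrumb.
 */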
4291 #define PARENT_GO_BB			1
4292 #define PARENT_GO_FINI_BREADCRUMB	0
4293 #define CHILD_GO_BB			1
4294 #define CHILD_GO_FINI_BREADCRUMB	0
4295 static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
4296 						     u64 offset, u32 len,
4297 						     const unsigned int flags)
4298 {
4299 	struct intel_context *ce = rq->context;
4300 	u32 *cs;
4301 	u8 i;
4302 
4303 	GEM_BUG_ON(!intel_context_is_parent(ce));
4304 
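	/*
	 * Ring space: a 4-dword semaphore wait per child, plus 10 dwords for
	 * the arbitration disable, children-go write and batch buffer start
	 * emitted below.
	 */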
4305 	cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children);
4306 	if (IS_ERR(cs))
4307 		return PTR_ERR(cs);
4308 
4309 	/* Wait on children */
4310 	for (i = 0; i < ce->parallel.number_children; ++i) {
4311 		*cs++ = (MI_SEMAPHORE_WAIT |
4312 			 MI_SEMAPHORE_GLOBAL_GTT |
4313 			 MI_SEMAPHORE_POLL |
4314 			 MI_SEMAPHORE_SAD_EQ_SDD);
4315 		*cs++ = PARENT_GO_BB;
4316 		*cs++ = get_children_join_addr(ce, i);
4317 		*cs++ = 0;
4318 	}
4319 
4320 	/* Turn off preemption */
4321 	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
4322 	*cs++ = MI_NOOP;
4323 
4324 	/* Tell children go */
4325 	cs = gen8_emit_ggtt_write(cs,
4326 				  CHILD_GO_BB,
4327 				  get_children_go_addr(ce),
4328 				  0);
4329 
4330 	/* Jump to batch */
4331 	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
4332 		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
4333 	*cs++ = lower_32_bits(offset);
4334 	*cs++ = upper_32_bits(offset);
4335 	*cs++ = MI_NOOP;
4336 
4337 	intel_ring_advance(rq, cs);
4338 
4339 	return 0;
4340 }
4341 
4342 static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
4343 						    u64 offset, u32 len,
4344 						    const unsigned int flags)
4345 {
4346 	struct intel_context *ce = rq->context;
4347 	struct intel_context *parent = intel_context_to_parent(ce);
4348 	u32 *cs;
4349 
4350 	GEM_BUG_ON(!intel_context_is_child(ce));
4351 
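	/*
	 * Ring space: signal-parent write (4 dwords), semaphore wait (4),
	 * arbitration disable (1) and batch buffer start (3) = 12 dwords.
	 */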
4352 	cs = intel_ring_begin(rq, 12);
4353 	if (IS_ERR(cs))
4354 		return PTR_ERR(cs);
4355 
4356 	/* Signal parent */
4357 	cs = gen8_emit_ggtt_write(cs,
4358 				  PARENT_GO_BB,
4359 				  get_children_join_addr(parent,
4360 							 ce->parallel.child_index),
4361 				  0);
4362 
4363 	/* Wait on parent for go */
4364 	*cs++ = (MI_SEMAPHORE_WAIT |
4365 		 MI_SEMAPHORE_GLOBAL_GTT |
4366 		 MI_SEMAPHORE_POLL |
4367 		 MI_SEMAPHORE_SAD_EQ_SDD);
4368 	*cs++ = CHILD_GO_BB;
4369 	*cs++ = get_children_go_addr(parent);
4370 	*cs++ = 0;
4371 
4372 	/* Turn off preemption */
4373 	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
4374 
4375 	/* Jump to batch */
4376 	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
4377 		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
4378 	*cs++ = lower_32_bits(offset);
4379 	*cs++ = upper_32_bits(offset);
4380 
4381 	intel_ring_advance(rq, cs);
4382 
4383 	return 0;
4384 }
4385 
4386 static u32 *
4387 __emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
4388 						   u32 *cs)
4389 {
4390 	struct intel_context *ce = rq->context;
4391 	u8 i;
4392 
4393 	GEM_BUG_ON(!intel_context_is_parent(ce));
4394 
4395 	/* Wait on children */
4396 	for (i = 0; i < ce->parallel.number_children; ++i) {
4397 		*cs++ = (MI_SEMAPHORE_WAIT |
4398 			 MI_SEMAPHORE_GLOBAL_GTT |
4399 			 MI_SEMAPHORE_POLL |
4400 			 MI_SEMAPHORE_SAD_EQ_SDD);
4401 		*cs++ = PARENT_GO_FINI_BREADCRUMB;
4402 		*cs++ = get_children_join_addr(ce, i);
4403 		*cs++ = 0;
4404 	}
4405 
4406 	/* Turn on preemption */
4407 	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
4408 	*cs++ = MI_NOOP;
4409 
4410 	/* Tell children go */
4411 	cs = gen8_emit_ggtt_write(cs,
4412 				  CHILD_GO_FINI_BREADCRUMB,
4413 				  get_children_go_addr(ce),
4414 				  0);
4415 
4416 	return cs;
4417 }
4418 
4419 /*
4420  * If this is true, a submission of multi-lrc requests had an error and the
4421  * requests need to be skipped. The front end (execbuf IOCTL) should've called
4422  * i915_request_skip, which squashes the BB, but we still need to emit the fini
4423  * breadcrumb seqno write. At this point we don't know how many of the
4424  * requests in the multi-lrc submission were generated, so we can't do the
4425  * handshake between the parent and children (e.g. if 4 requests should be
4426  * generated but the 2nd hit an error, only 1 would be seen by the GuC backend).
4427  * Simply skip the handshake, but still emit the breadcrumb seqno, if an error
4428  * has occurred on any of the requests in the submission / relationship.
4429  */
4430 static inline bool skip_handshake(struct i915_request *rq)
4431 {
4432 	return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
4433 }
4434 
4435 static u32 *
4436 emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
4437 						 u32 *cs)
4438 {
4439 	struct intel_context *ce = rq->context;
4440 
4441 	GEM_BUG_ON(!intel_context_is_parent(ce));
4442 
4443 	if (unlikely(skip_handshake(rq))) {
4444 		/*
4445 		 * NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch;
4446 		 * the -6 matches the emits below: seqno write (4) + user interrupt/noop (2).
4447 		 */
4448 		memset(cs, 0, sizeof(u32) *
4449 		       (ce->engine->emit_fini_breadcrumb_dw - 6));
4450 		cs += ce->engine->emit_fini_breadcrumb_dw - 6;
4451 	} else {
4452 		cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
4453 	}
4454 
4455 	/* Emit fini breadcrumb */
4456 	cs = gen8_emit_ggtt_write(cs,
4457 				  rq->fence.seqno,
4458 				  i915_request_active_timeline(rq)->hwsp_offset,
4459 				  0);
4460 
4461 	/* User interrupt */
4462 	*cs++ = MI_USER_INTERRUPT;
4463 	*cs++ = MI_NOOP;
4464 
4465 	rq->tail = intel_ring_offset(rq, cs);
4466 
4467 	return cs;
4468 }
4469 
4470 static u32 *
4471 __emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
4472 						  u32 *cs)
4473 {
4474 	struct intel_context *ce = rq->context;
4475 	struct intel_context *parent = intel_context_to_parent(ce);
4476 
4477 	GEM_BUG_ON(!intel_context_is_child(ce));
4478 
4479 	/* Turn on preemption */
4480 	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
4481 	*cs++ = MI_NOOP;
4482 
4483 	/* Signal parent */
4484 	cs = gen8_emit_ggtt_write(cs,
4485 				  PARENT_GO_FINI_BREADCRUMB,
4486 				  get_children_join_addr(parent,
4487 							 ce->parallel.child_index),
4488 				  0);
4489 
4490 	/* Wait on parent for go */
4491 	*cs++ = (MI_SEMAPHORE_WAIT |
4492 		 MI_SEMAPHORE_GLOBAL_GTT |
4493 		 MI_SEMAPHORE_POLL |
4494 		 MI_SEMAPHORE_SAD_EQ_SDD);
4495 	*cs++ = CHILD_GO_FINI_BREADCRUMB;
4496 	*cs++ = get_children_go_addr(parent);
4497 	*cs++ = 0;
4498 
4499 	return cs;
4500 }
4501 
4502 static u32 *
4503 emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
4504 						u32 *cs)
4505 {
4506 	struct intel_context *ce = rq->context;
4507 
4508 	GEM_BUG_ON(!intel_context_is_child(ce));
4509 
4510 	if (unlikely(skip_handshake(rq))) {
4511 		/*
4512 		 * NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch;
4513 		 * the -6 matches the emits below: seqno write (4) + user interrupt/noop (2).
4514 		 */
4515 		memset(cs, 0, sizeof(u32) *
4516 		       (ce->engine->emit_fini_breadcrumb_dw - 6));
4517 		cs += ce->engine->emit_fini_breadcrumb_dw - 6;
4518 	} else {
4519 		cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
4520 	}
4521 
4522 	/* Emit fini breadcrumb */
4523 	cs = gen8_emit_ggtt_write(cs,
4524 				  rq->fence.seqno,
4525 				  i915_request_active_timeline(rq)->hwsp_offset,
4526 				  0);
4527 
4528 	/* User interrupt */
4529 	*cs++ = MI_USER_INTERRUPT;
4530 	*cs++ = MI_NOOP;
4531 
4532 	rq->tail = intel_ring_offset(rq, cs);
4533 
4534 	return cs;
4535 }
4536 
4537 static struct intel_context *
4538 guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
4539 		   unsigned long flags)
4540 {
4541 	struct guc_virtual_engine *ve;
4542 	struct intel_guc *guc;
4543 	unsigned int n;
4544 	int err;
4545 
4546 	ve = kzalloc(sizeof(*ve), GFP_KERNEL);
4547 	if (!ve)
4548 		return ERR_PTR(-ENOMEM);
4549 
4550 	guc = &siblings[0]->gt->uc.guc;
4551 
4552 	ve->base.i915 = siblings[0]->i915;
4553 	ve->base.gt = siblings[0]->gt;
4554 	ve->base.uncore = siblings[0]->uncore;
4555 	ve->base.id = -1;
4556 
4557 	ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
4558 	ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
4559 	ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
4560 	ve->base.saturated = ALL_ENGINES;
4561 
4562 	snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
4563 
4564 	ve->base.sched_engine = i915_sched_engine_get(guc->sched_engine);
4565 
4566 	ve->base.cops = &virtual_guc_context_ops;
4567 	ve->base.request_alloc = guc_request_alloc;
4568 	ve->base.bump_serial = virtual_guc_bump_serial;
4569 
4570 	ve->base.submit_request = guc_submit_request;
4571 
4572 	ve->base.flags = I915_ENGINE_IS_VIRTUAL;
4573 
4574 	intel_context_init(&ve->context, &ve->base);
4575 
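	/*
	 * Accumulate the engine masks of all siblings and inherit the
	 * class-wide vfuncs and properties from the first one.
	 */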
4576 	for (n = 0; n < count; n++) {
4577 		struct intel_engine_cs *sibling = siblings[n];
4578 
4579 		GEM_BUG_ON(!is_power_of_2(sibling->mask));
4580 		if (sibling->mask & ve->base.mask) {
4581 			DRM_DEBUG("duplicate %s entry in load balancer\n",
4582 				  sibling->name);
4583 			err = -EINVAL;
4584 			goto err_put;
4585 		}
4586 
4587 		ve->base.mask |= sibling->mask;
4588 		ve->base.logical_mask |= sibling->logical_mask;
4589 
4590 		if (n != 0 && ve->base.class != sibling->class) {
4591 			DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
4592 				  sibling->class, ve->base.class);
4593 			err = -EINVAL;
4594 			goto err_put;
4595 		} else if (n == 0) {
4596 			ve->base.class = sibling->class;
4597 			ve->base.uabi_class = sibling->uabi_class;
4598 			snprintf(ve->base.name, sizeof(ve->base.name),
4599 				 "v%dx%d", ve->base.class, count);
4600 			ve->base.context_size = sibling->context_size;
4601 
4602 			ve->base.add_active_request =
4603 				sibling->add_active_request;
4604 			ve->base.remove_active_request =
4605 				sibling->remove_active_request;
4606 			ve->base.emit_bb_start = sibling->emit_bb_start;
4607 			ve->base.emit_flush = sibling->emit_flush;
4608 			ve->base.emit_init_breadcrumb =
4609 				sibling->emit_init_breadcrumb;
4610 			ve->base.emit_fini_breadcrumb =
4611 				sibling->emit_fini_breadcrumb;
4612 			ve->base.emit_fini_breadcrumb_dw =
4613 				sibling->emit_fini_breadcrumb_dw;
4614 			ve->base.breadcrumbs =
4615 				intel_breadcrumbs_get(sibling->breadcrumbs);
4616 
4617 			ve->base.flags |= sibling->flags;
4618 
4619 			ve->base.props.timeslice_duration_ms =
4620 				sibling->props.timeslice_duration_ms;
4621 			ve->base.props.preempt_timeout_ms =
4622 				sibling->props.preempt_timeout_ms;
4623 		}
4624 	}
4625 
4626 	return &ve->context;
4627 
4628 err_put:
4629 	intel_context_put(&ve->context);
4630 	return ERR_PTR(err);
4631 }
4632 
4633 bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
4634 {
4635 	struct intel_engine_cs *engine;
4636 	intel_engine_mask_t tmp, mask = ve->mask;
4637 
4638 	for_each_engine_masked(engine, ve->gt, mask, tmp)
4639 		if (READ_ONCE(engine->props.heartbeat_interval_ms))
4640 			return true;
4641 
4642 	return false;
4643 }
4644 
4645 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4646 #include "selftest_guc.c"
4647 #include "selftest_guc_multi_lrc.c"
4648 #endif
4649