/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, so that it triggers a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context while it is still active.
 *
 */
66 
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
69 
70 #include <drm/drm_syncobj.h>
71 
72 #include "gt/gen6_ppgtt.h"
73 #include "gt/intel_context.h"
74 #include "gt/intel_context_param.h"
75 #include "gt/intel_engine_heartbeat.h"
76 #include "gt/intel_engine_user.h"
77 #include "gt/intel_execlists_submission.h" /* virtual_engine */
78 #include "gt/intel_gpu_commands.h"
79 #include "gt/intel_ring.h"
80 
81 #include "i915_gem_context.h"
82 #include "i915_globals.h"
83 #include "i915_trace.h"
84 #include "i915_user_extensions.h"
85 
#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
87 
88 static struct i915_global_gem_context {
89 	struct i915_global base;
90 	struct kmem_cache *slab_luts;
91 } global;
92 
93 struct i915_lut_handle *i915_lut_handle_alloc(void)
94 {
95 	return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
96 }
97 
void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	kmem_cache_free(global.slab_luts, lut);
}
102 
103 static void lut_close(struct i915_gem_context *ctx)
104 {
105 	struct radix_tree_iter iter;
106 	void __rcu **slot;
107 
108 	mutex_lock(&ctx->lut_mutex);
109 	rcu_read_lock();
110 	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
111 		struct i915_vma *vma = rcu_dereference_raw(*slot);
112 		struct drm_i915_gem_object *obj = vma->obj;
113 		struct i915_lut_handle *lut;
114 
115 		if (!kref_get_unless_zero(&obj->base.refcount))
116 			continue;
117 
118 		spin_lock(&obj->lut_lock);
119 		list_for_each_entry(lut, &obj->lut_list, obj_link) {
120 			if (lut->ctx != ctx)
121 				continue;
122 
123 			if (lut->handle != iter.index)
124 				continue;
125 
126 			list_del(&lut->obj_link);
127 			break;
128 		}
129 		spin_unlock(&obj->lut_lock);
130 
131 		if (&lut->obj_link != &obj->lut_list) {
132 			i915_lut_handle_free(lut);
133 			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
134 			i915_vma_close(vma);
135 			i915_gem_object_put(obj);
136 		}
137 
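		/* Drop the reference taken by kref_get_unless_zero() above */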
138 		i915_gem_object_put(obj);
139 	}
140 	rcu_read_unlock();
141 	mutex_unlock(&ctx->lut_mutex);
142 }
143 
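/*
 * Translate a user-supplied engine specifier into an intel_context: an index
 * into the context's user-defined engine map when LOOKUP_USER_INDEX is set,
 * otherwise a class/instance lookup into the legacy ring map.
 */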
144 static struct intel_context *
145 lookup_user_engine(struct i915_gem_context *ctx,
146 		   unsigned long flags,
147 		   const struct i915_engine_class_instance *ci)
148 #define LOOKUP_USER_INDEX BIT(0)
149 {
150 	int idx;
151 
152 	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
153 		return ERR_PTR(-EINVAL);
154 
155 	if (!i915_gem_context_user_engines(ctx)) {
156 		struct intel_engine_cs *engine;
157 
158 		engine = intel_engine_lookup_user(ctx->i915,
159 						  ci->engine_class,
160 						  ci->engine_instance);
161 		if (!engine)
162 			return ERR_PTR(-EINVAL);
163 
164 		idx = engine->legacy_idx;
165 	} else {
166 		idx = ci->engine_instance;
167 	}
168 
169 	return i915_gem_context_get_engine(ctx, idx);
170 }
171 
172 static struct i915_address_space *
173 context_get_vm_rcu(struct i915_gem_context *ctx)
174 {
175 	GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
176 
177 	do {
178 		struct i915_address_space *vm;
179 
180 		/*
181 		 * We do not allow downgrading from full-ppgtt [to a shared
182 		 * global gtt], so ctx->vm cannot become NULL.
183 		 */
184 		vm = rcu_dereference(ctx->vm);
185 		if (!kref_get_unless_zero(&vm->ref))
186 			continue;
187 
		/*
		 * This ppgtt may have been reallocated between
		 * the read and the kref, and reassigned to a third
		 * context. In order to avoid inadvertent sharing
		 * of this ppgtt with that third context (instead of
		 * the one still belonging to ctx), we have to confirm
		 * that we have the same ppgtt after passing through
		 * the strong memory barrier implied by a successful
		 * kref_get_unless_zero().
		 *
		 * Once we have acquired the current ppgtt of ctx,
		 * we no longer care if it is released from ctx, as
		 * it cannot be reallocated elsewhere.
		 */
202 
203 		if (vm == rcu_access_pointer(ctx->vm))
204 			return rcu_pointer_handoff(vm);
205 
206 		i915_vm_put(vm);
207 	} while (1);
208 }
209 
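/*
 * Bind a newly created intel_context to its owning GEM context: take over
 * the context's address space and apply its scheduling (semaphore) and
 * request-watchdog settings.
 */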
210 static void intel_context_set_gem(struct intel_context *ce,
211 				  struct i915_gem_context *ctx)
212 {
213 	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
214 	RCU_INIT_POINTER(ce->gem_context, ctx);
215 
216 	ce->ring_size = SZ_16K;
217 
218 	if (rcu_access_pointer(ctx->vm)) {
219 		struct i915_address_space *vm;
220 
221 		rcu_read_lock();
		vm = context_get_vm_rcu(ctx);
223 		rcu_read_unlock();
224 
225 		i915_vm_put(ce->vm);
226 		ce->vm = vm;
227 	}
228 
229 	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
230 	    intel_engine_has_timeslices(ce->engine))
231 		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
232 
233 	if (IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) &&
234 	    ctx->i915->params.request_timeout_ms) {
235 		unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
236 
237 		intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
238 	}
239 }
240 
241 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
242 {
243 	while (count--) {
244 		if (!e->engines[count])
245 			continue;
246 
247 		intel_context_put(e->engines[count]);
248 	}
249 	kfree(e);
250 }
251 
252 static void free_engines(struct i915_gem_engines *e)
253 {
254 	__free_engines(e, e->num_engines);
255 }
256 
257 static void free_engines_rcu(struct rcu_head *rcu)
258 {
259 	struct i915_gem_engines *engines =
260 		container_of(rcu, struct i915_gem_engines, rcu);
261 
262 	i915_sw_fence_fini(&engines->fence);
263 	free_engines(engines);
264 }
265 
266 static int __i915_sw_fence_call
267 engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
268 {
269 	struct i915_gem_engines *engines =
270 		container_of(fence, typeof(*engines), fence);
271 
272 	switch (state) {
273 	case FENCE_COMPLETE:
274 		if (!list_empty(&engines->link)) {
275 			struct i915_gem_context *ctx = engines->ctx;
276 			unsigned long flags;
277 
278 			spin_lock_irqsave(&ctx->stale.lock, flags);
279 			list_del(&engines->link);
280 			spin_unlock_irqrestore(&ctx->stale.lock, flags);
281 		}
282 		i915_gem_context_put(engines->ctx);
283 		break;
284 
285 	case FENCE_FREE:
286 		init_rcu_head(&engines->rcu);
287 		call_rcu(&engines->rcu, free_engines_rcu);
288 		break;
289 	}
290 
291 	return NOTIFY_DONE;
292 }
293 
294 static struct i915_gem_engines *alloc_engines(unsigned int count)
295 {
296 	struct i915_gem_engines *e;
297 
298 	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
299 	if (!e)
300 		return NULL;
301 
302 	i915_sw_fence_init(&e->fence, engines_notify);
303 	return e;
304 }
305 
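/*
 * Build the default engine map for a new context: one intel_context per
 * physical engine, indexed by the engine's legacy (execbuf ring) index.
 */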
306 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
307 {
308 	const struct intel_gt *gt = &ctx->i915->gt;
309 	struct intel_engine_cs *engine;
310 	struct i915_gem_engines *e;
311 	enum intel_engine_id id;
312 
313 	e = alloc_engines(I915_NUM_ENGINES);
314 	if (!e)
315 		return ERR_PTR(-ENOMEM);
316 
317 	for_each_engine(engine, gt, id) {
318 		struct intel_context *ce;
319 
320 		if (engine->legacy_idx == INVALID_ENGINE)
321 			continue;
322 
323 		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
324 		GEM_BUG_ON(e->engines[engine->legacy_idx]);
325 
326 		ce = intel_context_create(engine);
327 		if (IS_ERR(ce)) {
328 			__free_engines(e, e->num_engines + 1);
329 			return ERR_CAST(ce);
330 		}
331 
332 		intel_context_set_gem(ce, ctx);
333 
334 		e->engines[engine->legacy_idx] = ce;
335 		e->num_engines = max(e->num_engines, engine->legacy_idx);
336 	}
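	/* num_engines tracked the highest legacy index; convert to a count */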
337 	e->num_engines++;
338 
339 	return e;
340 }
341 
342 void i915_gem_context_release(struct kref *ref)
343 {
344 	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
345 
346 	trace_i915_context_free(ctx);
347 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
348 
349 	mutex_destroy(&ctx->engines_mutex);
350 	mutex_destroy(&ctx->lut_mutex);
351 
352 	put_pid(ctx->pid);
353 	mutex_destroy(&ctx->mutex);
354 
355 	kfree_rcu(ctx, rcu);
356 }
357 
358 static inline struct i915_gem_engines *
359 __context_engines_static(const struct i915_gem_context *ctx)
360 {
361 	return rcu_dereference_protected(ctx->engines, true);
362 }
363 
364 static void __reset_context(struct i915_gem_context *ctx,
365 			    struct intel_engine_cs *engine)
366 {
367 	intel_gt_handle_error(engine->gt, engine->mask, 0,
368 			      "context closure in %s", ctx->name);
369 }
370 
371 static bool __cancel_engine(struct intel_engine_cs *engine)
372 {
	/*
	 * Send a "high priority pulse" down the engine to cause the
	 * current request to be momentarily preempted. (If it fails to
	 * be preempted, it will be reset.) As we have marked our context
	 * as banned, any incomplete request, including any that are
	 * running, will be skipped following the preemption.
	 *
	 * If there is no hangchecking (one of the reasons why we try to
	 * cancel the context) and no forced preemption, there may be no
	 * means by which we reset the GPU and evict the persistent hog.
	 * Ergo if we are unable to inject a preemptive pulse that can
	 * kill the banned context, we fall back to doing a local reset
	 * instead.
	 */
387 	return intel_engine_pulse(engine) == 0;
388 }
389 
390 static struct intel_engine_cs *active_engine(struct intel_context *ce)
391 {
392 	struct intel_engine_cs *engine = NULL;
393 	struct i915_request *rq;
394 
395 	if (intel_context_has_inflight(ce))
396 		return intel_context_inflight(ce);
397 
398 	if (!ce->timeline)
399 		return NULL;
400 
401 	/*
402 	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
403 	 * to the request to prevent it being transferred to a new timeline
404 	 * (and onto a new timeline->requests list).
405 	 */
406 	rcu_read_lock();
407 	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
408 		bool found;
409 
		/* timeline is already completed up to this point? */
411 		if (!i915_request_get_rcu(rq))
412 			break;
413 
414 		/* Check with the backend if the request is inflight */
415 		found = true;
416 		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
417 			found = i915_request_active_engine(rq, &engine);
418 
419 		i915_request_put(rq);
420 		if (found)
421 			break;
422 	}
423 	rcu_read_unlock();
424 
425 	return engine;
426 }
427 
428 static void kill_engines(struct i915_gem_engines *engines, bool ban)
429 {
430 	struct i915_gem_engines_iter it;
431 	struct intel_context *ce;
432 
	/*
	 * Map the user's engine back to the actual engines; one virtual
	 * engine will be mapped to multiple engines, and using ctx->engine[]
	 * the same engine may have multiple instances in the user's map.
	 * However, we only care about pending requests, so only include
	 * engines on which there are incomplete requests.
	 */
440 	for_each_gem_engine(ce, engines, it) {
441 		struct intel_engine_cs *engine;
442 
443 		if (ban && intel_context_set_banned(ce))
444 			continue;
445 
446 		/*
447 		 * Check the current active state of this context; if we
448 		 * are currently executing on the GPU we need to evict
449 		 * ourselves. On the other hand, if we haven't yet been
450 		 * submitted to the GPU or if everything is complete,
451 		 * we have nothing to do.
452 		 */
453 		engine = active_engine(ce);
454 
455 		/* First attempt to gracefully cancel the context */
456 		if (engine && !__cancel_engine(engine) && ban)
457 			/*
458 			 * If we are unable to send a preemptive pulse to bump
459 			 * the context from the GPU, we have to resort to a full
460 			 * reset. We hope the collateral damage is worth it.
461 			 */
462 			__reset_context(engines->ctx, engine);
463 	}
464 }
465 
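/*
 * Called on context close: for every engine set still tracked on the stale
 * list, cancel any work still running on its engines, escalating to an
 * engine reset when the context is not allowed to persist.
 */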
466 static void kill_context(struct i915_gem_context *ctx)
467 {
468 	bool ban = (!i915_gem_context_is_persistent(ctx) ||
469 		    !ctx->i915->params.enable_hangcheck);
470 	struct i915_gem_engines *pos, *next;
471 
472 	spin_lock_irq(&ctx->stale.lock);
473 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
474 	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
475 		if (!i915_sw_fence_await(&pos->fence)) {
476 			list_del_init(&pos->link);
477 			continue;
478 		}
479 
480 		spin_unlock_irq(&ctx->stale.lock);
481 
482 		kill_engines(pos, ban);
483 
484 		spin_lock_irq(&ctx->stale.lock);
485 		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
486 		list_safe_reset_next(pos, next, link);
487 		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
488 
489 		i915_sw_fence_complete(&pos->fence);
490 	}
491 	spin_unlock_irq(&ctx->stale.lock);
492 }
493 
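/*
 * Park the old engine set on ctx->stale.engines until all of its contexts
 * have been scheduled out and retired, so that kill_context() can later
 * revoke any work that is still outstanding on them.
 */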
494 static void engines_idle_release(struct i915_gem_context *ctx,
495 				 struct i915_gem_engines *engines)
496 {
497 	struct i915_gem_engines_iter it;
498 	struct intel_context *ce;
499 
500 	INIT_LIST_HEAD(&engines->link);
501 
502 	engines->ctx = i915_gem_context_get(ctx);
503 
504 	for_each_gem_engine(ce, engines, it) {
505 		int err;
506 
507 		/* serialises with execbuf */
508 		set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
509 		if (!intel_context_pin_if_active(ce))
510 			continue;
511 
512 		/* Wait until context is finally scheduled out and retired */
513 		err = i915_sw_fence_await_active(&engines->fence,
514 						 &ce->active,
515 						 I915_ACTIVE_AWAIT_BARRIER);
516 		intel_context_unpin(ce);
517 		if (err)
518 			goto kill;
519 	}
520 
521 	spin_lock_irq(&ctx->stale.lock);
522 	if (!i915_gem_context_is_closed(ctx))
523 		list_add_tail(&engines->link, &ctx->stale.engines);
524 	spin_unlock_irq(&ctx->stale.lock);
525 
526 kill:
527 	if (list_empty(&engines->link)) /* raced, already closed */
528 		kill_engines(engines, true);
529 
530 	i915_sw_fence_commit(&engines->fence);
531 }
532 
533 static void set_closed_name(struct i915_gem_context *ctx)
534 {
535 	char *s;
536 
537 	/* Replace '[]' with '<>' to indicate closed in debug prints */
538 
539 	s = strrchr(ctx->name, '[');
540 	if (!s)
541 		return;
542 
543 	*s = '<';
544 
545 	s = strchr(s + 1, ']');
546 	if (s)
547 		*s = '>';
548 }
549 
550 static void context_close(struct i915_gem_context *ctx)
551 {
552 	struct i915_address_space *vm;
553 
554 	/* Flush any concurrent set_engines() */
555 	mutex_lock(&ctx->engines_mutex);
556 	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
557 	i915_gem_context_set_closed(ctx);
558 	mutex_unlock(&ctx->engines_mutex);
559 
560 	mutex_lock(&ctx->mutex);
561 
562 	set_closed_name(ctx);
563 
564 	vm = i915_gem_context_vm(ctx);
565 	if (vm)
566 		i915_vm_close(vm);
567 
568 	if (ctx->syncobj)
569 		drm_syncobj_put(ctx->syncobj);
570 
571 	ctx->file_priv = ERR_PTR(-EBADF);
572 
573 	/*
574 	 * The LUT uses the VMA as a backpointer to unref the object,
575 	 * so we need to clear the LUT before we close all the VMA (inside
576 	 * the ppgtt).
577 	 */
578 	lut_close(ctx);
579 
580 	spin_lock(&ctx->i915->gem.contexts.lock);
581 	list_del(&ctx->link);
582 	spin_unlock(&ctx->i915->gem.contexts.lock);
583 
584 	mutex_unlock(&ctx->mutex);
585 
	/*
	 * If the user has disabled hangchecking, we cannot be sure that
	 * the batches will ever complete after the context is closed,
	 * keeping the context and all resources pinned forever. So in this
	 * case we opt to forcibly kill off all remaining requests on
	 * context close.
	 */
593 	kill_context(ctx);
594 
595 	i915_gem_context_put(ctx);
596 }
597 
598 static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
599 {
600 	if (i915_gem_context_is_persistent(ctx) == state)
601 		return 0;
602 
603 	if (state) {
		/*
		 * A context is only allowed to survive past process
		 * termination if hangcheck will eventually expire or reset
		 * its requests. We require hangcheck to ensure that the
		 * persistent requests remain healthy.
		 */
609 		if (!ctx->i915->params.enable_hangcheck)
610 			return -EINVAL;
611 
612 		i915_gem_context_set_persistence(ctx);
613 	} else {
614 		/* To cancel a context we use "preempt-to-idle" */
615 		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
616 			return -ENODEV;
617 
618 		/*
619 		 * If the cancel fails, we then need to reset, cleanly!
620 		 *
621 		 * If the per-engine reset fails, all hope is lost! We resort
622 		 * to a full GPU reset in that unlikely case, but realistically
623 		 * if the engine could not reset, the full reset does not fare
624 		 * much better. The damage has been done.
625 		 *
626 		 * However, if we cannot reset an engine by itself, we cannot
627 		 * cleanup a hanging persistent context without causing
		 * collateral damage, and we should not pretend we can by
629 		 * exposing the interface.
630 		 */
631 		if (!intel_has_reset_engine(&ctx->i915->gt))
632 			return -ENODEV;
633 
634 		i915_gem_context_clear_persistence(ctx);
635 	}
636 
637 	return 0;
638 }
639 
640 static struct i915_gem_context *
641 __create_context(struct drm_i915_private *i915)
642 {
643 	struct i915_gem_context *ctx;
644 	struct i915_gem_engines *e;
645 	int err;
646 	int i;
647 
648 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
649 	if (!ctx)
650 		return ERR_PTR(-ENOMEM);
651 
652 	kref_init(&ctx->ref);
653 	ctx->i915 = i915;
654 	ctx->sched.priority = I915_PRIORITY_NORMAL;
655 	mutex_init(&ctx->mutex);
656 	INIT_LIST_HEAD(&ctx->link);
657 
658 	spin_lock_init(&ctx->stale.lock);
659 	INIT_LIST_HEAD(&ctx->stale.engines);
660 
661 	mutex_init(&ctx->engines_mutex);
662 	e = default_engines(ctx);
663 	if (IS_ERR(e)) {
664 		err = PTR_ERR(e);
665 		goto err_free;
666 	}
667 	RCU_INIT_POINTER(ctx->engines, e);
668 
669 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
670 	mutex_init(&ctx->lut_mutex);
671 
	/*
	 * NB: Mark all slices as needing a remap so that when the context
	 * first loads it will restore whatever remap state already exists.
	 * If there is no remap info, it will be a NOP.
	 */
675 	ctx->remap_slice = ALL_L3_SLICES(i915);
676 
677 	i915_gem_context_set_bannable(ctx);
678 	i915_gem_context_set_recoverable(ctx);
679 	__context_set_persistence(ctx, true /* cgroup hook? */);
680 
681 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
682 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
683 
684 	return ctx;
685 
686 err_free:
687 	kfree(ctx);
688 	return ERR_PTR(err);
689 }
690 
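/*
 * Pin down the context's current engine set by taking a hold on its fence;
 * the caller must drop the hold with i915_sw_fence_complete(&e->fence).
 */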
691 static inline struct i915_gem_engines *
692 __context_engines_await(const struct i915_gem_context *ctx,
693 			bool *user_engines)
694 {
695 	struct i915_gem_engines *engines;
696 
697 	rcu_read_lock();
698 	do {
699 		engines = rcu_dereference(ctx->engines);
700 		GEM_BUG_ON(!engines);
701 
702 		if (user_engines)
703 			*user_engines = i915_gem_context_user_engines(ctx);
704 
705 		/* successful await => strong mb */
706 		if (unlikely(!i915_sw_fence_await(&engines->fence)))
707 			continue;
708 
709 		if (likely(engines == rcu_access_pointer(ctx->engines)))
710 			break;
711 
712 		i915_sw_fence_complete(&engines->fence);
713 	} while (1);
714 	rcu_read_unlock();
715 
716 	return engines;
717 }
718 
719 static void
720 context_apply_all(struct i915_gem_context *ctx,
721 		  void (*fn)(struct intel_context *ce, void *data),
722 		  void *data)
723 {
724 	struct i915_gem_engines_iter it;
725 	struct i915_gem_engines *e;
726 	struct intel_context *ce;
727 
728 	e = __context_engines_await(ctx, NULL);
729 	for_each_gem_engine(ce, e, it)
730 		fn(ce, data);
731 	i915_sw_fence_complete(&e->fence);
732 }
733 
734 static void __apply_ppgtt(struct intel_context *ce, void *vm)
735 {
736 	i915_vm_put(ce->vm);
737 	ce->vm = i915_vm_get(vm);
738 }
739 
740 static struct i915_address_space *
741 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
742 {
743 	struct i915_address_space *old;
744 
745 	old = rcu_replace_pointer(ctx->vm,
746 				  i915_vm_open(vm),
747 				  lockdep_is_held(&ctx->mutex));
748 	GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
749 
750 	context_apply_all(ctx, __apply_ppgtt, vm);
751 
752 	return old;
753 }
754 
755 static void __assign_ppgtt(struct i915_gem_context *ctx,
756 			   struct i915_address_space *vm)
757 {
758 	if (vm == rcu_access_pointer(ctx->vm))
759 		return;
760 
761 	vm = __set_ppgtt(ctx, vm);
762 	if (vm)
763 		i915_vm_close(vm);
764 }
765 
766 static struct i915_gem_context *
767 i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
768 {
769 	struct i915_gem_context *ctx;
770 	int ret;
771 
772 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
773 	    !HAS_EXECLISTS(i915))
774 		return ERR_PTR(-EINVAL);
775 
776 	ctx = __create_context(i915);
777 	if (IS_ERR(ctx))
778 		return ctx;
779 
780 	if (HAS_FULL_PPGTT(i915)) {
781 		struct i915_ppgtt *ppgtt;
782 
783 		ppgtt = i915_ppgtt_create(&i915->gt);
784 		if (IS_ERR(ppgtt)) {
785 			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
786 				PTR_ERR(ppgtt));
787 			context_close(ctx);
788 			return ERR_CAST(ppgtt);
789 		}
790 
791 		mutex_lock(&ctx->mutex);
792 		__assign_ppgtt(ctx, &ppgtt->vm);
793 		mutex_unlock(&ctx->mutex);
794 
795 		i915_vm_put(&ppgtt->vm);
796 	}
797 
798 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
799 		ret = drm_syncobj_create(&ctx->syncobj,
800 					 DRM_SYNCOBJ_CREATE_SIGNALED,
801 					 NULL);
802 		if (ret) {
803 			context_close(ctx);
804 			return ERR_PTR(ret);
805 		}
806 	}
807 
808 	trace_i915_context_create(ctx);
809 
810 	return ctx;
811 }
812 
813 static void init_contexts(struct i915_gem_contexts *gc)
814 {
815 	spin_lock_init(&gc->lock);
816 	INIT_LIST_HEAD(&gc->list);
817 }
818 
819 void i915_gem_init__contexts(struct drm_i915_private *i915)
820 {
821 	init_contexts(&i915->gem.contexts);
822 }
823 
824 static int gem_context_register(struct i915_gem_context *ctx,
825 				struct drm_i915_file_private *fpriv,
826 				u32 *id)
827 {
828 	struct drm_i915_private *i915 = ctx->i915;
829 	struct i915_address_space *vm;
830 	int ret;
831 
832 	ctx->file_priv = fpriv;
833 
834 	mutex_lock(&ctx->mutex);
835 	vm = i915_gem_context_vm(ctx);
836 	if (vm)
837 		WRITE_ONCE(vm->file, fpriv); /* XXX */
838 	mutex_unlock(&ctx->mutex);
839 
840 	ctx->pid = get_task_pid(current, PIDTYPE_PID);
841 	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
842 		 current->comm, pid_nr(ctx->pid));
843 
	/* And finally expose ourselves to userspace via the xarray */
845 	ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
846 	if (ret)
847 		goto err_pid;
848 
849 	spin_lock(&i915->gem.contexts.lock);
850 	list_add_tail(&ctx->link, &i915->gem.contexts.list);
851 	spin_unlock(&i915->gem.contexts.lock);
852 
853 	return 0;
854 
855 err_pid:
856 	put_pid(fetch_and_zero(&ctx->pid));
857 	return ret;
858 }
859 
860 int i915_gem_context_open(struct drm_i915_private *i915,
861 			  struct drm_file *file)
862 {
863 	struct drm_i915_file_private *file_priv = file->driver_priv;
864 	struct i915_gem_context *ctx;
865 	int err;
866 	u32 id;
867 
868 	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
869 
870 	/* 0 reserved for invalid/unassigned ppgtt */
871 	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
872 
873 	ctx = i915_gem_create_context(i915, 0);
874 	if (IS_ERR(ctx)) {
875 		err = PTR_ERR(ctx);
876 		goto err;
877 	}
878 
879 	err = gem_context_register(ctx, file_priv, &id);
880 	if (err < 0)
881 		goto err_ctx;
882 
883 	GEM_BUG_ON(id);
884 	return 0;
885 
886 err_ctx:
887 	context_close(ctx);
888 err:
889 	xa_destroy(&file_priv->vm_xa);
890 	xa_destroy(&file_priv->context_xa);
891 	return err;
892 }
893 
894 void i915_gem_context_close(struct drm_file *file)
895 {
896 	struct drm_i915_file_private *file_priv = file->driver_priv;
897 	struct i915_address_space *vm;
898 	struct i915_gem_context *ctx;
899 	unsigned long idx;
900 
901 	xa_for_each(&file_priv->context_xa, idx, ctx)
902 		context_close(ctx);
903 	xa_destroy(&file_priv->context_xa);
904 
905 	xa_for_each(&file_priv->vm_xa, idx, vm)
906 		i915_vm_put(vm);
907 	xa_destroy(&file_priv->vm_xa);
908 }
909 
910 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
911 			     struct drm_file *file)
912 {
913 	struct drm_i915_private *i915 = to_i915(dev);
914 	struct drm_i915_gem_vm_control *args = data;
915 	struct drm_i915_file_private *file_priv = file->driver_priv;
916 	struct i915_ppgtt *ppgtt;
917 	u32 id;
918 	int err;
919 
920 	if (!HAS_FULL_PPGTT(i915))
921 		return -ENODEV;
922 
923 	if (args->flags)
924 		return -EINVAL;
925 
926 	ppgtt = i915_ppgtt_create(&i915->gt);
927 	if (IS_ERR(ppgtt))
928 		return PTR_ERR(ppgtt);
929 
930 	ppgtt->vm.file = file_priv;
931 
932 	if (args->extensions) {
933 		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
934 					   NULL, 0,
935 					   ppgtt);
936 		if (err)
937 			goto err_put;
938 	}
939 
940 	err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
941 		       xa_limit_32b, GFP_KERNEL);
942 	if (err)
943 		goto err_put;
944 
945 	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
946 	args->vm_id = id;
947 	return 0;
948 
949 err_put:
950 	i915_vm_put(&ppgtt->vm);
951 	return err;
952 }
953 
954 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
955 			      struct drm_file *file)
956 {
957 	struct drm_i915_file_private *file_priv = file->driver_priv;
958 	struct drm_i915_gem_vm_control *args = data;
959 	struct i915_address_space *vm;
960 
961 	if (args->flags)
962 		return -EINVAL;
963 
964 	if (args->extensions)
965 		return -EINVAL;
966 
967 	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
968 	if (!vm)
969 		return -ENOENT;
970 
971 	i915_vm_put(vm);
972 	return 0;
973 }
974 
975 struct context_barrier_task {
976 	struct i915_active base;
977 	void (*task)(void *data);
978 	void *data;
979 };
980 
981 static void cb_retire(struct i915_active *base)
982 {
983 	struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
984 
985 	if (cb->task)
986 		cb->task(cb->data);
987 
988 	i915_active_fini(&cb->base);
989 	kfree(cb);
990 }
991 
992 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
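/*
 * Queue a barrier request on each of the context's engines (filtered by
 * @engines and the optional @skip callback) and invoke @task once every
 * barrier request has retired.
 */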
993 static int context_barrier_task(struct i915_gem_context *ctx,
994 				intel_engine_mask_t engines,
995 				bool (*skip)(struct intel_context *ce, void *data),
996 				int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data),
997 				int (*emit)(struct i915_request *rq, void *data),
998 				void (*task)(void *data),
999 				void *data)
1000 {
1001 	struct context_barrier_task *cb;
1002 	struct i915_gem_engines_iter it;
1003 	struct i915_gem_engines *e;
1004 	struct i915_gem_ww_ctx ww;
1005 	struct intel_context *ce;
1006 	int err = 0;
1007 
1008 	GEM_BUG_ON(!task);
1009 
1010 	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
1011 	if (!cb)
1012 		return -ENOMEM;
1013 
1014 	i915_active_init(&cb->base, NULL, cb_retire, 0);
1015 	err = i915_active_acquire(&cb->base);
1016 	if (err) {
1017 		kfree(cb);
1018 		return err;
1019 	}
1020 
1021 	e = __context_engines_await(ctx, NULL);
1022 	if (!e) {
1023 		i915_active_release(&cb->base);
1024 		return -ENOENT;
1025 	}
1026 
1027 	for_each_gem_engine(ce, e, it) {
1028 		struct i915_request *rq;
1029 
1030 		if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
1031 				       ce->engine->mask)) {
1032 			err = -ENXIO;
1033 			break;
1034 		}
1035 
1036 		if (!(ce->engine->mask & engines))
1037 			continue;
1038 
1039 		if (skip && skip(ce, data))
1040 			continue;
1041 
1042 		i915_gem_ww_ctx_init(&ww, true);
1043 retry:
1044 		err = intel_context_pin_ww(ce, &ww);
1045 		if (err)
1046 			goto err;
1047 
1048 		if (pin)
1049 			err = pin(ce, &ww, data);
1050 		if (err)
1051 			goto err_unpin;
1052 
1053 		rq = i915_request_create(ce);
1054 		if (IS_ERR(rq)) {
1055 			err = PTR_ERR(rq);
1056 			goto err_unpin;
1057 		}
1058 
1059 		err = 0;
1060 		if (emit)
1061 			err = emit(rq, data);
1062 		if (err == 0)
1063 			err = i915_active_add_request(&cb->base, rq);
1064 
1065 		i915_request_add(rq);
1066 err_unpin:
1067 		intel_context_unpin(ce);
1068 err:
1069 		if (err == -EDEADLK) {
1070 			err = i915_gem_ww_ctx_backoff(&ww);
1071 			if (!err)
1072 				goto retry;
1073 		}
1074 		i915_gem_ww_ctx_fini(&ww);
1075 
1076 		if (err)
1077 			break;
1078 	}
1079 	i915_sw_fence_complete(&e->fence);
1080 
1081 	cb->task = err ? NULL : task; /* caller needs to unwind instead */
1082 	cb->data = data;
1083 
1084 	i915_active_release(&cb->base);
1085 
1086 	return err;
1087 }
1088 
1089 static int get_ppgtt(struct drm_i915_file_private *file_priv,
1090 		     struct i915_gem_context *ctx,
1091 		     struct drm_i915_gem_context_param *args)
1092 {
1093 	struct i915_address_space *vm;
1094 	int err;
1095 	u32 id;
1096 
1097 	if (!rcu_access_pointer(ctx->vm))
1098 		return -ENODEV;
1099 
1100 	rcu_read_lock();
1101 	vm = context_get_vm_rcu(ctx);
1102 	rcu_read_unlock();
1103 	if (!vm)
1104 		return -ENODEV;
1105 
1106 	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1107 	if (err)
1108 		goto err_put;
1109 
1110 	i915_vm_open(vm);
1111 
1112 	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1113 	args->value = id;
1114 	args->size = 0;
1115 
1116 err_put:
1117 	i915_vm_put(vm);
1118 	return err;
1119 }
1120 
1121 static void set_ppgtt_barrier(void *data)
1122 {
1123 	struct i915_address_space *old = data;
1124 
1125 	if (GRAPHICS_VER(old->i915) < 8)
1126 		gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
1127 
1128 	i915_vm_close(old);
1129 }
1130 
1131 static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data)
1132 {
1133 	struct i915_address_space *vm = ce->vm;
1134 
1135 	if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915))
1136 		/* ppGTT is not part of the legacy context image */
1137 		return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);
1138 
1139 	return 0;
1140 }
1141 
1142 static int emit_ppgtt_update(struct i915_request *rq, void *data)
1143 {
1144 	struct i915_address_space *vm = rq->context->vm;
1145 	struct intel_engine_cs *engine = rq->engine;
1146 	u32 base = engine->mmio_base;
1147 	u32 *cs;
1148 	int i;
1149 
1150 	if (i915_vm_is_4lvl(vm)) {
1151 		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1152 		const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
1153 
1154 		cs = intel_ring_begin(rq, 6);
1155 		if (IS_ERR(cs))
1156 			return PTR_ERR(cs);
1157 
1158 		*cs++ = MI_LOAD_REGISTER_IMM(2);
1159 
1160 		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1161 		*cs++ = upper_32_bits(pd_daddr);
1162 		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1163 		*cs++ = lower_32_bits(pd_daddr);
1164 
1165 		*cs++ = MI_NOOP;
1166 		intel_ring_advance(rq, cs);
1167 	} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1168 		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1169 		int err;
1170 
1171 		/* Magic required to prevent forcewake errors! */
1172 		err = engine->emit_flush(rq, EMIT_INVALIDATE);
1173 		if (err)
1174 			return err;
1175 
1176 		cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1177 		if (IS_ERR(cs))
1178 			return PTR_ERR(cs);
1179 
1180 		*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
1181 		for (i = GEN8_3LVL_PDPES; i--; ) {
1182 			const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1183 
1184 			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1185 			*cs++ = upper_32_bits(pd_daddr);
1186 			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1187 			*cs++ = lower_32_bits(pd_daddr);
1188 		}
1189 		*cs++ = MI_NOOP;
1190 		intel_ring_advance(rq, cs);
1191 	}
1192 
1193 	return 0;
1194 }
1195 
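/*
 * Only engines that already carry ppgtt state need the update emitted: a
 * populated context image on execlists platforms, or a currently pinned
 * context on legacy ring submission.
 */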
1196 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
1197 {
1198 	if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
1199 		return !ce->state;
1200 	else
1201 		return !atomic_read(&ce->pin_count);
1202 }
1203 
1204 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1205 		     struct i915_gem_context *ctx,
1206 		     struct drm_i915_gem_context_param *args)
1207 {
1208 	struct i915_address_space *vm, *old;
1209 	int err;
1210 
1211 	if (args->size)
1212 		return -EINVAL;
1213 
1214 	if (!rcu_access_pointer(ctx->vm))
1215 		return -ENODEV;
1216 
1217 	if (upper_32_bits(args->value))
1218 		return -ENOENT;
1219 
1220 	rcu_read_lock();
1221 	vm = xa_load(&file_priv->vm_xa, args->value);
1222 	if (vm && !kref_get_unless_zero(&vm->ref))
1223 		vm = NULL;
1224 	rcu_read_unlock();
1225 	if (!vm)
1226 		return -ENOENT;
1227 
1228 	err = mutex_lock_interruptible(&ctx->mutex);
1229 	if (err)
1230 		goto out;
1231 
1232 	if (i915_gem_context_is_closed(ctx)) {
1233 		err = -ENOENT;
1234 		goto unlock;
1235 	}
1236 
1237 	if (vm == rcu_access_pointer(ctx->vm))
1238 		goto unlock;
1239 
1240 	old = __set_ppgtt(ctx, vm);
1241 
1242 	/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
1243 	lut_close(ctx);
1244 
1245 	/*
1246 	 * We need to flush any requests using the current ppgtt before
1247 	 * we release it as the requests do not hold a reference themselves,
1248 	 * only indirectly through the context.
1249 	 */
1250 	err = context_barrier_task(ctx, ALL_ENGINES,
1251 				   skip_ppgtt_update,
1252 				   pin_ppgtt_update,
1253 				   emit_ppgtt_update,
1254 				   set_ppgtt_barrier,
1255 				   old);
1256 	if (err) {
1257 		i915_vm_close(__set_ppgtt(ctx, old));
1258 		i915_vm_close(old);
1259 		lut_close(ctx); /* force a rebuild of the old obj:vma cache */
1260 	}
1261 
1262 unlock:
1263 	mutex_unlock(&ctx->mutex);
1264 out:
1265 	i915_vm_put(vm);
1266 	return err;
1267 }
1268 
1269 int
1270 i915_gem_user_to_context_sseu(struct intel_gt *gt,
1271 			      const struct drm_i915_gem_context_param_sseu *user,
1272 			      struct intel_sseu *context)
1273 {
1274 	const struct sseu_dev_info *device = &gt->info.sseu;
1275 	struct drm_i915_private *i915 = gt->i915;
1276 
1277 	/* No zeros in any field. */
1278 	if (!user->slice_mask || !user->subslice_mask ||
1279 	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1280 		return -EINVAL;
1281 
1282 	/* Max > min. */
1283 	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1284 		return -EINVAL;
1285 
1286 	/*
1287 	 * Some future proofing on the types since the uAPI is wider than the
1288 	 * current internal implementation.
1289 	 */
1290 	if (overflows_type(user->slice_mask, context->slice_mask) ||
1291 	    overflows_type(user->subslice_mask, context->subslice_mask) ||
1292 	    overflows_type(user->min_eus_per_subslice,
1293 			   context->min_eus_per_subslice) ||
1294 	    overflows_type(user->max_eus_per_subslice,
1295 			   context->max_eus_per_subslice))
1296 		return -EINVAL;
1297 
1298 	/* Check validity against hardware. */
1299 	if (user->slice_mask & ~device->slice_mask)
1300 		return -EINVAL;
1301 
1302 	if (user->subslice_mask & ~device->subslice_mask[0])
1303 		return -EINVAL;
1304 
1305 	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1306 		return -EINVAL;
1307 
1308 	context->slice_mask = user->slice_mask;
1309 	context->subslice_mask = user->subslice_mask;
1310 	context->min_eus_per_subslice = user->min_eus_per_subslice;
1311 	context->max_eus_per_subslice = user->max_eus_per_subslice;
1312 
1313 	/* Part specific restrictions. */
1314 	if (GRAPHICS_VER(i915) == 11) {
1315 		unsigned int hw_s = hweight8(device->slice_mask);
1316 		unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1317 		unsigned int req_s = hweight8(context->slice_mask);
1318 		unsigned int req_ss = hweight8(context->subslice_mask);
1319 
1320 		/*
1321 		 * Only full subslice enablement is possible if more than one
1322 		 * slice is turned on.
1323 		 */
1324 		if (req_s > 1 && req_ss != hw_ss_per_s)
1325 			return -EINVAL;
1326 
1327 		/*
1328 		 * If more than four (SScount bitfield limit) subslices are
1329 		 * requested then the number has to be even.
1330 		 */
1331 		if (req_ss > 4 && (req_ss & 1))
1332 			return -EINVAL;
1333 
		/*
		 * If only one slice is enabled and the subslice count is below
		 * the device's full enablement, it must be at most half of all
		 * the available subslices.
		 */
1339 		if (req_s == 1 && req_ss < hw_ss_per_s &&
1340 		    req_ss > (hw_ss_per_s / 2))
1341 			return -EINVAL;
1342 
1343 		/* ABI restriction - VME use case only. */
1344 
1345 		/* All slices or one slice only. */
1346 		if (req_s != 1 && req_s != hw_s)
1347 			return -EINVAL;
1348 
1349 		/*
1350 		 * Half subslices or full enablement only when one slice is
1351 		 * enabled.
1352 		 */
1353 		if (req_s == 1 &&
1354 		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1355 			return -EINVAL;
1356 
1357 		/* No EU configuration changes. */
1358 		if ((user->min_eus_per_subslice !=
1359 		     device->max_eus_per_subslice) ||
1360 		    (user->max_eus_per_subslice !=
1361 		     device->max_eus_per_subslice))
1362 			return -EINVAL;
1363 	}
1364 
1365 	return 0;
1366 }
1367 
1368 static int set_sseu(struct i915_gem_context *ctx,
1369 		    struct drm_i915_gem_context_param *args)
1370 {
1371 	struct drm_i915_private *i915 = ctx->i915;
1372 	struct drm_i915_gem_context_param_sseu user_sseu;
1373 	struct intel_context *ce;
1374 	struct intel_sseu sseu;
1375 	unsigned long lookup;
1376 	int ret;
1377 
1378 	if (args->size < sizeof(user_sseu))
1379 		return -EINVAL;
1380 
1381 	if (GRAPHICS_VER(i915) != 11)
1382 		return -ENODEV;
1383 
1384 	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1385 			   sizeof(user_sseu)))
1386 		return -EFAULT;
1387 
1388 	if (user_sseu.rsvd)
1389 		return -EINVAL;
1390 
1391 	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1392 		return -EINVAL;
1393 
1394 	lookup = 0;
1395 	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1396 		lookup |= LOOKUP_USER_INDEX;
1397 
1398 	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1399 	if (IS_ERR(ce))
1400 		return PTR_ERR(ce);
1401 
1402 	/* Only render engine supports RPCS configuration. */
1403 	if (ce->engine->class != RENDER_CLASS) {
1404 		ret = -ENODEV;
1405 		goto out_ce;
1406 	}
1407 
1408 	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
1409 	if (ret)
1410 		goto out_ce;
1411 
1412 	ret = intel_context_reconfigure_sseu(ce, sseu);
1413 	if (ret)
1414 		goto out_ce;
1415 
1416 	args->size = sizeof(user_sseu);
1417 
1418 out_ce:
1419 	intel_context_put(ce);
1420 	return ret;
1421 }
1422 
1423 struct set_engines {
1424 	struct i915_gem_context *ctx;
1425 	struct i915_gem_engines *engines;
1426 };
1427 
1428 static int
1429 set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1430 {
1431 	struct i915_context_engines_load_balance __user *ext =
1432 		container_of_user(base, typeof(*ext), base);
1433 	const struct set_engines *set = data;
1434 	struct drm_i915_private *i915 = set->ctx->i915;
1435 	struct intel_engine_cs *stack[16];
1436 	struct intel_engine_cs **siblings;
1437 	struct intel_context *ce;
1438 	u16 num_siblings, idx;
1439 	unsigned int n;
1440 	int err;
1441 
1442 	if (!HAS_EXECLISTS(i915))
1443 		return -ENODEV;
1444 
1445 	if (intel_uc_uses_guc_submission(&i915->gt.uc))
		return -ENODEV; /* not implemented yet */
1447 
1448 	if (get_user(idx, &ext->engine_index))
1449 		return -EFAULT;
1450 
1451 	if (idx >= set->engines->num_engines) {
1452 		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
1453 			idx, set->engines->num_engines);
1454 		return -EINVAL;
1455 	}
1456 
1457 	idx = array_index_nospec(idx, set->engines->num_engines);
1458 	if (set->engines->engines[idx]) {
1459 		drm_dbg(&i915->drm,
1460 			"Invalid placement[%d], already occupied\n", idx);
1461 		return -EEXIST;
1462 	}
1463 
1464 	if (get_user(num_siblings, &ext->num_siblings))
1465 		return -EFAULT;
1466 
1467 	err = check_user_mbz(&ext->flags);
1468 	if (err)
1469 		return err;
1470 
1471 	err = check_user_mbz(&ext->mbz64);
1472 	if (err)
1473 		return err;
1474 
1475 	siblings = stack;
1476 	if (num_siblings > ARRAY_SIZE(stack)) {
1477 		siblings = kmalloc_array(num_siblings,
1478 					 sizeof(*siblings),
1479 					 GFP_KERNEL);
1480 		if (!siblings)
1481 			return -ENOMEM;
1482 	}
1483 
1484 	for (n = 0; n < num_siblings; n++) {
1485 		struct i915_engine_class_instance ci;
1486 
1487 		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1488 			err = -EFAULT;
1489 			goto out_siblings;
1490 		}
1491 
1492 		siblings[n] = intel_engine_lookup_user(i915,
1493 						       ci.engine_class,
1494 						       ci.engine_instance);
1495 		if (!siblings[n]) {
1496 			drm_dbg(&i915->drm,
1497 				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
1498 				n, ci.engine_class, ci.engine_instance);
1499 			err = -EINVAL;
1500 			goto out_siblings;
1501 		}
1502 	}
1503 
1504 	ce = intel_execlists_create_virtual(siblings, n);
1505 	if (IS_ERR(ce)) {
1506 		err = PTR_ERR(ce);
1507 		goto out_siblings;
1508 	}
1509 
1510 	intel_context_set_gem(ce, set->ctx);
1511 
1512 	if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1513 		intel_context_put(ce);
1514 		err = -EEXIST;
1515 		goto out_siblings;
1516 	}
1517 
1518 out_siblings:
1519 	if (siblings != stack)
1520 		kfree(siblings);
1521 
1522 	return err;
1523 }
1524 
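/*
 * Note: the bond extension only validates the user's arguments here; the
 * bonding information itself is not stored or applied by the backend.
 */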
1525 static int
1526 set_engines__bond(struct i915_user_extension __user *base, void *data)
1527 {
1528 	struct i915_context_engines_bond __user *ext =
1529 		container_of_user(base, typeof(*ext), base);
1530 	const struct set_engines *set = data;
1531 	struct drm_i915_private *i915 = set->ctx->i915;
1532 	struct i915_engine_class_instance ci;
1533 	struct intel_engine_cs *virtual;
1534 	struct intel_engine_cs *master;
1535 	u16 idx, num_bonds;
1536 	int err, n;
1537 
1538 	if (get_user(idx, &ext->virtual_index))
1539 		return -EFAULT;
1540 
1541 	if (idx >= set->engines->num_engines) {
1542 		drm_dbg(&i915->drm,
1543 			"Invalid index for virtual engine: %d >= %d\n",
1544 			idx, set->engines->num_engines);
1545 		return -EINVAL;
1546 	}
1547 
1548 	idx = array_index_nospec(idx, set->engines->num_engines);
1549 	if (!set->engines->engines[idx]) {
1550 		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
1551 		return -EINVAL;
1552 	}
1553 	virtual = set->engines->engines[idx]->engine;
1554 
1555 	if (intel_engine_is_virtual(virtual)) {
1556 		drm_dbg(&i915->drm,
1557 			"Bonding with virtual engines not allowed\n");
1558 		return -EINVAL;
1559 	}
1560 
1561 	err = check_user_mbz(&ext->flags);
1562 	if (err)
1563 		return err;
1564 
1565 	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1566 		err = check_user_mbz(&ext->mbz64[n]);
1567 		if (err)
1568 			return err;
1569 	}
1570 
1571 	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1572 		return -EFAULT;
1573 
1574 	master = intel_engine_lookup_user(i915,
1575 					  ci.engine_class, ci.engine_instance);
1576 	if (!master) {
1577 		drm_dbg(&i915->drm,
1578 			"Unrecognised master engine: { class:%u, instance:%u }\n",
1579 			ci.engine_class, ci.engine_instance);
1580 		return -EINVAL;
1581 	}
1582 
1583 	if (get_user(num_bonds, &ext->num_bonds))
1584 		return -EFAULT;
1585 
1586 	for (n = 0; n < num_bonds; n++) {
1587 		struct intel_engine_cs *bond;
1588 
1589 		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1590 			return -EFAULT;
1591 
1592 		bond = intel_engine_lookup_user(i915,
1593 						ci.engine_class,
1594 						ci.engine_instance);
1595 		if (!bond) {
1596 			drm_dbg(&i915->drm,
1597 				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1598 				n, ci.engine_class, ci.engine_instance);
1599 			return -EINVAL;
1600 		}
1601 	}
1602 
1603 	return 0;
1604 }
1605 
1606 static const i915_user_extension_fn set_engines__extensions[] = {
1607 	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1608 	[I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1609 };
1610 
1611 static int
1612 set_engines(struct i915_gem_context *ctx,
1613 	    const struct drm_i915_gem_context_param *args)
1614 {
1615 	struct drm_i915_private *i915 = ctx->i915;
1616 	struct i915_context_param_engines __user *user =
1617 		u64_to_user_ptr(args->value);
1618 	struct set_engines set = { .ctx = ctx };
1619 	unsigned int num_engines, n;
1620 	u64 extensions;
1621 	int err;
1622 
1623 	if (!args->size) { /* switch back to legacy user_ring_map */
1624 		if (!i915_gem_context_user_engines(ctx))
1625 			return 0;
1626 
1627 		set.engines = default_engines(ctx);
1628 		if (IS_ERR(set.engines))
1629 			return PTR_ERR(set.engines);
1630 
1631 		goto replace;
1632 	}
1633 
1634 	BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1635 	if (args->size < sizeof(*user) ||
1636 	    !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1637 		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
1638 			args->size);
1639 		return -EINVAL;
1640 	}
1641 
1642 	num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1643 	/* RING_MASK has no shift so we can use it directly here */
1644 	if (num_engines > I915_EXEC_RING_MASK + 1)
1645 		return -EINVAL;
1646 
1647 	set.engines = alloc_engines(num_engines);
1648 	if (!set.engines)
1649 		return -ENOMEM;
1650 
1651 	for (n = 0; n < num_engines; n++) {
1652 		struct i915_engine_class_instance ci;
1653 		struct intel_engine_cs *engine;
1654 		struct intel_context *ce;
1655 
1656 		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1657 			__free_engines(set.engines, n);
1658 			return -EFAULT;
1659 		}
1660 
1661 		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1662 		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1663 			set.engines->engines[n] = NULL;
1664 			continue;
1665 		}
1666 
1667 		engine = intel_engine_lookup_user(ctx->i915,
1668 						  ci.engine_class,
1669 						  ci.engine_instance);
1670 		if (!engine) {
1671 			drm_dbg(&i915->drm,
1672 				"Invalid engine[%d]: { class:%d, instance:%d }\n",
1673 				n, ci.engine_class, ci.engine_instance);
1674 			__free_engines(set.engines, n);
1675 			return -ENOENT;
1676 		}
1677 
1678 		ce = intel_context_create(engine);
1679 		if (IS_ERR(ce)) {
1680 			__free_engines(set.engines, n);
1681 			return PTR_ERR(ce);
1682 		}
1683 
1684 		intel_context_set_gem(ce, ctx);
1685 
1686 		set.engines->engines[n] = ce;
1687 	}
1688 	set.engines->num_engines = num_engines;
1689 
1690 	err = -EFAULT;
1691 	if (!get_user(extensions, &user->extensions))
1692 		err = i915_user_extensions(u64_to_user_ptr(extensions),
1693 					   set_engines__extensions,
1694 					   ARRAY_SIZE(set_engines__extensions),
1695 					   &set);
1696 	if (err) {
1697 		free_engines(set.engines);
1698 		return err;
1699 	}
1700 
1701 replace:
1702 	mutex_lock(&ctx->engines_mutex);
1703 	if (i915_gem_context_is_closed(ctx)) {
1704 		mutex_unlock(&ctx->engines_mutex);
1705 		free_engines(set.engines);
1706 		return -ENOENT;
1707 	}
1708 	if (args->size)
1709 		i915_gem_context_set_user_engines(ctx);
1710 	else
1711 		i915_gem_context_clear_user_engines(ctx);
1712 	set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
1713 	mutex_unlock(&ctx->engines_mutex);
1714 
1715 	/* Keep track of old engine sets for kill_context() */
1716 	engines_idle_release(ctx, set.engines);
1717 
1718 	return 0;
1719 }
1720 
1721 static int
1722 set_persistence(struct i915_gem_context *ctx,
1723 		const struct drm_i915_gem_context_param *args)
1724 {
1725 	if (args->size)
1726 		return -EINVAL;
1727 
1728 	return __context_set_persistence(ctx, args->value);
1729 }
1730 
1731 static void __apply_priority(struct intel_context *ce, void *arg)
1732 {
1733 	struct i915_gem_context *ctx = arg;
1734 
1735 	if (!intel_engine_has_timeslices(ce->engine))
1736 		return;
1737 
1738 	if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
1739 		intel_context_set_use_semaphores(ce);
1740 	else
1741 		intel_context_clear_use_semaphores(ce);
1742 }
1743 
1744 static int set_priority(struct i915_gem_context *ctx,
1745 			const struct drm_i915_gem_context_param *args)
1746 {
1747 	s64 priority = args->value;
1748 
1749 	if (args->size)
1750 		return -EINVAL;
1751 
1752 	if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1753 		return -ENODEV;
1754 
1755 	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1756 	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
1757 		return -EINVAL;
1758 
1759 	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1760 	    !capable(CAP_SYS_NICE))
1761 		return -EPERM;
1762 
1763 	ctx->sched.priority = priority;
1764 	context_apply_all(ctx, __apply_priority, ctx);
1765 
1766 	return 0;
1767 }
1768 
1769 static int ctx_setparam(struct drm_i915_file_private *fpriv,
1770 			struct i915_gem_context *ctx,
1771 			struct drm_i915_gem_context_param *args)
1772 {
1773 	int ret = 0;
1774 
1775 	switch (args->param) {
1776 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1777 		if (args->size)
1778 			ret = -EINVAL;
1779 		else if (args->value)
1780 			i915_gem_context_set_no_error_capture(ctx);
1781 		else
1782 			i915_gem_context_clear_no_error_capture(ctx);
1783 		break;
1784 
1785 	case I915_CONTEXT_PARAM_BANNABLE:
1786 		if (args->size)
1787 			ret = -EINVAL;
1788 		else if (!capable(CAP_SYS_ADMIN) && !args->value)
1789 			ret = -EPERM;
1790 		else if (args->value)
1791 			i915_gem_context_set_bannable(ctx);
1792 		else
1793 			i915_gem_context_clear_bannable(ctx);
1794 		break;
1795 
1796 	case I915_CONTEXT_PARAM_RECOVERABLE:
1797 		if (args->size)
1798 			ret = -EINVAL;
1799 		else if (args->value)
1800 			i915_gem_context_set_recoverable(ctx);
1801 		else
1802 			i915_gem_context_clear_recoverable(ctx);
1803 		break;
1804 
1805 	case I915_CONTEXT_PARAM_PRIORITY:
1806 		ret = set_priority(ctx, args);
1807 		break;
1808 
1809 	case I915_CONTEXT_PARAM_SSEU:
1810 		ret = set_sseu(ctx, args);
1811 		break;
1812 
1813 	case I915_CONTEXT_PARAM_VM:
1814 		ret = set_ppgtt(fpriv, ctx, args);
1815 		break;
1816 
1817 	case I915_CONTEXT_PARAM_ENGINES:
1818 		ret = set_engines(ctx, args);
1819 		break;
1820 
1821 	case I915_CONTEXT_PARAM_PERSISTENCE:
1822 		ret = set_persistence(ctx, args);
1823 		break;
1824 
1825 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
1826 	case I915_CONTEXT_PARAM_BAN_PERIOD:
1827 	case I915_CONTEXT_PARAM_RINGSIZE:
1828 	default:
1829 		ret = -EINVAL;
1830 		break;
1831 	}
1832 
1833 	return ret;
1834 }
1835 
1836 struct create_ext {
1837 	struct i915_gem_context *ctx;
1838 	struct drm_i915_file_private *fpriv;
1839 };
1840 
1841 static int create_setparam(struct i915_user_extension __user *ext, void *data)
1842 {
1843 	struct drm_i915_gem_context_create_ext_setparam local;
1844 	const struct create_ext *arg = data;
1845 
1846 	if (copy_from_user(&local, ext, sizeof(local)))
1847 		return -EFAULT;
1848 
1849 	if (local.param.ctx_id)
1850 		return -EINVAL;
1851 
1852 	return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
1853 }
1854 
1855 static int invalid_ext(struct i915_user_extension __user *ext, void *data)
1856 {
1857 	return -EINVAL;
1858 }
1859 
1860 static const i915_user_extension_fn create_extensions[] = {
1861 	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
1862 	[I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
1863 };
1864 
1865 static bool client_is_banned(struct drm_i915_file_private *file_priv)
1866 {
1867 	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
1868 }
1869 
1870 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
1871 				  struct drm_file *file)
1872 {
1873 	struct drm_i915_private *i915 = to_i915(dev);
1874 	struct drm_i915_gem_context_create_ext *args = data;
1875 	struct create_ext ext_data;
1876 	int ret;
1877 	u32 id;
1878 
1879 	if (!DRIVER_CAPS(i915)->has_logical_contexts)
1880 		return -ENODEV;
1881 
1882 	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
1883 		return -EINVAL;
1884 
1885 	ret = intel_gt_terminally_wedged(&i915->gt);
1886 	if (ret)
1887 		return ret;
1888 
1889 	ext_data.fpriv = file->driver_priv;
1890 	if (client_is_banned(ext_data.fpriv)) {
1891 		drm_dbg(&i915->drm,
1892 			"client %s[%d] banned from creating ctx\n",
1893 			current->comm, task_pid_nr(current));
1894 		return -EIO;
1895 	}
1896 
1897 	ext_data.ctx = i915_gem_create_context(i915, args->flags);
1898 	if (IS_ERR(ext_data.ctx))
1899 		return PTR_ERR(ext_data.ctx);
1900 
1901 	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
1902 		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
1903 					   create_extensions,
1904 					   ARRAY_SIZE(create_extensions),
1905 					   &ext_data);
1906 		if (ret)
1907 			goto err_ctx;
1908 	}
1909 
1910 	ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
1911 	if (ret < 0)
1912 		goto err_ctx;
1913 
1914 	args->ctx_id = id;
1915 	drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
1916 
1917 	return 0;
1918 
1919 err_ctx:
1920 	context_close(ext_data.ctx);
1921 	return ret;
1922 }
1923 
1924 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
1925 				   struct drm_file *file)
1926 {
1927 	struct drm_i915_gem_context_destroy *args = data;
1928 	struct drm_i915_file_private *file_priv = file->driver_priv;
1929 	struct i915_gem_context *ctx;
1930 
1931 	if (args->pad != 0)
1932 		return -EINVAL;
1933 
1934 	if (!args->ctx_id)
1935 		return -ENOENT;
1936 
1937 	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
1938 	if (!ctx)
1939 		return -ENOENT;
1940 
1941 	context_close(ctx);
1942 	return 0;
1943 }
1944 
1945 static int get_sseu(struct i915_gem_context *ctx,
1946 		    struct drm_i915_gem_context_param *args)
1947 {
1948 	struct drm_i915_gem_context_param_sseu user_sseu;
1949 	struct intel_context *ce;
1950 	unsigned long lookup;
1951 	int err;
1952 
1953 	if (args->size == 0)
1954 		goto out;
1955 	else if (args->size < sizeof(user_sseu))
1956 		return -EINVAL;
1957 
1958 	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1959 			   sizeof(user_sseu)))
1960 		return -EFAULT;
1961 
1962 	if (user_sseu.rsvd)
1963 		return -EINVAL;
1964 
1965 	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1966 		return -EINVAL;
1967 
1968 	lookup = 0;
1969 	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1970 		lookup |= LOOKUP_USER_INDEX;
1971 
1972 	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1973 	if (IS_ERR(ce))
1974 		return PTR_ERR(ce);
1975 
1976 	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
1977 	if (err) {
1978 		intel_context_put(ce);
1979 		return err;
1980 	}
1981 
1982 	user_sseu.slice_mask = ce->sseu.slice_mask;
1983 	user_sseu.subslice_mask = ce->sseu.subslice_mask;
1984 	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
1985 	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
1986 
1987 	intel_context_unlock_pinned(ce);
1988 	intel_context_put(ce);
1989 
1990 	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
1991 			 sizeof(user_sseu)))
1992 		return -EFAULT;
1993 
1994 out:
1995 	args->size = sizeof(user_sseu);
1996 
1997 	return 0;
1998 }
1999 
2000 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2001 				    struct drm_file *file)
2002 {
2003 	struct drm_i915_file_private *file_priv = file->driver_priv;
2004 	struct drm_i915_gem_context_param *args = data;
2005 	struct i915_gem_context *ctx;
2006 	int ret = 0;
2007 
2008 	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2009 	if (!ctx)
2010 		return -ENOENT;
2011 
2012 	switch (args->param) {
2013 	case I915_CONTEXT_PARAM_GTT_SIZE:
2014 		args->size = 0;
2015 		rcu_read_lock();
2016 		if (rcu_access_pointer(ctx->vm))
2017 			args->value = rcu_dereference(ctx->vm)->total;
2018 		else
2019 			args->value = to_i915(dev)->ggtt.vm.total;
2020 		rcu_read_unlock();
2021 		break;
2022 
2023 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2024 		args->size = 0;
2025 		args->value = i915_gem_context_no_error_capture(ctx);
2026 		break;
2027 
2028 	case I915_CONTEXT_PARAM_BANNABLE:
2029 		args->size = 0;
2030 		args->value = i915_gem_context_is_bannable(ctx);
2031 		break;
2032 
2033 	case I915_CONTEXT_PARAM_RECOVERABLE:
2034 		args->size = 0;
2035 		args->value = i915_gem_context_is_recoverable(ctx);
2036 		break;
2037 
2038 	case I915_CONTEXT_PARAM_PRIORITY:
2039 		args->size = 0;
2040 		args->value = ctx->sched.priority;
2041 		break;
2042 
2043 	case I915_CONTEXT_PARAM_SSEU:
2044 		ret = get_sseu(ctx, args);
2045 		break;
2046 
2047 	case I915_CONTEXT_PARAM_VM:
2048 		ret = get_ppgtt(file_priv, ctx, args);
2049 		break;
2050 
2051 	case I915_CONTEXT_PARAM_PERSISTENCE:
2052 		args->size = 0;
2053 		args->value = i915_gem_context_is_persistent(ctx);
2054 		break;
2055 
2056 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
2057 	case I915_CONTEXT_PARAM_BAN_PERIOD:
2058 	case I915_CONTEXT_PARAM_ENGINES:
2059 	case I915_CONTEXT_PARAM_RINGSIZE:
2060 	default:
2061 		ret = -EINVAL;
2062 		break;
2063 	}
2064 
2065 	i915_gem_context_put(ctx);
2066 	return ret;
2067 }
2068 
2069 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2070 				    struct drm_file *file)
2071 {
2072 	struct drm_i915_file_private *file_priv = file->driver_priv;
2073 	struct drm_i915_gem_context_param *args = data;
2074 	struct i915_gem_context *ctx;
2075 	int ret;
2076 
2077 	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2078 	if (!ctx)
2079 		return -ENOENT;
2080 
2081 	ret = ctx_setparam(file_priv, ctx, args);
2082 
2083 	i915_gem_context_put(ctx);
2084 	return ret;
2085 }
2086 
2087 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2088 				       void *data, struct drm_file *file)
2089 {
2090 	struct drm_i915_private *i915 = to_i915(dev);
2091 	struct drm_i915_reset_stats *args = data;
2092 	struct i915_gem_context *ctx;
2093 
2094 	if (args->flags || args->pad)
2095 		return -EINVAL;
2096 
2097 	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
2098 	if (!ctx)
2099 		return -ENOENT;
2100 
2101 	/*
2102 	 * We opt for unserialised reads here. This may result in tearing
2103 	 * in the extremely unlikely event of a GPU hang on this context
2104 	 * as we are querying them. If we need that extra layer of protection,
2105 	 * we should wrap the hangstats with a seqlock.
2106 	 */
2107 
2108 	if (capable(CAP_SYS_ADMIN))
2109 		args->reset_count = i915_reset_count(&i915->gpu_error);
2110 	else
2111 		args->reset_count = 0;
2112 
2113 	args->batch_active = atomic_read(&ctx->guilty_count);
2114 	args->batch_pending = atomic_read(&ctx->active_count);
2115 
2116 	i915_gem_context_put(ctx);
2117 	return 0;
2118 }
2119 
2120 /* GEM context-engines iterator: for_each_gem_engine() */
2121 struct intel_context *
2122 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2123 {
2124 	const struct i915_gem_engines *e = it->engines;
2125 	struct intel_context *ctx;
2126 
2127 	if (unlikely(!e))
2128 		return NULL;
2129 
2130 	do {
2131 		if (it->idx >= e->num_engines)
2132 			return NULL;
2133 
2134 		ctx = e->engines[it->idx++];
2135 	} while (!ctx);
2136 
2137 	return ctx;
2138 }
2139 
2140 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2141 #include "selftests/mock_context.c"
2142 #include "selftests/i915_gem_context.c"
2143 #endif
2144 
2145 static void i915_global_gem_context_shrink(void)
2146 {
2147 	kmem_cache_shrink(global.slab_luts);
2148 }
2149 
2150 static void i915_global_gem_context_exit(void)
2151 {
2152 	kmem_cache_destroy(global.slab_luts);
2153 }
2154 
2155 static struct i915_global_gem_context global = { {
2156 	.shrink = i915_global_gem_context_shrink,
2157 	.exit = i915_global_gem_context_exit,
2158 } };
2159 
2160 int __init i915_global_gem_context_init(void)
2161 {
2162 	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2163 	if (!global.slab_luts)
2164 		return -ENOMEM;
2165 
2166 	i915_global_register(&global.base);
2167 	return 0;
2168 }
2169