1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2011-2012 Intel Corporation
5  */
6 
7 /*
8  * This file implements HW context support. On gen5+ a HW context consists of an
9  * opaque GPU object which is referenced at times of context saves and restores.
10  * With RC6 enabled, the context is also referenced as the GPU enters and exits
11  * RC6 (the GPU has its own internal power context, except on gen5). Though
12  * something like a context does exist for the media ring, the code only
13  * supports contexts for the render ring.
14  *
15  * In software, there is a distinction between contexts created by the user,
16  * and the default HW context. The default HW context is used by GPU clients
17  * that do not request setup of their own hardware context. The default
18  * context's state is never restored to help prevent programming errors. This
19  * would happen if a client ran and piggy-backed off another client's GPU state.
20  * The default context only exists to give the GPU something to load as the
21  * current one, forcing a save of the context we actually care about. In fact, the
22  * code could likely be constructed, albeit in a more complicated fashion, to
23  * never use the default context, though that limits the driver's ability to
24  * swap out and/or destroy other contexts.
25  *
26  * All other contexts are created as a request by the GPU client. These contexts
27  * store GPU state, and thus allow GPU clients to not re-emit state (and
28  * potentially query certain state) at any time. The kernel driver makes
29  * certain that the appropriate commands are inserted.
30  *
31  * The context life cycle is semi-complicated in that context BOs may live
32  * longer than the context itself because of the way the hardware and object
33  * tracking work. Below is a very crude representation of the state machine
34  * describing the context life.
35  *                                         refcount     pincount     active
36  * S0: initial state                          0            0           0
37  * S1: context created                        1            0           0
38  * S2: context is currently running           2            1           X
39  * S3: GPU referenced, but not current        2            0           1
40  * S4: context is current, but destroyed      1            1           0
41  * S5: like S3, but destroyed                 1            0           1
42  *
43  * The most common (but not all) transitions:
44  * S0->S1: client creates a context
45  * S1->S2: client submits execbuf with context
46  * S2->S3: another client submits an execbuf with a different context
47  * S3->S1: context object was retired
48  * S3->S2: the client submits another execbuf
49  * S2->S4: context destroy called with current context
50  * S3->S5->S0: destroy path
51  * S4->S5->S0: destroy path on current context
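 *
 * As a concrete example, a context used by a single client typically walks
 * S0->S1 on creation, S1->S2 on its first execbuf, S2->S3 once another
 * context becomes current, and back to S1 when retired; destroying it from
 * there drops the final reference and returns it to S0.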
52  *
53  * There are two confusing terms used above:
54  *  The "current context" means the context which is currently running on the
55  *  GPU. The GPU has loaded its state already and has stored away the gtt
56  *  offset of the BO. The GPU is not actively referencing the data at this
57  *  offset, but it will on the next context switch. The only way to avoid this
58  *  is to do a GPU reset.
59  *
60  *  An "active context" is one which was previously the "current context" and is
61  *  on the active list waiting for the next context switch to occur. Until this
62  *  happens, the object must remain at the same gtt offset. It is therefore
63  *  possible for a context to be destroyed while it is still active.
64  *
65  */
66 
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
69 
70 #include "gt/gen6_ppgtt.h"
71 #include "gt/intel_context.h"
72 #include "gt/intel_context_param.h"
73 #include "gt/intel_engine_heartbeat.h"
74 #include "gt/intel_engine_user.h"
75 #include "gt/intel_execlists_submission.h" /* virtual_engine */
76 #include "gt/intel_gpu_commands.h"
77 #include "gt/intel_ring.h"
78 
79 #include "i915_gem_context.h"
80 #include "i915_globals.h"
81 #include "i915_trace.h"
82 #include "i915_user_extensions.h"
83 
84 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
85 
86 static struct i915_global_gem_context {
87 	struct i915_global base;
88 	struct kmem_cache *slab_luts;
89 } global;
90 
91 struct i915_lut_handle *i915_lut_handle_alloc(void)
92 {
93 	return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
94 }
95 
96 void i915_lut_handle_free(struct i915_lut_handle *lut)
97 {
98 	kmem_cache_free(global.slab_luts, lut);
99 }
100 
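/*
 * lut_close() tears down the context's handle->vma lookup cache: it walks
 * ctx->handles_vma and, for each vma, unlinks this context's lut entry from
 * the object's lut_list before releasing the references that the handle kept
 * on the vma and object. It is called on context close and when the ppgtt is
 * replaced, after which the cache is rebuilt on the next execbuf lookup.
 */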
101 static void lut_close(struct i915_gem_context *ctx)
102 {
103 	struct radix_tree_iter iter;
104 	void __rcu **slot;
105 
106 	mutex_lock(&ctx->lut_mutex);
107 	rcu_read_lock();
108 	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
109 		struct i915_vma *vma = rcu_dereference_raw(*slot);
110 		struct drm_i915_gem_object *obj = vma->obj;
111 		struct i915_lut_handle *lut;
112 
113 		if (!kref_get_unless_zero(&obj->base.refcount))
114 			continue;
115 
116 		spin_lock(&obj->lut_lock);
117 		list_for_each_entry(lut, &obj->lut_list, obj_link) {
118 			if (lut->ctx != ctx)
119 				continue;
120 
121 			if (lut->handle != iter.index)
122 				continue;
123 
124 			list_del(&lut->obj_link);
125 			break;
126 		}
127 		spin_unlock(&obj->lut_lock);
128 
129 		if (&lut->obj_link != &obj->lut_list) {
130 			i915_lut_handle_free(lut);
131 			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
132 			i915_vma_close(vma);
133 			i915_gem_object_put(obj);
134 		}
135 
136 		i915_gem_object_put(obj);
137 	}
138 	rcu_read_unlock();
139 	mutex_unlock(&ctx->lut_mutex);
140 }
141 
142 static struct intel_context *
143 lookup_user_engine(struct i915_gem_context *ctx,
144 		   unsigned long flags,
145 		   const struct i915_engine_class_instance *ci)
146 #define LOOKUP_USER_INDEX BIT(0)
147 {
148 	int idx;
149 
150 	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
151 		return ERR_PTR(-EINVAL);
152 
153 	if (!i915_gem_context_user_engines(ctx)) {
154 		struct intel_engine_cs *engine;
155 
156 		engine = intel_engine_lookup_user(ctx->i915,
157 						  ci->engine_class,
158 						  ci->engine_instance);
159 		if (!engine)
160 			return ERR_PTR(-EINVAL);
161 
162 		idx = engine->legacy_idx;
163 	} else {
164 		idx = ci->engine_instance;
165 	}
166 
167 	return i915_gem_context_get_engine(ctx, idx);
168 }
169 
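/*
 * A sketch of the expected caller pattern (mirroring the uses later in this
 * file): the RCU read lock only covers the lookup itself, while the returned
 * vm carries its own strong reference that the caller must drop.
 *
 *	rcu_read_lock();
 *	vm = context_get_vm_rcu(ctx);
 *	rcu_read_unlock();
 *
 *	... use vm ...
 *
 *	i915_vm_put(vm);
 */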
170 static struct i915_address_space *
171 context_get_vm_rcu(struct i915_gem_context *ctx)
172 {
173 	GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
174 
175 	do {
176 		struct i915_address_space *vm;
177 
178 		/*
179 		 * We do not allow downgrading from full-ppgtt [to a shared
180 		 * global gtt], so ctx->vm cannot become NULL.
181 		 */
182 		vm = rcu_dereference(ctx->vm);
183 		if (!kref_get_unless_zero(&vm->ref))
184 			continue;
185 
186 		/*
187 		 * This ppgtt may have been reallocated between
188 		 * the read and the kref, and reassigned to a third
189 		 * context. In order to avoid inadvertent sharing
190 		 * of this ppgtt with that third context (rather than
191 		 * the one we intended), we have to confirm that we have the same
192 		 * ppgtt after passing through the strong memory
193 		 * barrier implied by a successful
194 		 * kref_get_unless_zero().
195 		 *
196 		 * Once we have acquired the current ppgtt of ctx,
197 		 * we no longer care if it is released from ctx, as
198 		 * it cannot be reallocated elsewhere.
199 		 */
200 
201 		if (vm == rcu_access_pointer(ctx->vm))
202 			return rcu_pointer_handoff(vm);
203 
204 		i915_vm_put(vm);
205 	} while (1);
206 }
207 
208 static void intel_context_set_gem(struct intel_context *ce,
209 				  struct i915_gem_context *ctx)
210 {
211 	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
212 	RCU_INIT_POINTER(ce->gem_context, ctx);
213 
214 	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
215 		ce->ring = __intel_context_ring_size(SZ_16K);
216 
217 	if (rcu_access_pointer(ctx->vm)) {
218 		struct i915_address_space *vm;
219 
220 		rcu_read_lock();
221 		vm = context_get_vm_rcu(ctx); /* acquires a strong vm reference */
222 		rcu_read_unlock();
223 
224 		i915_vm_put(ce->vm);
225 		ce->vm = vm;
226 	}
227 
228 	GEM_BUG_ON(ce->timeline);
229 	if (ctx->timeline)
230 		ce->timeline = intel_timeline_get(ctx->timeline);
231 
232 	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
233 	    intel_engine_has_timeslices(ce->engine))
234 		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
235 
236 	intel_context_set_watchdog_us(ce, ctx->watchdog.timeout_us);
237 }
238 
239 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
240 {
241 	while (count--) {
242 		if (!e->engines[count])
243 			continue;
244 
245 		intel_context_put(e->engines[count]);
246 	}
247 	kfree(e);
248 }
249 
250 static void free_engines(struct i915_gem_engines *e)
251 {
252 	__free_engines(e, e->num_engines);
253 }
254 
255 static void free_engines_rcu(struct rcu_head *rcu)
256 {
257 	struct i915_gem_engines *engines =
258 		container_of(rcu, struct i915_gem_engines, rcu);
259 
260 	i915_sw_fence_fini(&engines->fence);
261 	free_engines(engines);
262 }
263 
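/*
 * engines->fence tracks when a stale engine set is finally idle. Its
 * notification runs in two phases: FENCE_COMPLETE decouples the engines from
 * ctx->stale and drops the context reference taken in engines_idle_release(),
 * while FENCE_FREE defers freeing of the engines array to an RCU grace period
 * via free_engines_rcu().
 */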
264 static int __i915_sw_fence_call
265 engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
266 {
267 	struct i915_gem_engines *engines =
268 		container_of(fence, typeof(*engines), fence);
269 
270 	switch (state) {
271 	case FENCE_COMPLETE:
272 		if (!list_empty(&engines->link)) {
273 			struct i915_gem_context *ctx = engines->ctx;
274 			unsigned long flags;
275 
276 			spin_lock_irqsave(&ctx->stale.lock, flags);
277 			list_del(&engines->link);
278 			spin_unlock_irqrestore(&ctx->stale.lock, flags);
279 		}
280 		i915_gem_context_put(engines->ctx);
281 		break;
282 
283 	case FENCE_FREE:
284 		init_rcu_head(&engines->rcu);
285 		call_rcu(&engines->rcu, free_engines_rcu);
286 		break;
287 	}
288 
289 	return NOTIFY_DONE;
290 }
291 
292 static struct i915_gem_engines *alloc_engines(unsigned int count)
293 {
294 	struct i915_gem_engines *e;
295 
296 	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
297 	if (!e)
298 		return NULL;
299 
300 	i915_sw_fence_init(&e->fence, engines_notify);
301 	return e;
302 }
303 
304 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
305 {
306 	const struct intel_gt *gt = &ctx->i915->gt;
307 	struct intel_engine_cs *engine;
308 	struct i915_gem_engines *e;
309 	enum intel_engine_id id;
310 
311 	e = alloc_engines(I915_NUM_ENGINES);
312 	if (!e)
313 		return ERR_PTR(-ENOMEM);
314 
315 	for_each_engine(engine, gt, id) {
316 		struct intel_context *ce;
317 
318 		if (engine->legacy_idx == INVALID_ENGINE)
319 			continue;
320 
321 		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
322 		GEM_BUG_ON(e->engines[engine->legacy_idx]);
323 
324 		ce = intel_context_create(engine);
325 		if (IS_ERR(ce)) {
326 			__free_engines(e, e->num_engines + 1);
327 			return ERR_CAST(ce);
328 		}
329 
330 		intel_context_set_gem(ce, ctx);
331 
332 		e->engines[engine->legacy_idx] = ce;
333 		e->num_engines = max(e->num_engines, engine->legacy_idx);
334 	}
335 	e->num_engines++;
336 
337 	return e;
338 }
339 
340 void i915_gem_context_release(struct kref *ref)
341 {
342 	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
343 
344 	trace_i915_context_free(ctx);
345 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
346 
347 	mutex_destroy(&ctx->engines_mutex);
348 	mutex_destroy(&ctx->lut_mutex);
349 
350 	if (ctx->timeline)
351 		intel_timeline_put(ctx->timeline);
352 
353 	put_pid(ctx->pid);
354 	mutex_destroy(&ctx->mutex);
355 
356 	kfree_rcu(ctx, rcu);
357 }
358 
359 static inline struct i915_gem_engines *
360 __context_engines_static(const struct i915_gem_context *ctx)
361 {
362 	return rcu_dereference_protected(ctx->engines, true);
363 }
364 
365 static void __reset_context(struct i915_gem_context *ctx,
366 			    struct intel_engine_cs *engine)
367 {
368 	intel_gt_handle_error(engine->gt, engine->mask, 0,
369 			      "context closure in %s", ctx->name);
370 }
371 
372 static bool __cancel_engine(struct intel_engine_cs *engine)
373 {
374 	/*
375 	 * Send a "high priority pulse" down the engine to cause the
376 	 * current request to be momentarily preempted. (If it fails to
377 	 * be preempted, it will be reset). As we have marked our context
378 	 * as banned, any incomplete request, including any running, will
379 	 * be skipped following the preemption.
380 	 *
381 	 * If there is no hangchecking (one of the reasons why we try to
382 	 * cancel the context) and no forced preemption, there may be no
383 	 * means by which we reset the GPU and evict the persistent hog.
384 	 * Ergo if we are unable to inject a preemptive pulse that can
385 	 * kill the banned context, we fall back to doing a local reset
386 	 * instead.
387 	 */
388 	return intel_engine_pulse(engine) == 0;
389 }
390 
391 static struct intel_engine_cs *active_engine(struct intel_context *ce)
392 {
393 	struct intel_engine_cs *engine = NULL;
394 	struct i915_request *rq;
395 
396 	if (intel_context_has_inflight(ce))
397 		return intel_context_inflight(ce);
398 
399 	if (!ce->timeline)
400 		return NULL;
401 
402 	/*
403 	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
404 	 * to the request to prevent it being transferred to a new timeline
405 	 * (and onto a new timeline->requests list).
406 	 */
407 	rcu_read_lock();
408 	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
409 		bool found;
410 
411 		/* timeline is already completed up to this point? */
412 		if (!i915_request_get_rcu(rq))
413 			break;
414 
415 		/* Check with the backend if the request is inflight */
416 		found = true;
417 		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
418 			found = i915_request_active_engine(rq, &engine);
419 
420 		i915_request_put(rq);
421 		if (found)
422 			break;
423 	}
424 	rcu_read_unlock();
425 
426 	return engine;
427 }
428 
429 static void kill_engines(struct i915_gem_engines *engines, bool ban)
430 {
431 	struct i915_gem_engines_iter it;
432 	struct intel_context *ce;
433 
434 	/*
435 	 * Map the user's engine back to the actual engines; one virtual
436 	 * engine will be mapped to multiple engines, and using ctx->engine[]
437 	 * the same engine may have multiple instances in the user's map.
438 	 * However, we only care about pending requests, so only include
439 	 * engines on which there are incomplete requests.
440 	 */
441 	for_each_gem_engine(ce, engines, it) {
442 		struct intel_engine_cs *engine;
443 
444 		if (ban && intel_context_set_banned(ce))
445 			continue;
446 
447 		/*
448 		 * Check the current active state of this context; if we
449 		 * are currently executing on the GPU we need to evict
450 		 * ourselves. On the other hand, if we haven't yet been
451 		 * submitted to the GPU or if everything is complete,
452 		 * we have nothing to do.
453 		 */
454 		engine = active_engine(ce);
455 
456 		/* First attempt to gracefully cancel the context */
457 		if (engine && !__cancel_engine(engine) && ban)
458 			/*
459 			 * If we are unable to send a preemptive pulse to bump
460 			 * the context from the GPU, we have to resort to a full
461 			 * reset. We hope the collateral damage is worth it.
462 			 */
463 			__reset_context(engines->ctx, engine);
464 	}
465 }
466 
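/*
 * kill_context() walks the stale engine sets accumulated on ctx->stale and
 * forcibly cancels any work still outstanding on them. Each set is kept
 * alive across the unlocked kill_engines() call by awaiting its fence, and
 * is decoupled from the list afterwards so that engines_notify() does not
 * try to remove it a second time.
 */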
467 static void kill_context(struct i915_gem_context *ctx)
468 {
469 	bool ban = (!i915_gem_context_is_persistent(ctx) ||
470 		    !ctx->i915->params.enable_hangcheck);
471 	struct i915_gem_engines *pos, *next;
472 
473 	spin_lock_irq(&ctx->stale.lock);
474 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
475 	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
476 		if (!i915_sw_fence_await(&pos->fence)) {
477 			list_del_init(&pos->link);
478 			continue;
479 		}
480 
481 		spin_unlock_irq(&ctx->stale.lock);
482 
483 		kill_engines(pos, ban);
484 
485 		spin_lock_irq(&ctx->stale.lock);
486 		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
487 		list_safe_reset_next(pos, next, link);
488 		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
489 
490 		i915_sw_fence_complete(&pos->fence);
491 	}
492 	spin_unlock_irq(&ctx->stale.lock);
493 }
494 
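/*
 * engines_idle_release() retires an old engine set: every child context is
 * marked closed (serialising against execbuf), and each still-active context
 * hooks engines->fence up to its i915_active barrier so the fence only fires
 * once they have all been scheduled out and retired. The set is parked on
 * ctx->stale so that kill_context() can still find it if needed.
 */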
495 static void engines_idle_release(struct i915_gem_context *ctx,
496 				 struct i915_gem_engines *engines)
497 {
498 	struct i915_gem_engines_iter it;
499 	struct intel_context *ce;
500 
501 	INIT_LIST_HEAD(&engines->link);
502 
503 	engines->ctx = i915_gem_context_get(ctx);
504 
505 	for_each_gem_engine(ce, engines, it) {
506 		int err;
507 
508 		/* serialises with execbuf */
509 		set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
510 		if (!intel_context_pin_if_active(ce))
511 			continue;
512 
513 		/* Wait until context is finally scheduled out and retired */
514 		err = i915_sw_fence_await_active(&engines->fence,
515 						 &ce->active,
516 						 I915_ACTIVE_AWAIT_BARRIER);
517 		intel_context_unpin(ce);
518 		if (err)
519 			goto kill;
520 	}
521 
522 	spin_lock_irq(&ctx->stale.lock);
523 	if (!i915_gem_context_is_closed(ctx))
524 		list_add_tail(&engines->link, &ctx->stale.engines);
525 	spin_unlock_irq(&ctx->stale.lock);
526 
527 kill:
528 	if (list_empty(&engines->link)) /* raced, already closed */
529 		kill_engines(engines, true);
530 
531 	i915_sw_fence_commit(&engines->fence);
532 }
533 
534 static void set_closed_name(struct i915_gem_context *ctx)
535 {
536 	char *s;
537 
538 	/* Replace '[]' with '<>' to indicate closed in debug prints */
539 
540 	s = strrchr(ctx->name, '[');
541 	if (!s)
542 		return;
543 
544 	*s = '<';
545 
546 	s = strchr(s + 1, ']');
547 	if (s)
548 		*s = '>';
549 }
550 
551 static void context_close(struct i915_gem_context *ctx)
552 {
553 	struct i915_address_space *vm;
554 
555 	/* Flush any concurrent set_engines() */
556 	mutex_lock(&ctx->engines_mutex);
557 	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
558 	i915_gem_context_set_closed(ctx);
559 	mutex_unlock(&ctx->engines_mutex);
560 
561 	mutex_lock(&ctx->mutex);
562 
563 	set_closed_name(ctx);
564 
565 	vm = i915_gem_context_vm(ctx);
566 	if (vm)
567 		i915_vm_close(vm);
568 
569 	ctx->file_priv = ERR_PTR(-EBADF);
570 
571 	/*
572 	 * The LUT uses the VMA as a backpointer to unref the object,
573 	 * so we need to clear the LUT before we close all the VMA (inside
574 	 * the ppgtt).
575 	 */
576 	lut_close(ctx);
577 
578 	spin_lock(&ctx->i915->gem.contexts.lock);
579 	list_del(&ctx->link);
580 	spin_unlock(&ctx->i915->gem.contexts.lock);
581 
582 	mutex_unlock(&ctx->mutex);
583 
584 	/*
585 	 * If the user has disabled hangchecking, we cannot be sure that
586 	 * the batches will ever complete after the context is closed,
587 	 * keeping the context and all resources pinned forever. So in this
588 	 * case we opt to forcibly kill off all remaining requests on
589 	 * context close.
590 	 */
591 	kill_context(ctx);
592 
593 	i915_gem_context_put(ctx);
594 }
595 
596 static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
597 {
598 	if (i915_gem_context_is_persistent(ctx) == state)
599 		return 0;
600 
601 	if (state) {
602 		/*
603 		 * Only contexts that are short-lived [that will expire or be
604 		 * reset] are allowed to survive past termination. We require
605 		 * hangcheck to ensure that the persistent requests are healthy.
606 		 */
607 		if (!ctx->i915->params.enable_hangcheck)
608 			return -EINVAL;
609 
610 		i915_gem_context_set_persistence(ctx);
611 	} else {
612 		/* To cancel a context we use "preempt-to-idle" */
613 		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
614 			return -ENODEV;
615 
616 		/*
617 		 * If the cancel fails, we then need to reset, cleanly!
618 		 *
619 		 * If the per-engine reset fails, all hope is lost! We resort
620 		 * to a full GPU reset in that unlikely case, but realistically
621 		 * if the engine could not reset, the full reset does not fare
622 		 * much better. The damage has been done.
623 		 *
624 		 * However, if we cannot reset an engine by itself, we cannot
625 		 * cleanup a hanging persistent context without causing
626 		 * collateral damage, and we should not pretend we can by
627 		 * exposing the interface.
628 		 */
629 		if (!intel_has_reset_engine(&ctx->i915->gt))
630 			return -ENODEV;
631 
632 		i915_gem_context_clear_persistence(ctx);
633 	}
634 
635 	return 0;
636 }
637 
638 static struct i915_gem_context *
639 __create_context(struct drm_i915_private *i915)
640 {
641 	struct i915_gem_context *ctx;
642 	struct i915_gem_engines *e;
643 	int err;
644 	int i;
645 
646 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
647 	if (!ctx)
648 		return ERR_PTR(-ENOMEM);
649 
650 	kref_init(&ctx->ref);
651 	ctx->i915 = i915;
652 	ctx->sched.priority = I915_PRIORITY_NORMAL;
653 	mutex_init(&ctx->mutex);
654 	INIT_LIST_HEAD(&ctx->link);
655 
656 	spin_lock_init(&ctx->stale.lock);
657 	INIT_LIST_HEAD(&ctx->stale.engines);
658 
659 	mutex_init(&ctx->engines_mutex);
660 	e = default_engines(ctx);
661 	if (IS_ERR(e)) {
662 		err = PTR_ERR(e);
663 		goto err_free;
664 	}
665 	RCU_INIT_POINTER(ctx->engines, e);
666 
667 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
668 	mutex_init(&ctx->lut_mutex);
669 
670 	/* NB: Mark all slices as needing a remap so that when the context first
671 	 * loads it will restore whatever remap state already exists. If there
672 	 * is no remap info, it will be a NOP. */
673 	ctx->remap_slice = ALL_L3_SLICES(i915);
674 
675 	i915_gem_context_set_bannable(ctx);
676 	i915_gem_context_set_recoverable(ctx);
677 	__context_set_persistence(ctx, true /* cgroup hook? */);
678 
679 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
680 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
681 
682 	return ctx;
683 
684 err_free:
685 	kfree(ctx);
686 	return ERR_PTR(err);
687 }
688 
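/*
 * __context_engines_await() returns the current engine set with a strong
 * hold taken through its fence, preventing the array from being freed while
 * in use; the caller must release that hold. A sketch of the pattern used
 * throughout this file:
 *
 *	e = __context_engines_await(ctx, NULL);
 *	for_each_gem_engine(ce, e, it) {
 *		...
 *	}
 *	i915_sw_fence_complete(&e->fence);
 */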
689 static inline struct i915_gem_engines *
690 __context_engines_await(const struct i915_gem_context *ctx,
691 			bool *user_engines)
692 {
693 	struct i915_gem_engines *engines;
694 
695 	rcu_read_lock();
696 	do {
697 		engines = rcu_dereference(ctx->engines);
698 		GEM_BUG_ON(!engines);
699 
700 		if (user_engines)
701 			*user_engines = i915_gem_context_user_engines(ctx);
702 
703 		/* successful await => strong mb */
704 		if (unlikely(!i915_sw_fence_await(&engines->fence)))
705 			continue;
706 
707 		if (likely(engines == rcu_access_pointer(ctx->engines)))
708 			break;
709 
710 		i915_sw_fence_complete(&engines->fence);
711 	} while (1);
712 	rcu_read_unlock();
713 
714 	return engines;
715 }
716 
717 static int
718 context_apply_all(struct i915_gem_context *ctx,
719 		  int (*fn)(struct intel_context *ce, void *data),
720 		  void *data)
721 {
722 	struct i915_gem_engines_iter it;
723 	struct i915_gem_engines *e;
724 	struct intel_context *ce;
725 	int err = 0;
726 
727 	e = __context_engines_await(ctx, NULL);
728 	for_each_gem_engine(ce, e, it) {
729 		err = fn(ce, data);
730 		if (err)
731 			break;
732 	}
733 	i915_sw_fence_complete(&e->fence);
734 
735 	return err;
736 }
737 
738 static int __apply_ppgtt(struct intel_context *ce, void *vm)
739 {
740 	i915_vm_put(ce->vm);
741 	ce->vm = i915_vm_get(vm);
742 	return 0;
743 }
744 
745 static struct i915_address_space *
746 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
747 {
748 	struct i915_address_space *old;
749 
750 	old = rcu_replace_pointer(ctx->vm,
751 				  i915_vm_open(vm),
752 				  lockdep_is_held(&ctx->mutex));
753 	GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
754 
755 	context_apply_all(ctx, __apply_ppgtt, vm);
756 
757 	return old;
758 }
759 
760 static void __assign_ppgtt(struct i915_gem_context *ctx,
761 			   struct i915_address_space *vm)
762 {
763 	if (vm == rcu_access_pointer(ctx->vm))
764 		return;
765 
766 	vm = __set_ppgtt(ctx, vm);
767 	if (vm)
768 		i915_vm_close(vm);
769 }
770 
771 static void __set_timeline(struct intel_timeline **dst,
772 			   struct intel_timeline *src)
773 {
774 	struct intel_timeline *old = *dst;
775 
776 	*dst = src ? intel_timeline_get(src) : NULL;
777 
778 	if (old)
779 		intel_timeline_put(old);
780 }
781 
782 static int __apply_timeline(struct intel_context *ce, void *timeline)
783 {
784 	__set_timeline(&ce->timeline, timeline);
785 	return 0;
786 }
787 
788 static void __assign_timeline(struct i915_gem_context *ctx,
789 			      struct intel_timeline *timeline)
790 {
791 	__set_timeline(&ctx->timeline, timeline);
792 	context_apply_all(ctx, __apply_timeline, timeline);
793 }
794 
795 static int __apply_watchdog(struct intel_context *ce, void *timeout_us)
796 {
797 	return intel_context_set_watchdog_us(ce, (uintptr_t)timeout_us);
798 }
799 
800 static int
801 __set_watchdog(struct i915_gem_context *ctx, unsigned long timeout_us)
802 {
803 	int ret;
804 
805 	ret = context_apply_all(ctx, __apply_watchdog,
806 				(void *)(uintptr_t)timeout_us);
807 	if (!ret)
808 		ctx->watchdog.timeout_us = timeout_us;
809 
810 	return ret;
811 }
812 
813 static void __set_default_fence_expiry(struct i915_gem_context *ctx)
814 {
815 	struct drm_i915_private *i915 = ctx->i915;
816 	int ret;
817 
818 	if (!IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) ||
819 	    !i915->params.request_timeout_ms)
820 		return;
821 
822 	/* Default expiry for user fences. */
823 	ret = __set_watchdog(ctx, i915->params.request_timeout_ms * 1000);
824 	if (ret)
825 		drm_notice(&i915->drm,
826 			   "Failed to configure default fence expiry! (%d)",
827 			   ret);
828 }
829 
830 static struct i915_gem_context *
831 i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
832 {
833 	struct i915_gem_context *ctx;
834 
835 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
836 	    !HAS_EXECLISTS(i915))
837 		return ERR_PTR(-EINVAL);
838 
839 	ctx = __create_context(i915);
840 	if (IS_ERR(ctx))
841 		return ctx;
842 
843 	if (HAS_FULL_PPGTT(i915)) {
844 		struct i915_ppgtt *ppgtt;
845 
846 		ppgtt = i915_ppgtt_create(&i915->gt);
847 		if (IS_ERR(ppgtt)) {
848 			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
849 				PTR_ERR(ppgtt));
850 			context_close(ctx);
851 			return ERR_CAST(ppgtt);
852 		}
853 
854 		mutex_lock(&ctx->mutex);
855 		__assign_ppgtt(ctx, &ppgtt->vm);
856 		mutex_unlock(&ctx->mutex);
857 
858 		i915_vm_put(&ppgtt->vm);
859 	}
860 
861 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
862 		struct intel_timeline *timeline;
863 
864 		timeline = intel_timeline_create(&i915->gt);
865 		if (IS_ERR(timeline)) {
866 			context_close(ctx);
867 			return ERR_CAST(timeline);
868 		}
869 
870 		__assign_timeline(ctx, timeline);
871 		intel_timeline_put(timeline);
872 	}
873 
874 	__set_default_fence_expiry(ctx);
875 
876 	trace_i915_context_create(ctx);
877 
878 	return ctx;
879 }
880 
881 static void init_contexts(struct i915_gem_contexts *gc)
882 {
883 	spin_lock_init(&gc->lock);
884 	INIT_LIST_HEAD(&gc->list);
885 }
886 
887 void i915_gem_init__contexts(struct drm_i915_private *i915)
888 {
889 	init_contexts(&i915->gem.contexts);
890 }
891 
892 static int gem_context_register(struct i915_gem_context *ctx,
893 				struct drm_i915_file_private *fpriv,
894 				u32 *id)
895 {
896 	struct drm_i915_private *i915 = ctx->i915;
897 	struct i915_address_space *vm;
898 	int ret;
899 
900 	ctx->file_priv = fpriv;
901 
902 	mutex_lock(&ctx->mutex);
903 	vm = i915_gem_context_vm(ctx);
904 	if (vm)
905 		WRITE_ONCE(vm->file, fpriv); /* XXX */
906 	mutex_unlock(&ctx->mutex);
907 
908 	ctx->pid = get_task_pid(current, PIDTYPE_PID);
909 	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
910 		 current->comm, pid_nr(ctx->pid));
911 
912 	/* And finally expose ourselves to userspace via the idr */
913 	ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
914 	if (ret)
915 		goto err_pid;
916 
917 	spin_lock(&i915->gem.contexts.lock);
918 	list_add_tail(&ctx->link, &i915->gem.contexts.list);
919 	spin_unlock(&i915->gem.contexts.lock);
920 
921 	return 0;
922 
923 err_pid:
924 	put_pid(fetch_and_zero(&ctx->pid));
925 	return ret;
926 }
927 
928 int i915_gem_context_open(struct drm_i915_private *i915,
929 			  struct drm_file *file)
930 {
931 	struct drm_i915_file_private *file_priv = file->driver_priv;
932 	struct i915_gem_context *ctx;
933 	int err;
934 	u32 id;
935 
936 	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
937 
938 	/* 0 reserved for invalid/unassigned ppgtt */
939 	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
940 
941 	ctx = i915_gem_create_context(i915, 0);
942 	if (IS_ERR(ctx)) {
943 		err = PTR_ERR(ctx);
944 		goto err;
945 	}
946 
947 	err = gem_context_register(ctx, file_priv, &id);
948 	if (err < 0)
949 		goto err_ctx;
950 
951 	GEM_BUG_ON(id);
952 	return 0;
953 
954 err_ctx:
955 	context_close(ctx);
956 err:
957 	xa_destroy(&file_priv->vm_xa);
958 	xa_destroy(&file_priv->context_xa);
959 	return err;
960 }
961 
962 void i915_gem_context_close(struct drm_file *file)
963 {
964 	struct drm_i915_file_private *file_priv = file->driver_priv;
965 	struct i915_address_space *vm;
966 	struct i915_gem_context *ctx;
967 	unsigned long idx;
968 
969 	xa_for_each(&file_priv->context_xa, idx, ctx)
970 		context_close(ctx);
971 	xa_destroy(&file_priv->context_xa);
972 
973 	xa_for_each(&file_priv->vm_xa, idx, vm)
974 		i915_vm_put(vm);
975 	xa_destroy(&file_priv->vm_xa);
976 }
977 
978 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
979 			     struct drm_file *file)
980 {
981 	struct drm_i915_private *i915 = to_i915(dev);
982 	struct drm_i915_gem_vm_control *args = data;
983 	struct drm_i915_file_private *file_priv = file->driver_priv;
984 	struct i915_ppgtt *ppgtt;
985 	u32 id;
986 	int err;
987 
988 	if (!HAS_FULL_PPGTT(i915))
989 		return -ENODEV;
990 
991 	if (args->flags)
992 		return -EINVAL;
993 
994 	ppgtt = i915_ppgtt_create(&i915->gt);
995 	if (IS_ERR(ppgtt))
996 		return PTR_ERR(ppgtt);
997 
998 	ppgtt->vm.file = file_priv;
999 
1000 	if (args->extensions) {
1001 		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1002 					   NULL, 0,
1003 					   ppgtt);
1004 		if (err)
1005 			goto err_put;
1006 	}
1007 
1008 	err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
1009 		       xa_limit_32b, GFP_KERNEL);
1010 	if (err)
1011 		goto err_put;
1012 
1013 	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1014 	args->vm_id = id;
1015 	return 0;
1016 
1017 err_put:
1018 	i915_vm_put(&ppgtt->vm);
1019 	return err;
1020 }
1021 
1022 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
1023 			      struct drm_file *file)
1024 {
1025 	struct drm_i915_file_private *file_priv = file->driver_priv;
1026 	struct drm_i915_gem_vm_control *args = data;
1027 	struct i915_address_space *vm;
1028 
1029 	if (args->flags)
1030 		return -EINVAL;
1031 
1032 	if (args->extensions)
1033 		return -EINVAL;
1034 
1035 	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1036 	if (!vm)
1037 		return -ENOENT;
1038 
1039 	i915_vm_put(vm);
1040 	return 0;
1041 }
1042 
1043 struct context_barrier_task {
1044 	struct i915_active base;
1045 	void (*task)(void *data);
1046 	void *data;
1047 };
1048 
1049 __i915_active_call
1050 static void cb_retire(struct i915_active *base)
1051 {
1052 	struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
1053 
1054 	if (cb->task)
1055 		cb->task(cb->data);
1056 
1057 	i915_active_fini(&cb->base);
1058 	kfree(cb);
1059 }
1060 
1061 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
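/*
 * context_barrier_task() emits a request on every matching engine of the
 * context and arranges for task(data) to run once all of those requests have
 * retired, i.e. once the hardware can no longer be using whatever state is
 * about to be torn down. The optional skip()/pin()/emit() callbacks control
 * which engines participate and what each request does.
 */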
1062 static int context_barrier_task(struct i915_gem_context *ctx,
1063 				intel_engine_mask_t engines,
1064 				bool (*skip)(struct intel_context *ce, void *data),
1065 				int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data),
1066 				int (*emit)(struct i915_request *rq, void *data),
1067 				void (*task)(void *data),
1068 				void *data)
1069 {
1070 	struct context_barrier_task *cb;
1071 	struct i915_gem_engines_iter it;
1072 	struct i915_gem_engines *e;
1073 	struct i915_gem_ww_ctx ww;
1074 	struct intel_context *ce;
1075 	int err = 0;
1076 
1077 	GEM_BUG_ON(!task);
1078 
1079 	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
1080 	if (!cb)
1081 		return -ENOMEM;
1082 
1083 	i915_active_init(&cb->base, NULL, cb_retire);
1084 	err = i915_active_acquire(&cb->base);
1085 	if (err) {
1086 		kfree(cb);
1087 		return err;
1088 	}
1089 
1090 	e = __context_engines_await(ctx, NULL);
1091 	if (!e) {
1092 		i915_active_release(&cb->base);
1093 		return -ENOENT;
1094 	}
1095 
1096 	for_each_gem_engine(ce, e, it) {
1097 		struct i915_request *rq;
1098 
1099 		if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
1100 				       ce->engine->mask)) {
1101 			err = -ENXIO;
1102 			break;
1103 		}
1104 
1105 		if (!(ce->engine->mask & engines))
1106 			continue;
1107 
1108 		if (skip && skip(ce, data))
1109 			continue;
1110 
1111 		i915_gem_ww_ctx_init(&ww, true);
1112 retry:
1113 		err = intel_context_pin_ww(ce, &ww);
1114 		if (err)
1115 			goto err;
1116 
1117 		if (pin)
1118 			err = pin(ce, &ww, data);
1119 		if (err)
1120 			goto err_unpin;
1121 
1122 		rq = i915_request_create(ce);
1123 		if (IS_ERR(rq)) {
1124 			err = PTR_ERR(rq);
1125 			goto err_unpin;
1126 		}
1127 
1128 		err = 0;
1129 		if (emit)
1130 			err = emit(rq, data);
1131 		if (err == 0)
1132 			err = i915_active_add_request(&cb->base, rq);
1133 
1134 		i915_request_add(rq);
1135 err_unpin:
1136 		intel_context_unpin(ce);
1137 err:
1138 		if (err == -EDEADLK) {
1139 			err = i915_gem_ww_ctx_backoff(&ww);
1140 			if (!err)
1141 				goto retry;
1142 		}
1143 		i915_gem_ww_ctx_fini(&ww);
1144 
1145 		if (err)
1146 			break;
1147 	}
1148 	i915_sw_fence_complete(&e->fence);
1149 
1150 	cb->task = err ? NULL : task; /* caller needs to unwind instead */
1151 	cb->data = data;
1152 
1153 	i915_active_release(&cb->base);
1154 
1155 	return err;
1156 }
1157 
1158 static int get_ppgtt(struct drm_i915_file_private *file_priv,
1159 		     struct i915_gem_context *ctx,
1160 		     struct drm_i915_gem_context_param *args)
1161 {
1162 	struct i915_address_space *vm;
1163 	int err;
1164 	u32 id;
1165 
1166 	if (!rcu_access_pointer(ctx->vm))
1167 		return -ENODEV;
1168 
1169 	rcu_read_lock();
1170 	vm = context_get_vm_rcu(ctx);
1171 	rcu_read_unlock();
1172 	if (!vm)
1173 		return -ENODEV;
1174 
1175 	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1176 	if (err)
1177 		goto err_put;
1178 
1179 	i915_vm_open(vm);
1180 
1181 	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1182 	args->value = id;
1183 	args->size = 0;
1184 
1185 err_put:
1186 	i915_vm_put(vm);
1187 	return err;
1188 }
1189 
1190 static void set_ppgtt_barrier(void *data)
1191 {
1192 	struct i915_address_space *old = data;
1193 
1194 	if (INTEL_GEN(old->i915) < 8)
1195 		gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
1196 
1197 	i915_vm_close(old);
1198 }
1199 
1200 static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data)
1201 {
1202 	struct i915_address_space *vm = ce->vm;
1203 
1204 	if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915))
1205 		/* ppGTT is not part of the legacy context image */
1206 		return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);
1207 
1208 	return 0;
1209 }
1210 
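/*
 * emit_ppgtt_update() rewrites the ring's page-directory pointers to point
 * at the new vm: a 4-level ppgtt only needs PDP0 reloaded, whereas the
 * legacy 3-level layout reloads all GEN8_3LVL_PDPES entries with a posted
 * LRI after an invalidating flush. gen6/7 ppgtt is instead handled at pin
 * time (see pin_ppgtt_update()).
 */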
1211 static int emit_ppgtt_update(struct i915_request *rq, void *data)
1212 {
1213 	struct i915_address_space *vm = rq->context->vm;
1214 	struct intel_engine_cs *engine = rq->engine;
1215 	u32 base = engine->mmio_base;
1216 	u32 *cs;
1217 	int i;
1218 
1219 	if (i915_vm_is_4lvl(vm)) {
1220 		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1221 		const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
1222 
1223 		cs = intel_ring_begin(rq, 6);
1224 		if (IS_ERR(cs))
1225 			return PTR_ERR(cs);
1226 
1227 		*cs++ = MI_LOAD_REGISTER_IMM(2);
1228 
1229 		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1230 		*cs++ = upper_32_bits(pd_daddr);
1231 		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1232 		*cs++ = lower_32_bits(pd_daddr);
1233 
1234 		*cs++ = MI_NOOP;
1235 		intel_ring_advance(rq, cs);
1236 	} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1237 		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1238 		int err;
1239 
1240 		/* Magic required to prevent forcewake errors! */
1241 		err = engine->emit_flush(rq, EMIT_INVALIDATE);
1242 		if (err)
1243 			return err;
1244 
1245 		cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1246 		if (IS_ERR(cs))
1247 			return PTR_ERR(cs);
1248 
1249 		*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
1250 		for (i = GEN8_3LVL_PDPES; i--; ) {
1251 			const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1252 
1253 			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1254 			*cs++ = upper_32_bits(pd_daddr);
1255 			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1256 			*cs++ = lower_32_bits(pd_daddr);
1257 		}
1258 		*cs++ = MI_NOOP;
1259 		intel_ring_advance(rq, cs);
1260 	}
1261 
1262 	return 0;
1263 }
1264 
1265 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
1266 {
1267 	if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
1268 		return !ce->state;
1269 	else
1270 		return !atomic_read(&ce->pin_count);
1271 }
1272 
1273 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1274 		     struct i915_gem_context *ctx,
1275 		     struct drm_i915_gem_context_param *args)
1276 {
1277 	struct i915_address_space *vm, *old;
1278 	int err;
1279 
1280 	if (args->size)
1281 		return -EINVAL;
1282 
1283 	if (!rcu_access_pointer(ctx->vm))
1284 		return -ENODEV;
1285 
1286 	if (upper_32_bits(args->value))
1287 		return -ENOENT;
1288 
1289 	rcu_read_lock();
1290 	vm = xa_load(&file_priv->vm_xa, args->value);
1291 	if (vm && !kref_get_unless_zero(&vm->ref))
1292 		vm = NULL;
1293 	rcu_read_unlock();
1294 	if (!vm)
1295 		return -ENOENT;
1296 
1297 	err = mutex_lock_interruptible(&ctx->mutex);
1298 	if (err)
1299 		goto out;
1300 
1301 	if (i915_gem_context_is_closed(ctx)) {
1302 		err = -ENOENT;
1303 		goto unlock;
1304 	}
1305 
1306 	if (vm == rcu_access_pointer(ctx->vm))
1307 		goto unlock;
1308 
1309 	old = __set_ppgtt(ctx, vm);
1310 
1311 	/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
1312 	lut_close(ctx);
1313 
1314 	/*
1315 	 * We need to flush any requests using the current ppgtt before
1316 	 * we release it as the requests do not hold a reference themselves,
1317 	 * only indirectly through the context.
1318 	 */
1319 	err = context_barrier_task(ctx, ALL_ENGINES,
1320 				   skip_ppgtt_update,
1321 				   pin_ppgtt_update,
1322 				   emit_ppgtt_update,
1323 				   set_ppgtt_barrier,
1324 				   old);
1325 	if (err) {
1326 		i915_vm_close(__set_ppgtt(ctx, old));
1327 		i915_vm_close(old);
1328 		lut_close(ctx); /* force a rebuild of the old obj:vma cache */
1329 	}
1330 
1331 unlock:
1332 	mutex_unlock(&ctx->mutex);
1333 out:
1334 	i915_vm_put(vm);
1335 	return err;
1336 }
1337 
1338 static int __apply_ringsize(struct intel_context *ce, void *sz)
1339 {
1340 	return intel_context_set_ring_size(ce, (unsigned long)sz);
1341 }
1342 
1343 static int set_ringsize(struct i915_gem_context *ctx,
1344 			struct drm_i915_gem_context_param *args)
1345 {
1346 	if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
1347 		return -ENODEV;
1348 
1349 	if (args->size)
1350 		return -EINVAL;
1351 
1352 	if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE))
1353 		return -EINVAL;
1354 
1355 	if (args->value < I915_GTT_PAGE_SIZE)
1356 		return -EINVAL;
1357 
1358 	if (args->value > 128 * I915_GTT_PAGE_SIZE)
1359 		return -EINVAL;
1360 
1361 	return context_apply_all(ctx,
1362 				 __apply_ringsize,
1363 				 __intel_context_ring_size(args->value));
1364 }
1365 
1366 static int __get_ringsize(struct intel_context *ce, void *arg)
1367 {
1368 	long sz;
1369 
1370 	sz = intel_context_get_ring_size(ce);
1371 	GEM_BUG_ON(sz > INT_MAX);
1372 
1373 	return sz; /* stop on first engine */
1374 }
1375 
1376 static int get_ringsize(struct i915_gem_context *ctx,
1377 			struct drm_i915_gem_context_param *args)
1378 {
1379 	int sz;
1380 
1381 	if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
1382 		return -ENODEV;
1383 
1384 	if (args->size)
1385 		return -EINVAL;
1386 
1387 	sz = context_apply_all(ctx, __get_ringsize, NULL);
1388 	if (sz < 0)
1389 		return sz;
1390 
1391 	args->value = sz;
1392 	return 0;
1393 }
1394 
1395 int
1396 i915_gem_user_to_context_sseu(struct intel_gt *gt,
1397 			      const struct drm_i915_gem_context_param_sseu *user,
1398 			      struct intel_sseu *context)
1399 {
1400 	const struct sseu_dev_info *device = &gt->info.sseu;
1401 	struct drm_i915_private *i915 = gt->i915;
1402 
1403 	/* No zeros in any field. */
1404 	if (!user->slice_mask || !user->subslice_mask ||
1405 	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1406 		return -EINVAL;
1407 
1408 	/* Max > min. */
1409 	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1410 		return -EINVAL;
1411 
1412 	/*
1413 	 * Some future proofing on the types since the uAPI is wider than the
1414 	 * current internal implementation.
1415 	 */
1416 	if (overflows_type(user->slice_mask, context->slice_mask) ||
1417 	    overflows_type(user->subslice_mask, context->subslice_mask) ||
1418 	    overflows_type(user->min_eus_per_subslice,
1419 			   context->min_eus_per_subslice) ||
1420 	    overflows_type(user->max_eus_per_subslice,
1421 			   context->max_eus_per_subslice))
1422 		return -EINVAL;
1423 
1424 	/* Check validity against hardware. */
1425 	if (user->slice_mask & ~device->slice_mask)
1426 		return -EINVAL;
1427 
1428 	if (user->subslice_mask & ~device->subslice_mask[0])
1429 		return -EINVAL;
1430 
1431 	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1432 		return -EINVAL;
1433 
1434 	context->slice_mask = user->slice_mask;
1435 	context->subslice_mask = user->subslice_mask;
1436 	context->min_eus_per_subslice = user->min_eus_per_subslice;
1437 	context->max_eus_per_subslice = user->max_eus_per_subslice;
1438 
1439 	/* Part specific restrictions. */
1440 	if (IS_GEN(i915, 11)) {
1441 		unsigned int hw_s = hweight8(device->slice_mask);
1442 		unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1443 		unsigned int req_s = hweight8(context->slice_mask);
1444 		unsigned int req_ss = hweight8(context->subslice_mask);
1445 
1446 		/*
1447 		 * Only full subslice enablement is possible if more than one
1448 		 * slice is turned on.
1449 		 */
1450 		if (req_s > 1 && req_ss != hw_ss_per_s)
1451 			return -EINVAL;
1452 
1453 		/*
1454 		 * If more than four (SScount bitfield limit) subslices are
1455 		 * requested then the number has to be even.
1456 		 */
1457 		if (req_ss > 4 && (req_ss & 1))
1458 			return -EINVAL;
1459 
1460 		/*
1461 		 * If only one slice is enabled and subslice count is below the
1462 		 * device full enablement, it must be at most half of the all
1463 		 * available subslices.
1464 		 */
1465 		if (req_s == 1 && req_ss < hw_ss_per_s &&
1466 		    req_ss > (hw_ss_per_s / 2))
1467 			return -EINVAL;
1468 
1469 		/* ABI restriction - VME use case only. */
1470 
1471 		/* All slices or one slice only. */
1472 		if (req_s != 1 && req_s != hw_s)
1473 			return -EINVAL;
1474 
1475 		/*
1476 		 * Half subslices or full enablement only when one slice is
1477 		 * enabled.
1478 		 */
1479 		if (req_s == 1 &&
1480 		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1481 			return -EINVAL;
1482 
1483 		/* No EU configuration changes. */
1484 		if ((user->min_eus_per_subslice !=
1485 		     device->max_eus_per_subslice) ||
1486 		    (user->max_eus_per_subslice !=
1487 		     device->max_eus_per_subslice))
1488 			return -EINVAL;
1489 	}
1490 
1491 	return 0;
1492 }
1493 
1494 static int set_sseu(struct i915_gem_context *ctx,
1495 		    struct drm_i915_gem_context_param *args)
1496 {
1497 	struct drm_i915_private *i915 = ctx->i915;
1498 	struct drm_i915_gem_context_param_sseu user_sseu;
1499 	struct intel_context *ce;
1500 	struct intel_sseu sseu;
1501 	unsigned long lookup;
1502 	int ret;
1503 
1504 	if (args->size < sizeof(user_sseu))
1505 		return -EINVAL;
1506 
1507 	if (!IS_GEN(i915, 11))
1508 		return -ENODEV;
1509 
1510 	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1511 			   sizeof(user_sseu)))
1512 		return -EFAULT;
1513 
1514 	if (user_sseu.rsvd)
1515 		return -EINVAL;
1516 
1517 	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1518 		return -EINVAL;
1519 
1520 	lookup = 0;
1521 	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1522 		lookup |= LOOKUP_USER_INDEX;
1523 
1524 	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1525 	if (IS_ERR(ce))
1526 		return PTR_ERR(ce);
1527 
1528 	/* Only render engine supports RPCS configuration. */
1529 	if (ce->engine->class != RENDER_CLASS) {
1530 		ret = -ENODEV;
1531 		goto out_ce;
1532 	}
1533 
1534 	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
1535 	if (ret)
1536 		goto out_ce;
1537 
1538 	ret = intel_context_reconfigure_sseu(ce, sseu);
1539 	if (ret)
1540 		goto out_ce;
1541 
1542 	args->size = sizeof(user_sseu);
1543 
1544 out_ce:
1545 	intel_context_put(ce);
1546 	return ret;
1547 }
1548 
1549 struct set_engines {
1550 	struct i915_gem_context *ctx;
1551 	struct i915_gem_engines *engines;
1552 };
1553 
1554 static int
1555 set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1556 {
1557 	struct i915_context_engines_load_balance __user *ext =
1558 		container_of_user(base, typeof(*ext), base);
1559 	const struct set_engines *set = data;
1560 	struct drm_i915_private *i915 = set->ctx->i915;
1561 	struct intel_engine_cs *stack[16];
1562 	struct intel_engine_cs **siblings;
1563 	struct intel_context *ce;
1564 	u16 num_siblings, idx;
1565 	unsigned int n;
1566 	int err;
1567 
1568 	if (!HAS_EXECLISTS(i915))
1569 		return -ENODEV;
1570 
1571 	if (intel_uc_uses_guc_submission(&i915->gt.uc))
1572 		return -ENODEV; /* not implemented yet */
1573 
1574 	if (get_user(idx, &ext->engine_index))
1575 		return -EFAULT;
1576 
1577 	if (idx >= set->engines->num_engines) {
1578 		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
1579 			idx, set->engines->num_engines);
1580 		return -EINVAL;
1581 	}
1582 
1583 	idx = array_index_nospec(idx, set->engines->num_engines);
1584 	if (set->engines->engines[idx]) {
1585 		drm_dbg(&i915->drm,
1586 			"Invalid placement[%d], already occupied\n", idx);
1587 		return -EEXIST;
1588 	}
1589 
1590 	if (get_user(num_siblings, &ext->num_siblings))
1591 		return -EFAULT;
1592 
1593 	err = check_user_mbz(&ext->flags);
1594 	if (err)
1595 		return err;
1596 
1597 	err = check_user_mbz(&ext->mbz64);
1598 	if (err)
1599 		return err;
1600 
1601 	siblings = stack;
1602 	if (num_siblings > ARRAY_SIZE(stack)) {
1603 		siblings = kmalloc_array(num_siblings,
1604 					 sizeof(*siblings),
1605 					 GFP_KERNEL);
1606 		if (!siblings)
1607 			return -ENOMEM;
1608 	}
1609 
1610 	for (n = 0; n < num_siblings; n++) {
1611 		struct i915_engine_class_instance ci;
1612 
1613 		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1614 			err = -EFAULT;
1615 			goto out_siblings;
1616 		}
1617 
1618 		siblings[n] = intel_engine_lookup_user(i915,
1619 						       ci.engine_class,
1620 						       ci.engine_instance);
1621 		if (!siblings[n]) {
1622 			drm_dbg(&i915->drm,
1623 				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
1624 				n, ci.engine_class, ci.engine_instance);
1625 			err = -EINVAL;
1626 			goto out_siblings;
1627 		}
1628 	}
1629 
1630 	ce = intel_execlists_create_virtual(siblings, n);
1631 	if (IS_ERR(ce)) {
1632 		err = PTR_ERR(ce);
1633 		goto out_siblings;
1634 	}
1635 
1636 	intel_context_set_gem(ce, set->ctx);
1637 
1638 	if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1639 		intel_context_put(ce);
1640 		err = -EEXIST;
1641 		goto out_siblings;
1642 	}
1643 
1644 out_siblings:
1645 	if (siblings != stack)
1646 		kfree(siblings);
1647 
1648 	return err;
1649 }
1650 
1651 static int
1652 set_engines__bond(struct i915_user_extension __user *base, void *data)
1653 {
1654 	struct i915_context_engines_bond __user *ext =
1655 		container_of_user(base, typeof(*ext), base);
1656 	const struct set_engines *set = data;
1657 	struct drm_i915_private *i915 = set->ctx->i915;
1658 	struct i915_engine_class_instance ci;
1659 	struct intel_engine_cs *virtual;
1660 	struct intel_engine_cs *master;
1661 	u16 idx, num_bonds;
1662 	int err, n;
1663 
1664 	if (get_user(idx, &ext->virtual_index))
1665 		return -EFAULT;
1666 
1667 	if (idx >= set->engines->num_engines) {
1668 		drm_dbg(&i915->drm,
1669 			"Invalid index for virtual engine: %d >= %d\n",
1670 			idx, set->engines->num_engines);
1671 		return -EINVAL;
1672 	}
1673 
1674 	idx = array_index_nospec(idx, set->engines->num_engines);
1675 	if (!set->engines->engines[idx]) {
1676 		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
1677 		return -EINVAL;
1678 	}
1679 	virtual = set->engines->engines[idx]->engine;
1680 
1681 	err = check_user_mbz(&ext->flags);
1682 	if (err)
1683 		return err;
1684 
1685 	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1686 		err = check_user_mbz(&ext->mbz64[n]);
1687 		if (err)
1688 			return err;
1689 	}
1690 
1691 	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1692 		return -EFAULT;
1693 
1694 	master = intel_engine_lookup_user(i915,
1695 					  ci.engine_class, ci.engine_instance);
1696 	if (!master) {
1697 		drm_dbg(&i915->drm,
1698 			"Unrecognised master engine: { class:%u, instance:%u }\n",
1699 			ci.engine_class, ci.engine_instance);
1700 		return -EINVAL;
1701 	}
1702 
1703 	if (get_user(num_bonds, &ext->num_bonds))
1704 		return -EFAULT;
1705 
1706 	for (n = 0; n < num_bonds; n++) {
1707 		struct intel_engine_cs *bond;
1708 
1709 		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1710 			return -EFAULT;
1711 
1712 		bond = intel_engine_lookup_user(i915,
1713 						ci.engine_class,
1714 						ci.engine_instance);
1715 		if (!bond) {
1716 			drm_dbg(&i915->drm,
1717 				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1718 				n, ci.engine_class, ci.engine_instance);
1719 			return -EINVAL;
1720 		}
1721 
1722 		/*
1723 		 * A non-virtual engine has no siblings to choose between; and
1724 		 * a submit fence will always be directed to the one engine.
1725 		 */
1726 		if (intel_engine_is_virtual(virtual)) {
1727 			err = intel_virtual_engine_attach_bond(virtual,
1728 							       master,
1729 							       bond);
1730 			if (err)
1731 				return err;
1732 		}
1733 	}
1734 
1735 	return 0;
1736 }
1737 
1738 static const i915_user_extension_fn set_engines__extensions[] = {
1739 	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1740 	[I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1741 };
1742 
1743 static int
1744 set_engines(struct i915_gem_context *ctx,
1745 	    const struct drm_i915_gem_context_param *args)
1746 {
1747 	struct drm_i915_private *i915 = ctx->i915;
1748 	struct i915_context_param_engines __user *user =
1749 		u64_to_user_ptr(args->value);
1750 	struct set_engines set = { .ctx = ctx };
1751 	unsigned int num_engines, n;
1752 	u64 extensions;
1753 	int err;
1754 
1755 	if (!args->size) { /* switch back to legacy user_ring_map */
1756 		if (!i915_gem_context_user_engines(ctx))
1757 			return 0;
1758 
1759 		set.engines = default_engines(ctx);
1760 		if (IS_ERR(set.engines))
1761 			return PTR_ERR(set.engines);
1762 
1763 		goto replace;
1764 	}
1765 
1766 	BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1767 	if (args->size < sizeof(*user) ||
1768 	    !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1769 		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
1770 			args->size);
1771 		return -EINVAL;
1772 	}
1773 
1774 	/*
1775 	 * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1776 	 * first 64 engines defined here.
1777 	 */
1778 	num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1779 	set.engines = alloc_engines(num_engines);
1780 	if (!set.engines)
1781 		return -ENOMEM;
1782 
1783 	for (n = 0; n < num_engines; n++) {
1784 		struct i915_engine_class_instance ci;
1785 		struct intel_engine_cs *engine;
1786 		struct intel_context *ce;
1787 
1788 		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1789 			__free_engines(set.engines, n);
1790 			return -EFAULT;
1791 		}
1792 
1793 		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1794 		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1795 			set.engines->engines[n] = NULL;
1796 			continue;
1797 		}
1798 
1799 		engine = intel_engine_lookup_user(ctx->i915,
1800 						  ci.engine_class,
1801 						  ci.engine_instance);
1802 		if (!engine) {
1803 			drm_dbg(&i915->drm,
1804 				"Invalid engine[%d]: { class:%d, instance:%d }\n",
1805 				n, ci.engine_class, ci.engine_instance);
1806 			__free_engines(set.engines, n);
1807 			return -ENOENT;
1808 		}
1809 
1810 		ce = intel_context_create(engine);
1811 		if (IS_ERR(ce)) {
1812 			__free_engines(set.engines, n);
1813 			return PTR_ERR(ce);
1814 		}
1815 
1816 		intel_context_set_gem(ce, ctx);
1817 
1818 		set.engines->engines[n] = ce;
1819 	}
1820 	set.engines->num_engines = num_engines;
1821 
1822 	err = -EFAULT;
1823 	if (!get_user(extensions, &user->extensions))
1824 		err = i915_user_extensions(u64_to_user_ptr(extensions),
1825 					   set_engines__extensions,
1826 					   ARRAY_SIZE(set_engines__extensions),
1827 					   &set);
1828 	if (err) {
1829 		free_engines(set.engines);
1830 		return err;
1831 	}
1832 
1833 replace:
1834 	mutex_lock(&ctx->engines_mutex);
1835 	if (i915_gem_context_is_closed(ctx)) {
1836 		mutex_unlock(&ctx->engines_mutex);
1837 		free_engines(set.engines);
1838 		return -ENOENT;
1839 	}
1840 	if (args->size)
1841 		i915_gem_context_set_user_engines(ctx);
1842 	else
1843 		i915_gem_context_clear_user_engines(ctx);
1844 	set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
1845 	mutex_unlock(&ctx->engines_mutex);
1846 
1847 	/* Keep track of old engine sets for kill_context() */
1848 	engines_idle_release(ctx, set.engines);
1849 
1850 	return 0;
1851 }
1852 
1853 static int
1854 get_engines(struct i915_gem_context *ctx,
1855 	    struct drm_i915_gem_context_param *args)
1856 {
1857 	struct i915_context_param_engines __user *user;
1858 	struct i915_gem_engines *e;
1859 	size_t n, count, size;
1860 	bool user_engines;
1861 	int err = 0;
1862 
1863 	e = __context_engines_await(ctx, &user_engines);
1864 	if (!e)
1865 		return -ENOENT;
1866 
1867 	if (!user_engines) {
1868 		i915_sw_fence_complete(&e->fence);
1869 		args->size = 0;
1870 		return 0;
1871 	}
1872 
1873 	count = e->num_engines;
1874 
1875 	/* Be paranoid in case we have an impedance mismatch */
1876 	if (!check_struct_size(user, engines, count, &size)) {
1877 		err = -EINVAL;
1878 		goto err_free;
1879 	}
1880 	if (overflows_type(size, args->size)) {
1881 		err = -EINVAL;
1882 		goto err_free;
1883 	}
1884 
1885 	if (!args->size) {
1886 		args->size = size;
1887 		goto err_free;
1888 	}
1889 
1890 	if (args->size < size) {
1891 		err = -EINVAL;
1892 		goto err_free;
1893 	}
1894 
1895 	user = u64_to_user_ptr(args->value);
1896 	if (put_user(0, &user->extensions)) {
1897 		err = -EFAULT;
1898 		goto err_free;
1899 	}
1900 
1901 	for (n = 0; n < count; n++) {
1902 		struct i915_engine_class_instance ci = {
1903 			.engine_class = I915_ENGINE_CLASS_INVALID,
1904 			.engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1905 		};
1906 
1907 		if (e->engines[n]) {
1908 			ci.engine_class = e->engines[n]->engine->uabi_class;
1909 			ci.engine_instance = e->engines[n]->engine->uabi_instance;
1910 		}
1911 
1912 		if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1913 			err = -EFAULT;
1914 			goto err_free;
1915 		}
1916 	}
1917 
1918 	args->size = size;
1919 
1920 err_free:
1921 	i915_sw_fence_complete(&e->fence);
1922 	return err;
1923 }
1924 
1925 static int
1926 set_persistence(struct i915_gem_context *ctx,
1927 		const struct drm_i915_gem_context_param *args)
1928 {
1929 	if (args->size)
1930 		return -EINVAL;
1931 
1932 	return __context_set_persistence(ctx, args->value);
1933 }
1934 
1935 static int __apply_priority(struct intel_context *ce, void *arg)
1936 {
1937 	struct i915_gem_context *ctx = arg;
1938 
1939 	if (!intel_engine_has_timeslices(ce->engine))
1940 		return 0;
1941 
1942 	if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
1943 		intel_context_set_use_semaphores(ce);
1944 	else
1945 		intel_context_clear_use_semaphores(ce);
1946 
1947 	return 0;
1948 }
1949 
static int set_priority(struct i915_gem_context *ctx,
			const struct drm_i915_gem_context_param *args)
{
	s64 priority = args->value;

	if (args->size)
		return -EINVAL;

	if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
		return -ENODEV;

	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
		return -EINVAL;

	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
	    !capable(CAP_SYS_NICE))
		return -EPERM;

	ctx->sched.priority = priority;
	context_apply_all(ctx, __apply_priority, ctx);

	return 0;
}

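/*
 * Common parameter handling shared by the SETPARAM ioctl and the
 * CONTEXT_CREATE_EXT_SETPARAM extension. I915_CONTEXT_PARAM_BAN_PERIOD
 * is no longer supported and is rejected along with unknown params.
 */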
static int ctx_setparam(struct drm_i915_file_private *fpriv,
			struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		else
			clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_recoverable(ctx);
		else
			i915_gem_context_clear_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		ret = set_priority(ctx, args);
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = set_ppgtt(fpriv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = set_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		ret = set_persistence(ctx, args);
		break;

	case I915_CONTEXT_PARAM_RINGSIZE:
		ret = set_ringsize(ctx, args);
		break;

	case I915_CONTEXT_PARAM_BAN_PERIOD:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

struct create_ext {
	struct i915_gem_context *ctx;
	struct drm_i915_file_private *fpriv;
};

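/*
 * Apply an I915_CONTEXT_CREATE_EXT_SETPARAM extension: the embedded
 * param must not name a context id since the new context has not been
 * published yet.
 */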
static int create_setparam(struct i915_user_extension __user *ext, void *data)
{
	struct drm_i915_gem_context_create_ext_setparam local;
	const struct create_ext *arg = data;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	if (local.param.ctx_id)
		return -EINVAL;

	return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}

static int copy_ring_size(struct intel_context *dst,
			  struct intel_context *src)
{
	long sz;

	sz = intel_context_get_ring_size(src);
	if (sz < 0)
		return sz;

	return intel_context_set_ring_size(dst, sz);
}

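/*
 * Duplicate the source context's engine map for a newly created
 * context. Each intel_context is created afresh (virtual engines are
 * copied rather than shared) and the resulting map atomically replaces
 * the default engines installed by the constructor.
 */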
static int clone_engines(struct i915_gem_context *dst,
			 struct i915_gem_context *src)
{
	struct i915_gem_engines *clone, *e;
	bool user_engines;
	unsigned long n;

	e = __context_engines_await(src, &user_engines);
	if (!e)
		return -ENOENT;

	clone = alloc_engines(e->num_engines);
	if (!clone)
		goto err_unlock;

	for (n = 0; n < e->num_engines; n++) {
		struct intel_engine_cs *engine;

		if (!e->engines[n]) {
			clone->engines[n] = NULL;
			continue;
		}
		engine = e->engines[n]->engine;

		/*
		 * Virtual engines are singletons; they can only exist
		 * inside a single context, because they embed their
		 * HW context... As each virtual context implies a single
		 * timeline (each engine can only dequeue a single request
		 * at any time), it would be surprising for two contexts
		 * to use the same engine. So let's create a copy of
		 * the virtual engine instead.
		 */
		if (intel_engine_is_virtual(engine))
			clone->engines[n] =
				intel_execlists_clone_virtual(engine);
		else
			clone->engines[n] = intel_context_create(engine);
		if (IS_ERR_OR_NULL(clone->engines[n])) {
			__free_engines(clone, n);
			goto err_unlock;
		}

		intel_context_set_gem(clone->engines[n], dst);

		/* Copy across the preferred ringsize */
		if (copy_ring_size(clone->engines[n], e->engines[n])) {
			__free_engines(clone, n + 1);
			goto err_unlock;
		}
	}
	clone->num_engines = n;
	i915_sw_fence_complete(&e->fence);

	/* Serialised by constructor */
	engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
	if (user_engines)
		i915_gem_context_set_user_engines(dst);
	else
		i915_gem_context_clear_user_engines(dst);
	return 0;

err_unlock:
	i915_sw_fence_complete(&e->fence);
	return -ENOMEM;
}

static int clone_flags(struct i915_gem_context *dst,
		       struct i915_gem_context *src)
{
	dst->user_flags = src->user_flags;
	return 0;
}

static int clone_schedattr(struct i915_gem_context *dst,
			   struct i915_gem_context *src)
{
	dst->sched = src->sched;
	return 0;
}

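/*
 * Copy the per-engine SSEU configuration across; both contexts must
 * have engine maps of the same size with matching engine classes.
 */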
static int clone_sseu(struct i915_gem_context *dst,
		      struct i915_gem_context *src)
{
	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
	struct i915_gem_engines *clone;
	unsigned long n;
	int err;

	/* no locking required; sole access under constructor */
	clone = __context_engines_static(dst);
	if (e->num_engines != clone->num_engines) {
		err = -EINVAL;
		goto unlock;
	}

	for (n = 0; n < e->num_engines; n++) {
		struct intel_context *ce = e->engines[n];

		if (clone->engines[n]->engine->class != ce->engine->class) {
			/* Must have compatible engine maps! */
			err = -EINVAL;
			goto unlock;
		}

		/* serialises with set_sseu */
		err = intel_context_lock_pinned(ce);
		if (err)
			goto unlock;

		clone->engines[n]->sseu = ce->sseu;
		intel_context_unlock_pinned(ce);
	}

	err = 0;
unlock:
	i915_gem_context_unlock_engines(src);
	return err;
}

static int clone_timeline(struct i915_gem_context *dst,
			  struct i915_gem_context *src)
{
	if (src->timeline)
		__assign_timeline(dst, src->timeline);

	return 0;
}

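/*
 * Point the new context at the same address space as the source
 * context, if the source uses a full ppGTT of its own.
 */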
static int clone_vm(struct i915_gem_context *dst,
		    struct i915_gem_context *src)
{
	struct i915_address_space *vm;
	int err = 0;

	if (!rcu_access_pointer(src->vm))
		return 0;

	rcu_read_lock();
	vm = context_get_vm_rcu(src);
	rcu_read_unlock();

	if (!mutex_lock_interruptible(&dst->mutex)) {
		__assign_ppgtt(dst, vm);
		mutex_unlock(&dst->mutex);
	} else {
		err = -EINTR;
	}

	i915_vm_put(vm);
	return err;
}

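/*
 * I915_CONTEXT_CREATE_EXT_CLONE: copy selected state from an existing
 * context into the context being created. Each I915_CONTEXT_CLONE_*
 * flag bit indexes a copy routine in the table below.
 */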
static int create_clone(struct i915_user_extension __user *ext, void *data)
{
	static int (* const fn[])(struct i915_gem_context *dst,
				  struct i915_gem_context *src) = {
#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
		MAP(ENGINES, clone_engines),
		MAP(FLAGS, clone_flags),
		MAP(SCHEDATTR, clone_schedattr),
		MAP(SSEU, clone_sseu),
		MAP(TIMELINE, clone_timeline),
		MAP(VM, clone_vm),
#undef MAP
	};
	struct drm_i915_gem_context_create_ext_clone local;
	const struct create_ext *arg = data;
	struct i915_gem_context *dst = arg->ctx;
	struct i915_gem_context *src;
	int err, bit;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
		     I915_CONTEXT_CLONE_UNKNOWN);

	if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
		return -EINVAL;

	if (local.rsvd)
		return -EINVAL;

	rcu_read_lock();
	src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
	rcu_read_unlock();
	if (!src)
		return -ENOENT;

	GEM_BUG_ON(src == dst);

	for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
		if (!(local.flags & BIT(bit)))
			continue;

		err = fn[bit](dst, src);
		if (err)
			return err;
	}

	return 0;
}

static const i915_user_extension_fn create_extensions[] = {
	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
	[I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
};

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

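/*
 * The CONTEXT_CREATE ioctl (with optional CREATE_EXT extensions):
 * allocate a new logical context, apply any SETPARAM/CLONE extensions,
 * then publish it in the client's context xarray and return the new id.
 */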
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(&i915->gt);
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		drm_dbg(&i915->drm,
			"client %s[%d] banned from creating ctx\n",
			current->comm, task_pid_nr(current));
		return -EIO;
	}

	ext_data.ctx = i915_gem_create_context(i915, args->flags);
	if (IS_ERR(ext_data.ctx))
		return PTR_ERR(ext_data.ctx);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_ctx;
	}

	ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
	if (ret < 0)
		goto err_ctx;

	args->ctx_id = id;
	drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);

	return 0;

err_ctx:
	context_close(ext_data.ctx);
	return ret;
}

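/*
 * DRM_IOCTL_I915_GEM_CONTEXT_DESTROY: remove the context from the
 * client's id space and close it. The default context (id 0) cannot
 * be destroyed.
 */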
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	if (!args->ctx_id)
		return -ENOENT;

	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	context_close(ctx);
	return 0;
}

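/*
 * Report the current SSEU configuration for one engine of the context.
 * A zero args->size is a size query; otherwise the engine is looked up
 * from the user's description and its state sampled under the pin lock
 * to serialise against set_sseu().
 */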
static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}

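/*
 * DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM: read back a single context
 * parameter. Scalar values are returned with args->size set to 0;
 * SSEU and ENGINES also report the size of the data written.
 */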
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->size = 0;
		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;

	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		rcu_read_lock();
		if (rcu_access_pointer(ctx->vm))
			args->value = rcu_dereference(ctx->vm)->total;
		else
			args->value = to_i915(dev)->ggtt.vm.total;
		rcu_read_unlock();
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = get_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_RINGSIZE:
		ret = get_ringsize(ctx, args);
		break;

	case I915_CONTEXT_PARAM_BAN_PERIOD:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

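/* DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM: set a single context parameter */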
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = ctx_setparam(file_priv, ctx, args);

	i915_gem_context_put(ctx);
	return ret;
}

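/*
 * DRM_IOCTL_I915_GET_RESET_STATS: report how often this context has
 * been found guilty of a hang (batch_active) or was active when a hang
 * occurred (batch_pending). The global reset count is only exposed to
 * privileged clients.
 */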
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
{
	const struct i915_gem_engines *e = it->engines;
	struct intel_context *ctx;

	if (unlikely(!e))
		return NULL;

	do {
		if (it->idx >= e->num_engines)
			return NULL;

		ctx = e->engines[it->idx++];
	} while (!ctx);

	return ctx;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif

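/*
 * Global slab for i915_lut_handle objects, registered with the
 * i915_globals shrink/exit infrastructure.
 */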
static void i915_global_gem_context_shrink(void)
{
	kmem_cache_shrink(global.slab_luts);
}

static void i915_global_gem_context_exit(void)
{
	kmem_cache_destroy(global.slab_luts);
}

static struct i915_global_gem_context global = { {
	.shrink = i915_global_gem_context_shrink,
	.exit = i915_global_gem_context_exit,
} };

int __init i915_global_gem_context_init(void)
{
	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!global.slab_luts)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}
