110be98a7SChris Wilson /*
210be98a7SChris Wilson  * SPDX-License-Identifier: MIT
310be98a7SChris Wilson  *
410be98a7SChris Wilson  * Copyright © 2011-2012 Intel Corporation
510be98a7SChris Wilson  */
610be98a7SChris Wilson 
710be98a7SChris Wilson /*
810be98a7SChris Wilson  * This file implements HW context support. On gen5+ a HW context consists of an
910be98a7SChris Wilson  * opaque GPU object which is referenced at times of context saves and restores.
1010be98a7SChris Wilson  * With RC6 enabled, the context is also referenced as the GPU enters and exits
1110be98a7SChris Wilson  * RC6 (the GPU has its own internal power context, except on gen5). Though
1210be98a7SChris Wilson  * something like a context does exist for the media ring, the code only
1310be98a7SChris Wilson  * supports contexts for the render ring.
1410be98a7SChris Wilson  *
1510be98a7SChris Wilson  * In software, there is a distinction between contexts created by the user,
1610be98a7SChris Wilson  * and the default HW context. The default HW context is used by GPU clients
1710be98a7SChris Wilson  * that do not request setup of their own hardware context. The default
1810be98a7SChris Wilson  * context's state is never restored to help prevent programming errors. This
1910be98a7SChris Wilson  * would happen if a client ran and piggy-backed off another client's GPU state.
2010be98a7SChris Wilson  * The default context only exists to give the GPU some offset to load as the
2110be98a7SChris Wilson  * current context, in order to trigger a save of the context we actually care
2210be98a7SChris Wilson  * about. In fact, the code could likely be constructed, albeit in a more
2310be98a7SChris Wilson  * complicated fashion, to never use the default context, though that limits
2410be98a7SChris Wilson  * the driver's ability to swap out and/or destroy other contexts.
2510be98a7SChris Wilson  *
2610be98a7SChris Wilson  * All other contexts are created at the request of a GPU client. These contexts
2710be98a7SChris Wilson  * store GPU state, and thus allow GPU clients to avoid re-emitting state (and
2810be98a7SChris Wilson  * potentially to query certain state) at any time. The kernel driver makes
2910be98a7SChris Wilson  * certain that the appropriate commands are inserted.
3010be98a7SChris Wilson  *
3110be98a7SChris Wilson  * The context life cycle is semi-complicated in that context BOs may live
3210be98a7SChris Wilson  * longer than the context itself because of the way the hardware and object
3310be98a7SChris Wilson  * tracking work. Below is a very crude representation of the state machine
3410be98a7SChris Wilson  * describing the context life.
3510be98a7SChris Wilson  *                                         refcount     pincount     active
3610be98a7SChris Wilson  * S0: initial state                          0            0           0
3710be98a7SChris Wilson  * S1: context created                        1            0           0
3810be98a7SChris Wilson  * S2: context is currently running           2            1           X
3910be98a7SChris Wilson  * S3: GPU referenced, but not current        2            0           1
4010be98a7SChris Wilson  * S4: context is current, but destroyed      1            1           0
4110be98a7SChris Wilson  * S5: like S3, but destroyed                 1            0           1
4210be98a7SChris Wilson  *
4310be98a7SChris Wilson  * The most common (but not all) transitions:
4410be98a7SChris Wilson  * S0->S1: client creates a context
4510be98a7SChris Wilson  * S1->S2: client submits execbuf with context
4610be98a7SChris Wilson  * S2->S3: another client submits an execbuf with its own context
4710be98a7SChris Wilson  * S3->S1: context object was retired
4810be98a7SChris Wilson  * S3->S2: client submits another execbuf
4910be98a7SChris Wilson  * S2->S4: context destroy called with current context
5010be98a7SChris Wilson  * S3->S5->S0: destroy path
5110be98a7SChris Wilson  * S4->S5->S0: destroy path on current context
5210be98a7SChris Wilson  *
5310be98a7SChris Wilson  * There are two confusing terms used above:
5410be98a7SChris Wilson  *  The "current context" means the context which is currently running on the
5510be98a7SChris Wilson  *  GPU. The GPU has loaded its state already and has stored away the gtt
5610be98a7SChris Wilson  *  offset of the BO. The GPU is not actively referencing the data at this
5710be98a7SChris Wilson  *  offset, but it will on the next context switch. The only way to avoid this
5810be98a7SChris Wilson  *  is to do a GPU reset.
5910be98a7SChris Wilson  *
6010be98a7SChris Wilson  *  An "active context" is one which was previously the "current context" and is
6110be98a7SChris Wilson  *  on the active list waiting for the next context switch to occur. Until this
6210be98a7SChris Wilson  *  happens, the object must remain at the same gtt offset. It is therefore
6310be98a7SChris Wilson  *  possible to destroy a context while it is still active.
6410be98a7SChris Wilson  *
6510be98a7SChris Wilson  */
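
/*
 * Purely illustrative (not part of the driver): a minimal sketch, assuming the
 * standard uAPI from include/uapi/drm/i915_drm.h, of how userspace drives the
 * life cycle above. Error handling and the actual execbuffer contents are
 * omitted.
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *	struct drm_i915_gem_execbuffer2 execbuf = {};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);      // S0->S1
 *	i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);        // S1->S2
 *
 *	destroy.ctx_id = create.ctx_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);    // S2->S4 if still current
 */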
6610be98a7SChris Wilson 
6710be98a7SChris Wilson #include <linux/log2.h>
6810be98a7SChris Wilson #include <linux/nospec.h>
6910be98a7SChris Wilson 
705f2ec909SJani Nikula #include <drm/drm_cache.h>
7100dae4d3SJason Ekstrand #include <drm/drm_syncobj.h>
7200dae4d3SJason Ekstrand 
732c86e55dSMatthew Auld #include "gt/gen6_ppgtt.h"
749f3ccd40SChris Wilson #include "gt/intel_context.h"
7588be76cdSChris Wilson #include "gt/intel_context_param.h"
762e0986a5SChris Wilson #include "gt/intel_engine_heartbeat.h"
77750e76b4SChris Wilson #include "gt/intel_engine_user.h"
7845233ab2SChris Wilson #include "gt/intel_gpu_commands.h"
792871ea85SChris Wilson #include "gt/intel_ring.h"
8010be98a7SChris Wilson 
81d3ac8d42SDaniele Ceraolo Spurio #include "pxp/intel_pxp.h"
82d3ac8d42SDaniele Ceraolo Spurio 
835472b3f2SJani Nikula #include "i915_file_private.h"
8410be98a7SChris Wilson #include "i915_gem_context.h"
8510be98a7SChris Wilson #include "i915_trace.h"
8610be98a7SChris Wilson #include "i915_user_extensions.h"
8710be98a7SChris Wilson 
8810be98a7SChris Wilson #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
8910be98a7SChris Wilson 
90a6270d1dSDaniel Vetter static struct kmem_cache *slab_luts;
9110be98a7SChris Wilson 
9210be98a7SChris Wilson struct i915_lut_handle *i915_lut_handle_alloc(void)
9310be98a7SChris Wilson {
94a6270d1dSDaniel Vetter 	return kmem_cache_alloc(slab_luts, GFP_KERNEL);
9510be98a7SChris Wilson }
9610be98a7SChris Wilson 
9710be98a7SChris Wilson void i915_lut_handle_free(struct i915_lut_handle *lut)
9810be98a7SChris Wilson {
99a6270d1dSDaniel Vetter 	kmem_cache_free(slab_luts, lut);
10010be98a7SChris Wilson }
10110be98a7SChris Wilson 
10210be98a7SChris Wilson static void lut_close(struct i915_gem_context *ctx)
10310be98a7SChris Wilson {
10410be98a7SChris Wilson 	struct radix_tree_iter iter;
10510be98a7SChris Wilson 	void __rcu **slot;
10610be98a7SChris Wilson 
107f7ce8639SChris Wilson 	mutex_lock(&ctx->lut_mutex);
10810be98a7SChris Wilson 	rcu_read_lock();
10910be98a7SChris Wilson 	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
11010be98a7SChris Wilson 		struct i915_vma *vma = rcu_dereference_raw(*slot);
111155ab883SChris Wilson 		struct drm_i915_gem_object *obj = vma->obj;
112155ab883SChris Wilson 		struct i915_lut_handle *lut;
11310be98a7SChris Wilson 
114155ab883SChris Wilson 		if (!kref_get_unless_zero(&obj->base.refcount))
115155ab883SChris Wilson 			continue;
116155ab883SChris Wilson 
117096a42ddSChris Wilson 		spin_lock(&obj->lut_lock);
118155ab883SChris Wilson 		list_for_each_entry(lut, &obj->lut_list, obj_link) {
119155ab883SChris Wilson 			if (lut->ctx != ctx)
120155ab883SChris Wilson 				continue;
121155ab883SChris Wilson 
122155ab883SChris Wilson 			if (lut->handle != iter.index)
123155ab883SChris Wilson 				continue;
124155ab883SChris Wilson 
125155ab883SChris Wilson 			list_del(&lut->obj_link);
126155ab883SChris Wilson 			break;
127155ab883SChris Wilson 		}
128096a42ddSChris Wilson 		spin_unlock(&obj->lut_lock);
129155ab883SChris Wilson 
130155ab883SChris Wilson 		if (&lut->obj_link != &obj->lut_list) {
131155ab883SChris Wilson 			i915_lut_handle_free(lut);
13210be98a7SChris Wilson 			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
133155ab883SChris Wilson 			i915_vma_close(vma);
134155ab883SChris Wilson 			i915_gem_object_put(obj);
135155ab883SChris Wilson 		}
13610be98a7SChris Wilson 
137155ab883SChris Wilson 		i915_gem_object_put(obj);
13810be98a7SChris Wilson 	}
13910be98a7SChris Wilson 	rcu_read_unlock();
140f7ce8639SChris Wilson 	mutex_unlock(&ctx->lut_mutex);
14110be98a7SChris Wilson }
14210be98a7SChris Wilson 
14310be98a7SChris Wilson static struct intel_context *
14410be98a7SChris Wilson lookup_user_engine(struct i915_gem_context *ctx,
14510be98a7SChris Wilson 		   unsigned long flags,
14610be98a7SChris Wilson 		   const struct i915_engine_class_instance *ci)
14710be98a7SChris Wilson #define LOOKUP_USER_INDEX BIT(0)
14810be98a7SChris Wilson {
14910be98a7SChris Wilson 	int idx;
15010be98a7SChris Wilson 
15110be98a7SChris Wilson 	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
15210be98a7SChris Wilson 		return ERR_PTR(-EINVAL);
15310be98a7SChris Wilson 
15410be98a7SChris Wilson 	if (!i915_gem_context_user_engines(ctx)) {
15510be98a7SChris Wilson 		struct intel_engine_cs *engine;
15610be98a7SChris Wilson 
15710be98a7SChris Wilson 		engine = intel_engine_lookup_user(ctx->i915,
15810be98a7SChris Wilson 						  ci->engine_class,
15910be98a7SChris Wilson 						  ci->engine_instance);
16010be98a7SChris Wilson 		if (!engine)
16110be98a7SChris Wilson 			return ERR_PTR(-EINVAL);
16210be98a7SChris Wilson 
163f1c4d157SChris Wilson 		idx = engine->legacy_idx;
16410be98a7SChris Wilson 	} else {
16510be98a7SChris Wilson 		idx = ci->engine_instance;
16610be98a7SChris Wilson 	}
16710be98a7SChris Wilson 
16810be98a7SChris Wilson 	return i915_gem_context_get_engine(ctx, idx);
16910be98a7SChris Wilson }
17010be98a7SChris Wilson 
171aaa5957cSJason Ekstrand static int validate_priority(struct drm_i915_private *i915,
172aaa5957cSJason Ekstrand 			     const struct drm_i915_gem_context_param *args)
173aaa5957cSJason Ekstrand {
174aaa5957cSJason Ekstrand 	s64 priority = args->value;
175aaa5957cSJason Ekstrand 
176aaa5957cSJason Ekstrand 	if (args->size)
177aaa5957cSJason Ekstrand 		return -EINVAL;
178aaa5957cSJason Ekstrand 
179aaa5957cSJason Ekstrand 	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
180aaa5957cSJason Ekstrand 		return -ENODEV;
181aaa5957cSJason Ekstrand 
182aaa5957cSJason Ekstrand 	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
183aaa5957cSJason Ekstrand 	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
184aaa5957cSJason Ekstrand 		return -EINVAL;
185aaa5957cSJason Ekstrand 
186aaa5957cSJason Ekstrand 	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
187aaa5957cSJason Ekstrand 	    !capable(CAP_SYS_NICE))
188aaa5957cSJason Ekstrand 		return -EPERM;
189aaa5957cSJason Ekstrand 
190aaa5957cSJason Ekstrand 	return 0;
191aaa5957cSJason Ekstrand }
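
/*
 * Purely illustrative (not part of the driver): the checks above back the
 * I915_CONTEXT_PARAM_PRIORITY uAPI. A minimal userspace sketch, assuming a
 * context id obtained from context create, would be:
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = I915_CONTEXT_MAX_USER_PRIORITY, // above default needs CAP_SYS_NICE
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */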
192aaa5957cSJason Ekstrand 
193d3ac8d42SDaniele Ceraolo Spurio static void proto_context_close(struct drm_i915_private *i915,
194d3ac8d42SDaniele Ceraolo Spurio 				struct i915_gem_proto_context *pc)
195a34857dcSJason Ekstrand {
196d4433c76SJason Ekstrand 	int i;
197d4433c76SJason Ekstrand 
198d3ac8d42SDaniele Ceraolo Spurio 	if (pc->pxp_wakeref)
199d3ac8d42SDaniele Ceraolo Spurio 		intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
200a34857dcSJason Ekstrand 	if (pc->vm)
201a34857dcSJason Ekstrand 		i915_vm_put(pc->vm);
202d4433c76SJason Ekstrand 	if (pc->user_engines) {
203d4433c76SJason Ekstrand 		for (i = 0; i < pc->num_user_engines; i++)
204d4433c76SJason Ekstrand 			kfree(pc->user_engines[i].siblings);
205d4433c76SJason Ekstrand 		kfree(pc->user_engines);
206d4433c76SJason Ekstrand 	}
207a34857dcSJason Ekstrand 	kfree(pc);
208a34857dcSJason Ekstrand }
209a34857dcSJason Ekstrand 
210d4433c76SJason Ekstrand static int proto_context_set_persistence(struct drm_i915_private *i915,
211d4433c76SJason Ekstrand 					 struct i915_gem_proto_context *pc,
212d4433c76SJason Ekstrand 					 bool persist)
213d4433c76SJason Ekstrand {
214d4433c76SJason Ekstrand 	if (persist) {
215d4433c76SJason Ekstrand 		/*
216d4433c76SJason Ekstrand 		 * Only contexts that are short-lived [that will expire or be
217d4433c76SJason Ekstrand 		 * reset] are allowed to survive past termination. We require
218d4433c76SJason Ekstrand 		 * hangcheck to ensure that the persistent requests are healthy.
219d4433c76SJason Ekstrand 		 */
220d4433c76SJason Ekstrand 		if (!i915->params.enable_hangcheck)
221d4433c76SJason Ekstrand 			return -EINVAL;
222d4433c76SJason Ekstrand 
223d4433c76SJason Ekstrand 		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
224d4433c76SJason Ekstrand 	} else {
225d4433c76SJason Ekstrand 		/* To cancel a context we use "preempt-to-idle" */
226d4433c76SJason Ekstrand 		if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
227d4433c76SJason Ekstrand 			return -ENODEV;
228d4433c76SJason Ekstrand 
229d4433c76SJason Ekstrand 		/*
230d4433c76SJason Ekstrand 		 * If the cancel fails, we then need to reset, cleanly!
231d4433c76SJason Ekstrand 		 *
232d4433c76SJason Ekstrand 		 * If the per-engine reset fails, all hope is lost! We resort
233d4433c76SJason Ekstrand 		 * to a full GPU reset in that unlikely case, but realistically
234d4433c76SJason Ekstrand 		 * if the engine could not reset, the full reset does not fare
235d4433c76SJason Ekstrand 		 * much better. The damage has been done.
236d4433c76SJason Ekstrand 		 *
237d4433c76SJason Ekstrand 		 * However, if we cannot reset an engine by itself, we cannot
238d4433c76SJason Ekstrand 		 * cleanup a hanging persistent context without causing
239d4433c76SJason Ekstrand 		 * collateral damage, and we should not pretend we can by
240d4433c76SJason Ekstrand 		 * exposing the interface.
241d4433c76SJason Ekstrand 		 */
2421a9c4db4SMichał Winiarski 		if (!intel_has_reset_engine(to_gt(i915)))
243d4433c76SJason Ekstrand 			return -ENODEV;
244d4433c76SJason Ekstrand 
245d4433c76SJason Ekstrand 		pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
246d4433c76SJason Ekstrand 	}
247d4433c76SJason Ekstrand 
248d4433c76SJason Ekstrand 	return 0;
249d4433c76SJason Ekstrand }
250d4433c76SJason Ekstrand 
251d3ac8d42SDaniele Ceraolo Spurio static int proto_context_set_protected(struct drm_i915_private *i915,
252d3ac8d42SDaniele Ceraolo Spurio 				       struct i915_gem_proto_context *pc,
253d3ac8d42SDaniele Ceraolo Spurio 				       bool protected)
254d3ac8d42SDaniele Ceraolo Spurio {
255d3ac8d42SDaniele Ceraolo Spurio 	int ret = 0;
256d3ac8d42SDaniele Ceraolo Spurio 
257d3ac8d42SDaniele Ceraolo Spurio 	if (!protected) {
258d3ac8d42SDaniele Ceraolo Spurio 		pc->uses_protected_content = false;
2591a9c4db4SMichał Winiarski 	} else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
260d3ac8d42SDaniele Ceraolo Spurio 		ret = -ENODEV;
261d3ac8d42SDaniele Ceraolo Spurio 	} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
262d3ac8d42SDaniele Ceraolo Spurio 		   !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
263d3ac8d42SDaniele Ceraolo Spurio 		ret = -EPERM;
264d3ac8d42SDaniele Ceraolo Spurio 	} else {
265d3ac8d42SDaniele Ceraolo Spurio 		pc->uses_protected_content = true;
266d3ac8d42SDaniele Ceraolo Spurio 
267d3ac8d42SDaniele Ceraolo Spurio 		/*
268d3ac8d42SDaniele Ceraolo Spurio 		 * protected context usage requires the PXP session to be up,
269d3ac8d42SDaniele Ceraolo Spurio 		 * which in turn requires the device to be active.
270d3ac8d42SDaniele Ceraolo Spurio 		 */
271d3ac8d42SDaniele Ceraolo Spurio 		pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
27232271ecdSDaniele Ceraolo Spurio 
2731a9c4db4SMichał Winiarski 		if (!intel_pxp_is_active(&to_gt(i915)->pxp))
2741a9c4db4SMichał Winiarski 			ret = intel_pxp_start(&to_gt(i915)->pxp);
275d3ac8d42SDaniele Ceraolo Spurio 	}
276d3ac8d42SDaniele Ceraolo Spurio 
277d3ac8d42SDaniele Ceraolo Spurio 	return ret;
278d3ac8d42SDaniele Ceraolo Spurio }
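
/*
 * Purely illustrative (not part of the driver): protected content can only be
 * requested at context create time, by chaining a setparam extension into the
 * create ioctl roughly as sketched below (the context must also be made
 * non-recoverable, per the checks above).
 *
 *	struct drm_i915_gem_context_create_ext_setparam pxp_ext = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
 *			.value = 1,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&pxp_ext,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 */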
279d3ac8d42SDaniele Ceraolo Spurio 
280a34857dcSJason Ekstrand static struct i915_gem_proto_context *
281a34857dcSJason Ekstrand proto_context_create(struct drm_i915_private *i915, unsigned int flags)
282a34857dcSJason Ekstrand {
283a34857dcSJason Ekstrand 	struct i915_gem_proto_context *pc, *err;
284a34857dcSJason Ekstrand 
285a34857dcSJason Ekstrand 	pc = kzalloc(sizeof(*pc), GFP_KERNEL);
286a34857dcSJason Ekstrand 	if (!pc)
287a34857dcSJason Ekstrand 		return ERR_PTR(-ENOMEM);
288a34857dcSJason Ekstrand 
289d4433c76SJason Ekstrand 	pc->num_user_engines = -1;
290d4433c76SJason Ekstrand 	pc->user_engines = NULL;
291a34857dcSJason Ekstrand 	pc->user_flags = BIT(UCONTEXT_BANNABLE) |
292a34857dcSJason Ekstrand 			 BIT(UCONTEXT_RECOVERABLE);
293a34857dcSJason Ekstrand 	if (i915->params.enable_hangcheck)
294a34857dcSJason Ekstrand 		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
295a34857dcSJason Ekstrand 	pc->sched.priority = I915_PRIORITY_NORMAL;
296a34857dcSJason Ekstrand 
297a34857dcSJason Ekstrand 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
298a34857dcSJason Ekstrand 		if (!HAS_EXECLISTS(i915)) {
299a34857dcSJason Ekstrand 			err = ERR_PTR(-EINVAL);
300a34857dcSJason Ekstrand 			goto proto_close;
301a34857dcSJason Ekstrand 		}
302a34857dcSJason Ekstrand 		pc->single_timeline = true;
303a34857dcSJason Ekstrand 	}
304a34857dcSJason Ekstrand 
305a34857dcSJason Ekstrand 	return pc;
306a34857dcSJason Ekstrand 
307a34857dcSJason Ekstrand proto_close:
308d3ac8d42SDaniele Ceraolo Spurio 	proto_context_close(i915, pc);
309a34857dcSJason Ekstrand 	return err;
310a34857dcSJason Ekstrand }
311a34857dcSJason Ekstrand 
312a4c1cdd3SJason Ekstrand static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
313a4c1cdd3SJason Ekstrand 					 struct i915_gem_proto_context *pc,
314a4c1cdd3SJason Ekstrand 					 u32 *id)
315a4c1cdd3SJason Ekstrand {
316a4c1cdd3SJason Ekstrand 	int ret;
317a4c1cdd3SJason Ekstrand 	void *old;
318a4c1cdd3SJason Ekstrand 
319a4c1cdd3SJason Ekstrand 	lockdep_assert_held(&fpriv->proto_context_lock);
320a4c1cdd3SJason Ekstrand 
321a4c1cdd3SJason Ekstrand 	ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
322a4c1cdd3SJason Ekstrand 	if (ret)
323a4c1cdd3SJason Ekstrand 		return ret;
324a4c1cdd3SJason Ekstrand 
325a4c1cdd3SJason Ekstrand 	old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
326a4c1cdd3SJason Ekstrand 	if (xa_is_err(old)) {
327a4c1cdd3SJason Ekstrand 		xa_erase(&fpriv->context_xa, *id);
328a4c1cdd3SJason Ekstrand 		return xa_err(old);
329a4c1cdd3SJason Ekstrand 	}
330a4c1cdd3SJason Ekstrand 	WARN_ON(old);
331a4c1cdd3SJason Ekstrand 
332a4c1cdd3SJason Ekstrand 	return 0;
333a4c1cdd3SJason Ekstrand }
334a4c1cdd3SJason Ekstrand 
335a4c1cdd3SJason Ekstrand static int proto_context_register(struct drm_i915_file_private *fpriv,
336a4c1cdd3SJason Ekstrand 				  struct i915_gem_proto_context *pc,
337a4c1cdd3SJason Ekstrand 				  u32 *id)
338a4c1cdd3SJason Ekstrand {
339a4c1cdd3SJason Ekstrand 	int ret;
340a4c1cdd3SJason Ekstrand 
341a4c1cdd3SJason Ekstrand 	mutex_lock(&fpriv->proto_context_lock);
342a4c1cdd3SJason Ekstrand 	ret = proto_context_register_locked(fpriv, pc, id);
343a4c1cdd3SJason Ekstrand 	mutex_unlock(&fpriv->proto_context_lock);
344a4c1cdd3SJason Ekstrand 
345a4c1cdd3SJason Ekstrand 	return ret;
346a4c1cdd3SJason Ekstrand }
347a4c1cdd3SJason Ekstrand 
348d83d5298SJani Nikula static struct i915_address_space *
349d83d5298SJani Nikula i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
350d83d5298SJani Nikula {
351d83d5298SJani Nikula 	struct i915_address_space *vm;
352d83d5298SJani Nikula 
353d83d5298SJani Nikula 	xa_lock(&file_priv->vm_xa);
354d83d5298SJani Nikula 	vm = xa_load(&file_priv->vm_xa, id);
355d83d5298SJani Nikula 	if (vm)
356d83d5298SJani Nikula 		kref_get(&vm->ref);
357d83d5298SJani Nikula 	xa_unlock(&file_priv->vm_xa);
358d83d5298SJani Nikula 
359d83d5298SJani Nikula 	return vm;
360d83d5298SJani Nikula }
361d83d5298SJani Nikula 
362d4433c76SJason Ekstrand static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
363d4433c76SJason Ekstrand 			    struct i915_gem_proto_context *pc,
364d4433c76SJason Ekstrand 			    const struct drm_i915_gem_context_param *args)
365d4433c76SJason Ekstrand {
366d4433c76SJason Ekstrand 	struct drm_i915_private *i915 = fpriv->dev_priv;
367d4433c76SJason Ekstrand 	struct i915_address_space *vm;
368d4433c76SJason Ekstrand 
369d4433c76SJason Ekstrand 	if (args->size)
370d4433c76SJason Ekstrand 		return -EINVAL;
371d4433c76SJason Ekstrand 
372d4433c76SJason Ekstrand 	if (!HAS_FULL_PPGTT(i915))
373d4433c76SJason Ekstrand 		return -ENODEV;
374d4433c76SJason Ekstrand 
375d4433c76SJason Ekstrand 	if (upper_32_bits(args->value))
376d4433c76SJason Ekstrand 		return -ENOENT;
377d4433c76SJason Ekstrand 
378d4433c76SJason Ekstrand 	vm = i915_gem_vm_lookup(fpriv, args->value);
379d4433c76SJason Ekstrand 	if (!vm)
380d4433c76SJason Ekstrand 		return -ENOENT;
381d4433c76SJason Ekstrand 
382d4433c76SJason Ekstrand 	if (pc->vm)
383d4433c76SJason Ekstrand 		i915_vm_put(pc->vm);
384d4433c76SJason Ekstrand 	pc->vm = vm;
385d4433c76SJason Ekstrand 
386d4433c76SJason Ekstrand 	return 0;
387d4433c76SJason Ekstrand }
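
/*
 * Purely illustrative (not part of the driver): the VM id consumed above comes
 * from DRM_IOCTL_I915_GEM_VM_CREATE, and userspace would typically attach it
 * to a context along the lines of:
 *
 *	struct drm_i915_gem_vm_control vm = {};
 *	struct drm_i915_gem_context_param p = { .ctx_id = ctx_id };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
 *	p.param = I915_CONTEXT_PARAM_VM;
 *	p.value = vm.vm_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */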
388d4433c76SJason Ekstrand 
389d4433c76SJason Ekstrand struct set_proto_ctx_engines {
390d4433c76SJason Ekstrand 	struct drm_i915_private *i915;
391d4433c76SJason Ekstrand 	unsigned num_engines;
392d4433c76SJason Ekstrand 	struct i915_gem_proto_engine *engines;
393d4433c76SJason Ekstrand };
394d4433c76SJason Ekstrand 
395d4433c76SJason Ekstrand static int
396d4433c76SJason Ekstrand set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
397d4433c76SJason Ekstrand 			      void *data)
398d4433c76SJason Ekstrand {
399d4433c76SJason Ekstrand 	struct i915_context_engines_load_balance __user *ext =
400d4433c76SJason Ekstrand 		container_of_user(base, typeof(*ext), base);
401d4433c76SJason Ekstrand 	const struct set_proto_ctx_engines *set = data;
402d4433c76SJason Ekstrand 	struct drm_i915_private *i915 = set->i915;
403d4433c76SJason Ekstrand 	struct intel_engine_cs **siblings;
404d4433c76SJason Ekstrand 	u16 num_siblings, idx;
405d4433c76SJason Ekstrand 	unsigned int n;
406d4433c76SJason Ekstrand 	int err;
407d4433c76SJason Ekstrand 
408d4433c76SJason Ekstrand 	if (!HAS_EXECLISTS(i915))
409d4433c76SJason Ekstrand 		return -ENODEV;
410d4433c76SJason Ekstrand 
411d4433c76SJason Ekstrand 	if (get_user(idx, &ext->engine_index))
412d4433c76SJason Ekstrand 		return -EFAULT;
413d4433c76SJason Ekstrand 
414d4433c76SJason Ekstrand 	if (idx >= set->num_engines) {
415d4433c76SJason Ekstrand 		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
416d4433c76SJason Ekstrand 			idx, set->num_engines);
417d4433c76SJason Ekstrand 		return -EINVAL;
418d4433c76SJason Ekstrand 	}
419d4433c76SJason Ekstrand 
420d4433c76SJason Ekstrand 	idx = array_index_nospec(idx, set->num_engines);
421d4433c76SJason Ekstrand 	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
422d4433c76SJason Ekstrand 		drm_dbg(&i915->drm,
423d4433c76SJason Ekstrand 			"Invalid placement[%d], already occupied\n", idx);
424d4433c76SJason Ekstrand 		return -EEXIST;
425d4433c76SJason Ekstrand 	}
426d4433c76SJason Ekstrand 
427d4433c76SJason Ekstrand 	if (get_user(num_siblings, &ext->num_siblings))
428d4433c76SJason Ekstrand 		return -EFAULT;
429d4433c76SJason Ekstrand 
430d4433c76SJason Ekstrand 	err = check_user_mbz(&ext->flags);
431d4433c76SJason Ekstrand 	if (err)
432d4433c76SJason Ekstrand 		return err;
433d4433c76SJason Ekstrand 
434d4433c76SJason Ekstrand 	err = check_user_mbz(&ext->mbz64);
435d4433c76SJason Ekstrand 	if (err)
436d4433c76SJason Ekstrand 		return err;
437d4433c76SJason Ekstrand 
438d4433c76SJason Ekstrand 	if (num_siblings == 0)
439d4433c76SJason Ekstrand 		return 0;
440d4433c76SJason Ekstrand 
441d4433c76SJason Ekstrand 	siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
442d4433c76SJason Ekstrand 	if (!siblings)
443d4433c76SJason Ekstrand 		return -ENOMEM;
444d4433c76SJason Ekstrand 
445d4433c76SJason Ekstrand 	for (n = 0; n < num_siblings; n++) {
446d4433c76SJason Ekstrand 		struct i915_engine_class_instance ci;
447d4433c76SJason Ekstrand 
448d4433c76SJason Ekstrand 		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
449d4433c76SJason Ekstrand 			err = -EFAULT;
450d4433c76SJason Ekstrand 			goto err_siblings;
451d4433c76SJason Ekstrand 		}
452d4433c76SJason Ekstrand 
453d4433c76SJason Ekstrand 		siblings[n] = intel_engine_lookup_user(i915,
454d4433c76SJason Ekstrand 						       ci.engine_class,
455d4433c76SJason Ekstrand 						       ci.engine_instance);
456d4433c76SJason Ekstrand 		if (!siblings[n]) {
457d4433c76SJason Ekstrand 			drm_dbg(&i915->drm,
458d4433c76SJason Ekstrand 				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
459d4433c76SJason Ekstrand 				n, ci.engine_class, ci.engine_instance);
460d4433c76SJason Ekstrand 			err = -EINVAL;
461d4433c76SJason Ekstrand 			goto err_siblings;
462d4433c76SJason Ekstrand 		}
463d4433c76SJason Ekstrand 	}
464d4433c76SJason Ekstrand 
465d4433c76SJason Ekstrand 	if (num_siblings == 1) {
466d4433c76SJason Ekstrand 		set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
467d4433c76SJason Ekstrand 		set->engines[idx].engine = siblings[0];
468d4433c76SJason Ekstrand 		kfree(siblings);
469d4433c76SJason Ekstrand 	} else {
470d4433c76SJason Ekstrand 		set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
471d4433c76SJason Ekstrand 		set->engines[idx].num_siblings = num_siblings;
472d4433c76SJason Ekstrand 		set->engines[idx].siblings = siblings;
473d4433c76SJason Ekstrand 	}
474d4433c76SJason Ekstrand 
475d4433c76SJason Ekstrand 	return 0;
476d4433c76SJason Ekstrand 
477d4433c76SJason Ekstrand err_siblings:
478d4433c76SJason Ekstrand 	kfree(siblings);
479d4433c76SJason Ekstrand 
480d4433c76SJason Ekstrand 	return err;
481d4433c76SJason Ekstrand }
482d4433c76SJason Ekstrand 
483d4433c76SJason Ekstrand static int
484d4433c76SJason Ekstrand set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
485d4433c76SJason Ekstrand {
486d4433c76SJason Ekstrand 	struct i915_context_engines_bond __user *ext =
487d4433c76SJason Ekstrand 		container_of_user(base, typeof(*ext), base);
488d4433c76SJason Ekstrand 	const struct set_proto_ctx_engines *set = data;
489d4433c76SJason Ekstrand 	struct drm_i915_private *i915 = set->i915;
490d4433c76SJason Ekstrand 	struct i915_engine_class_instance ci;
491d4433c76SJason Ekstrand 	struct intel_engine_cs *master;
492d4433c76SJason Ekstrand 	u16 idx, num_bonds;
493d4433c76SJason Ekstrand 	int err, n;
494d4433c76SJason Ekstrand 
495ce7e75c7SMatthew Brost 	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
496ce7e75c7SMatthew Brost 	    !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
497ce7e75c7SMatthew Brost 		drm_dbg(&i915->drm,
4982c85034dSRodrigo Vivi 			"Bonding not supported on this platform\n");
499ce7e75c7SMatthew Brost 		return -ENODEV;
500ce7e75c7SMatthew Brost 	}
501ce7e75c7SMatthew Brost 
502d4433c76SJason Ekstrand 	if (get_user(idx, &ext->virtual_index))
503d4433c76SJason Ekstrand 		return -EFAULT;
504d4433c76SJason Ekstrand 
505d4433c76SJason Ekstrand 	if (idx >= set->num_engines) {
506d4433c76SJason Ekstrand 		drm_dbg(&i915->drm,
507d4433c76SJason Ekstrand 			"Invalid index for virtual engine: %d >= %d\n",
508d4433c76SJason Ekstrand 			idx, set->num_engines);
509d4433c76SJason Ekstrand 		return -EINVAL;
510d4433c76SJason Ekstrand 	}
511d4433c76SJason Ekstrand 
512d4433c76SJason Ekstrand 	idx = array_index_nospec(idx, set->num_engines);
513d4433c76SJason Ekstrand 	if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
514d4433c76SJason Ekstrand 		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
515d4433c76SJason Ekstrand 		return -EINVAL;
516d4433c76SJason Ekstrand 	}
517d4433c76SJason Ekstrand 
518d4433c76SJason Ekstrand 	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
519d4433c76SJason Ekstrand 		drm_dbg(&i915->drm,
520d4433c76SJason Ekstrand 			"Bonding with virtual engines not allowed\n");
521d4433c76SJason Ekstrand 		return -EINVAL;
522d4433c76SJason Ekstrand 	}
523d4433c76SJason Ekstrand 
524d4433c76SJason Ekstrand 	err = check_user_mbz(&ext->flags);
525d4433c76SJason Ekstrand 	if (err)
526d4433c76SJason Ekstrand 		return err;
527d4433c76SJason Ekstrand 
528d4433c76SJason Ekstrand 	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
529d4433c76SJason Ekstrand 		err = check_user_mbz(&ext->mbz64[n]);
530d4433c76SJason Ekstrand 		if (err)
531d4433c76SJason Ekstrand 			return err;
532d4433c76SJason Ekstrand 	}
533d4433c76SJason Ekstrand 
534d4433c76SJason Ekstrand 	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
535d4433c76SJason Ekstrand 		return -EFAULT;
536d4433c76SJason Ekstrand 
537d4433c76SJason Ekstrand 	master = intel_engine_lookup_user(i915,
538d4433c76SJason Ekstrand 					  ci.engine_class,
539d4433c76SJason Ekstrand 					  ci.engine_instance);
540d4433c76SJason Ekstrand 	if (!master) {
541d4433c76SJason Ekstrand 		drm_dbg(&i915->drm,
542d4433c76SJason Ekstrand 			"Unrecognised master engine: { class:%u, instance:%u }\n",
543d4433c76SJason Ekstrand 			ci.engine_class, ci.engine_instance);
544d4433c76SJason Ekstrand 		return -EINVAL;
545d4433c76SJason Ekstrand 	}
546d4433c76SJason Ekstrand 
547b02d86b9SMatthew Brost 	if (intel_engine_uses_guc(master)) {
548b02d86b9SMatthew Brost 		DRM_DEBUG("bonding extension not supported with GuC submission");
549b02d86b9SMatthew Brost 		return -ENODEV;
550b02d86b9SMatthew Brost 	}
551b02d86b9SMatthew Brost 
552d4433c76SJason Ekstrand 	if (get_user(num_bonds, &ext->num_bonds))
553d4433c76SJason Ekstrand 		return -EFAULT;
554d4433c76SJason Ekstrand 
555d4433c76SJason Ekstrand 	for (n = 0; n < num_bonds; n++) {
556d4433c76SJason Ekstrand 		struct intel_engine_cs *bond;
557d4433c76SJason Ekstrand 
558d4433c76SJason Ekstrand 		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
559d4433c76SJason Ekstrand 			return -EFAULT;
560d4433c76SJason Ekstrand 
561d4433c76SJason Ekstrand 		bond = intel_engine_lookup_user(i915,
562d4433c76SJason Ekstrand 						ci.engine_class,
563d4433c76SJason Ekstrand 						ci.engine_instance);
564d4433c76SJason Ekstrand 		if (!bond) {
565d4433c76SJason Ekstrand 			drm_dbg(&i915->drm,
566d4433c76SJason Ekstrand 				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
567d4433c76SJason Ekstrand 				n, ci.engine_class, ci.engine_instance);
568d4433c76SJason Ekstrand 			return -EINVAL;
569d4433c76SJason Ekstrand 		}
570d4433c76SJason Ekstrand 	}
571d4433c76SJason Ekstrand 
572d4433c76SJason Ekstrand 	return 0;
573d4433c76SJason Ekstrand }
574d4433c76SJason Ekstrand 
575e5e32171SMatthew Brost static int
576e5e32171SMatthew Brost set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
577e5e32171SMatthew Brost 				      void *data)
578e5e32171SMatthew Brost {
579e5e32171SMatthew Brost 	struct i915_context_engines_parallel_submit __user *ext =
580e5e32171SMatthew Brost 		container_of_user(base, typeof(*ext), base);
581e5e32171SMatthew Brost 	const struct set_proto_ctx_engines *set = data;
582e5e32171SMatthew Brost 	struct drm_i915_private *i915 = set->i915;
5830f9d36afSMatthew Brost 	struct i915_engine_class_instance prev_engine;
584e5e32171SMatthew Brost 	u64 flags;
585e5e32171SMatthew Brost 	int err = 0, n, i, j;
586e5e32171SMatthew Brost 	u16 slot, width, num_siblings;
587e5e32171SMatthew Brost 	struct intel_engine_cs **siblings = NULL;
588e5e32171SMatthew Brost 	intel_engine_mask_t prev_mask;
589e5e32171SMatthew Brost 
590e5e32171SMatthew Brost 	if (get_user(slot, &ext->engine_index))
591e5e32171SMatthew Brost 		return -EFAULT;
592e5e32171SMatthew Brost 
593e5e32171SMatthew Brost 	if (get_user(width, &ext->width))
594e5e32171SMatthew Brost 		return -EFAULT;
595e5e32171SMatthew Brost 
596e5e32171SMatthew Brost 	if (get_user(num_siblings, &ext->num_siblings))
597e5e32171SMatthew Brost 		return -EFAULT;
598e5e32171SMatthew Brost 
599a88afcfaSMatthew Brost 	if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) &&
600a88afcfaSMatthew Brost 	    num_siblings != 1) {
601a88afcfaSMatthew Brost 		drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",
602a88afcfaSMatthew Brost 			num_siblings);
603a88afcfaSMatthew Brost 		return -EINVAL;
604a88afcfaSMatthew Brost 	}
605a88afcfaSMatthew Brost 
606e5e32171SMatthew Brost 	if (slot >= set->num_engines) {
607e5e32171SMatthew Brost 		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
608e5e32171SMatthew Brost 			slot, set->num_engines);
609e5e32171SMatthew Brost 		return -EINVAL;
610e5e32171SMatthew Brost 	}
611e5e32171SMatthew Brost 
612e5e32171SMatthew Brost 	if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
613e5e32171SMatthew Brost 		drm_dbg(&i915->drm,
614e5e32171SMatthew Brost 			"Invalid placement[%d], already occupied\n", slot);
615e5e32171SMatthew Brost 		return -EINVAL;
616e5e32171SMatthew Brost 	}
617e5e32171SMatthew Brost 
618e5e32171SMatthew Brost 	if (get_user(flags, &ext->flags))
619e5e32171SMatthew Brost 		return -EFAULT;
620e5e32171SMatthew Brost 
621e5e32171SMatthew Brost 	if (flags) {
622e5e32171SMatthew Brost 		drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
623e5e32171SMatthew Brost 		return -EINVAL;
624e5e32171SMatthew Brost 	}
625e5e32171SMatthew Brost 
626e5e32171SMatthew Brost 	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
627e5e32171SMatthew Brost 		err = check_user_mbz(&ext->mbz64[n]);
628e5e32171SMatthew Brost 		if (err)
629e5e32171SMatthew Brost 			return err;
630e5e32171SMatthew Brost 	}
631e5e32171SMatthew Brost 
632e5e32171SMatthew Brost 	if (width < 2) {
633e5e32171SMatthew Brost 		drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
634e5e32171SMatthew Brost 		return -EINVAL;
635e5e32171SMatthew Brost 	}
636e5e32171SMatthew Brost 
637e5e32171SMatthew Brost 	if (num_siblings < 1) {
638e5e32171SMatthew Brost 		drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
639e5e32171SMatthew Brost 			num_siblings);
640e5e32171SMatthew Brost 		return -EINVAL;
641e5e32171SMatthew Brost 	}
642e5e32171SMatthew Brost 
643e5e32171SMatthew Brost 	siblings = kmalloc_array(num_siblings * width,
644e5e32171SMatthew Brost 				 sizeof(*siblings),
645e5e32171SMatthew Brost 				 GFP_KERNEL);
646e5e32171SMatthew Brost 	if (!siblings)
647e5e32171SMatthew Brost 		return -ENOMEM;
648e5e32171SMatthew Brost 
649e5e32171SMatthew Brost 	/* Create contexts / engines */
650e5e32171SMatthew Brost 	for (i = 0; i < width; ++i) {
651e5e32171SMatthew Brost 		intel_engine_mask_t current_mask = 0;
652e5e32171SMatthew Brost 
653e5e32171SMatthew Brost 		for (j = 0; j < num_siblings; ++j) {
654e5e32171SMatthew Brost 			struct i915_engine_class_instance ci;
655e5e32171SMatthew Brost 
656e5e32171SMatthew Brost 			n = i * num_siblings + j;
657e5e32171SMatthew Brost 			if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
658e5e32171SMatthew Brost 				err = -EFAULT;
659e5e32171SMatthew Brost 				goto out_err;
660e5e32171SMatthew Brost 			}
661e5e32171SMatthew Brost 
662e5e32171SMatthew Brost 			siblings[n] =
663e5e32171SMatthew Brost 				intel_engine_lookup_user(i915, ci.engine_class,
664e5e32171SMatthew Brost 							 ci.engine_instance);
665e5e32171SMatthew Brost 			if (!siblings[n]) {
666e5e32171SMatthew Brost 				drm_dbg(&i915->drm,
667e5e32171SMatthew Brost 					"Invalid sibling[%d]: { class:%d, inst:%d }\n",
668e5e32171SMatthew Brost 					n, ci.engine_class, ci.engine_instance);
669e5e32171SMatthew Brost 				err = -EINVAL;
670e5e32171SMatthew Brost 				goto out_err;
671e5e32171SMatthew Brost 			}
672e5e32171SMatthew Brost 
673e393e2aaSMatthew Brost 			/*
674e393e2aaSMatthew Brost 			 * We don't support breadcrumb handshake on these
675e393e2aaSMatthew Brost 			 * classes
676e393e2aaSMatthew Brost 			 */
677e393e2aaSMatthew Brost 			if (siblings[n]->class == RENDER_CLASS ||
678e393e2aaSMatthew Brost 			    siblings[n]->class == COMPUTE_CLASS) {
679e393e2aaSMatthew Brost 				err = -EINVAL;
680e393e2aaSMatthew Brost 				goto out_err;
681e393e2aaSMatthew Brost 			}
682e393e2aaSMatthew Brost 
683e5e32171SMatthew Brost 			if (n) {
684e5e32171SMatthew Brost 				if (prev_engine.engine_class !=
685e5e32171SMatthew Brost 				    ci.engine_class) {
686e5e32171SMatthew Brost 					drm_dbg(&i915->drm,
687e5e32171SMatthew Brost 						"Mismatched class %d, %d\n",
688e5e32171SMatthew Brost 						prev_engine.engine_class,
689e5e32171SMatthew Brost 						ci.engine_class);
690e5e32171SMatthew Brost 					err = -EINVAL;
691e5e32171SMatthew Brost 					goto out_err;
692e5e32171SMatthew Brost 				}
693e5e32171SMatthew Brost 			}
694e5e32171SMatthew Brost 
695e5e32171SMatthew Brost 			prev_engine = ci;
696e5e32171SMatthew Brost 			current_mask |= siblings[n]->logical_mask;
697e5e32171SMatthew Brost 		}
698e5e32171SMatthew Brost 
699e5e32171SMatthew Brost 		if (i > 0) {
700e5e32171SMatthew Brost 			if (current_mask != prev_mask << 1) {
701e5e32171SMatthew Brost 				drm_dbg(&i915->drm,
702e5e32171SMatthew Brost 					"Non contiguous logical mask 0x%x, 0x%x\n",
703e5e32171SMatthew Brost 					prev_mask, current_mask);
704e5e32171SMatthew Brost 				err = -EINVAL;
705e5e32171SMatthew Brost 				goto out_err;
706e5e32171SMatthew Brost 			}
707e5e32171SMatthew Brost 		}
708e5e32171SMatthew Brost 		prev_mask = current_mask;
709e5e32171SMatthew Brost 	}
710e5e32171SMatthew Brost 
711e5e32171SMatthew Brost 	set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
712e5e32171SMatthew Brost 	set->engines[slot].num_siblings = num_siblings;
713e5e32171SMatthew Brost 	set->engines[slot].width = width;
714e5e32171SMatthew Brost 	set->engines[slot].siblings = siblings;
715e5e32171SMatthew Brost 
716e5e32171SMatthew Brost 	return 0;
717e5e32171SMatthew Brost 
718e5e32171SMatthew Brost out_err:
719e5e32171SMatthew Brost 	kfree(siblings);
720e5e32171SMatthew Brost 
721e5e32171SMatthew Brost 	return err;
722e5e32171SMatthew Brost }
723e5e32171SMatthew Brost 
724d4433c76SJason Ekstrand static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
725d4433c76SJason Ekstrand 	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
726d4433c76SJason Ekstrand 	[I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
727e5e32171SMatthew Brost 	[I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
728e5e32171SMatthew Brost 		set_proto_ctx_engines_parallel_submit,
729d4433c76SJason Ekstrand };
730d4433c76SJason Ekstrand 
731d4433c76SJason Ekstrand static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
732d4433c76SJason Ekstrand 			         struct i915_gem_proto_context *pc,
733d4433c76SJason Ekstrand 			         const struct drm_i915_gem_context_param *args)
734d4433c76SJason Ekstrand {
735d4433c76SJason Ekstrand 	struct drm_i915_private *i915 = fpriv->dev_priv;
736d4433c76SJason Ekstrand 	struct set_proto_ctx_engines set = { .i915 = i915 };
737d4433c76SJason Ekstrand 	struct i915_context_param_engines __user *user =
738d4433c76SJason Ekstrand 		u64_to_user_ptr(args->value);
739d4433c76SJason Ekstrand 	unsigned int n;
740d4433c76SJason Ekstrand 	u64 extensions;
741d4433c76SJason Ekstrand 	int err;
742d4433c76SJason Ekstrand 
743d4433c76SJason Ekstrand 	if (pc->num_user_engines >= 0) {
744d4433c76SJason Ekstrand 		drm_dbg(&i915->drm, "Cannot set engines twice");
745d4433c76SJason Ekstrand 		return -EINVAL;
746d4433c76SJason Ekstrand 	}
747d4433c76SJason Ekstrand 
748d4433c76SJason Ekstrand 	if (args->size < sizeof(*user) ||
749d4433c76SJason Ekstrand 	    !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
750d4433c76SJason Ekstrand 		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
751d4433c76SJason Ekstrand 			args->size);
752d4433c76SJason Ekstrand 		return -EINVAL;
753d4433c76SJason Ekstrand 	}
754d4433c76SJason Ekstrand 
755d4433c76SJason Ekstrand 	set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
756d4433c76SJason Ekstrand 	/* RING_MASK has no shift so we can use it directly here */
757d4433c76SJason Ekstrand 	if (set.num_engines > I915_EXEC_RING_MASK + 1)
758d4433c76SJason Ekstrand 		return -EINVAL;
759d4433c76SJason Ekstrand 
760d4433c76SJason Ekstrand 	set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
761d4433c76SJason Ekstrand 	if (!set.engines)
762d4433c76SJason Ekstrand 		return -ENOMEM;
763d4433c76SJason Ekstrand 
764d4433c76SJason Ekstrand 	for (n = 0; n < set.num_engines; n++) {
765d4433c76SJason Ekstrand 		struct i915_engine_class_instance ci;
766d4433c76SJason Ekstrand 		struct intel_engine_cs *engine;
767d4433c76SJason Ekstrand 
768d4433c76SJason Ekstrand 		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
769d4433c76SJason Ekstrand 			kfree(set.engines);
770d4433c76SJason Ekstrand 			return -EFAULT;
771d4433c76SJason Ekstrand 		}
772d4433c76SJason Ekstrand 
773d4433c76SJason Ekstrand 		memset(&set.engines[n], 0, sizeof(set.engines[n]));
774d4433c76SJason Ekstrand 
775d4433c76SJason Ekstrand 		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
776d4433c76SJason Ekstrand 		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
777d4433c76SJason Ekstrand 			continue;
778d4433c76SJason Ekstrand 
779d4433c76SJason Ekstrand 		engine = intel_engine_lookup_user(i915,
780d4433c76SJason Ekstrand 						  ci.engine_class,
781d4433c76SJason Ekstrand 						  ci.engine_instance);
782d4433c76SJason Ekstrand 		if (!engine) {
783d4433c76SJason Ekstrand 			drm_dbg(&i915->drm,
784d4433c76SJason Ekstrand 				"Invalid engine[%d]: { class:%d, instance:%d }\n",
785d4433c76SJason Ekstrand 				n, ci.engine_class, ci.engine_instance);
786d4433c76SJason Ekstrand 			kfree(set.engines);
787d4433c76SJason Ekstrand 			return -ENOENT;
788d4433c76SJason Ekstrand 		}
789d4433c76SJason Ekstrand 
790d4433c76SJason Ekstrand 		set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
791d4433c76SJason Ekstrand 		set.engines[n].engine = engine;
792d4433c76SJason Ekstrand 	}
793d4433c76SJason Ekstrand 
794d4433c76SJason Ekstrand 	err = -EFAULT;
795d4433c76SJason Ekstrand 	if (!get_user(extensions, &user->extensions))
796d4433c76SJason Ekstrand 		err = i915_user_extensions(u64_to_user_ptr(extensions),
797d4433c76SJason Ekstrand 					   set_proto_ctx_engines_extensions,
798d4433c76SJason Ekstrand 					   ARRAY_SIZE(set_proto_ctx_engines_extensions),
799d4433c76SJason Ekstrand 					   &set);
800d4433c76SJason Ekstrand 	if (err) {
801d4433c76SJason Ekstrand 		kfree(set.engines);
802d4433c76SJason Ekstrand 		return err;
803d4433c76SJason Ekstrand 	}
804d4433c76SJason Ekstrand 
805d4433c76SJason Ekstrand 	pc->num_user_engines = set.num_engines;
806d4433c76SJason Ekstrand 	pc->user_engines = set.engines;
807d4433c76SJason Ekstrand 
808d4433c76SJason Ekstrand 	return 0;
809d4433c76SJason Ekstrand }
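
/*
 * Purely illustrative (not part of the driver): the buffer parsed above is the
 * variable-length engines array from the uAPI header, which userspace would
 * typically build with the I915_DEFINE_CONTEXT_PARAM_ENGINES() helper, e.g.
 *
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
 *		.engines = {
 *			{ .engine_class = I915_ENGINE_CLASS_RENDER, .engine_instance = 0 },
 *			{ .engine_class = I915_ENGINE_CLASS_COPY, .engine_instance = 0 },
 *		},
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = sizeof(engines),
 *		.value = (uintptr_t)&engines,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */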
810d4433c76SJason Ekstrand 
811d4433c76SJason Ekstrand static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
812d4433c76SJason Ekstrand 			      struct i915_gem_proto_context *pc,
813d4433c76SJason Ekstrand 			      struct drm_i915_gem_context_param *args)
814d4433c76SJason Ekstrand {
815d4433c76SJason Ekstrand 	struct drm_i915_private *i915 = fpriv->dev_priv;
816d4433c76SJason Ekstrand 	struct drm_i915_gem_context_param_sseu user_sseu;
817d4433c76SJason Ekstrand 	struct intel_sseu *sseu;
818d4433c76SJason Ekstrand 	int ret;
819d4433c76SJason Ekstrand 
820d4433c76SJason Ekstrand 	if (args->size < sizeof(user_sseu))
821d4433c76SJason Ekstrand 		return -EINVAL;
822d4433c76SJason Ekstrand 
823d4433c76SJason Ekstrand 	if (GRAPHICS_VER(i915) != 11)
824d4433c76SJason Ekstrand 		return -ENODEV;
825d4433c76SJason Ekstrand 
826d4433c76SJason Ekstrand 	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
827d4433c76SJason Ekstrand 			   sizeof(user_sseu)))
828d4433c76SJason Ekstrand 		return -EFAULT;
829d4433c76SJason Ekstrand 
830d4433c76SJason Ekstrand 	if (user_sseu.rsvd)
831d4433c76SJason Ekstrand 		return -EINVAL;
832d4433c76SJason Ekstrand 
833d4433c76SJason Ekstrand 	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
834d4433c76SJason Ekstrand 		return -EINVAL;
835d4433c76SJason Ekstrand 
836d4433c76SJason Ekstrand 	if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
837d4433c76SJason Ekstrand 		return -EINVAL;
838d4433c76SJason Ekstrand 
839d4433c76SJason Ekstrand 	if (pc->num_user_engines >= 0) {
840d4433c76SJason Ekstrand 		int idx = user_sseu.engine.engine_instance;
841d4433c76SJason Ekstrand 		struct i915_gem_proto_engine *pe;
842d4433c76SJason Ekstrand 
843d4433c76SJason Ekstrand 		if (idx >= pc->num_user_engines)
844d4433c76SJason Ekstrand 			return -EINVAL;
845d4433c76SJason Ekstrand 
846d4433c76SJason Ekstrand 		pe = &pc->user_engines[idx];
847d4433c76SJason Ekstrand 
848d4433c76SJason Ekstrand 		/* Only render engine supports RPCS configuration. */
849d4433c76SJason Ekstrand 		if (pe->engine->class != RENDER_CLASS)
850d4433c76SJason Ekstrand 			return -EINVAL;
851d4433c76SJason Ekstrand 
852d4433c76SJason Ekstrand 		sseu = &pe->sseu;
853d4433c76SJason Ekstrand 	} else {
854d4433c76SJason Ekstrand 		/* Only render engine supports RPCS configuration. */
855d4433c76SJason Ekstrand 		if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
856d4433c76SJason Ekstrand 			return -EINVAL;
857d4433c76SJason Ekstrand 
858d4433c76SJason Ekstrand 		/* There is only one render engine */
859d4433c76SJason Ekstrand 		if (user_sseu.engine.engine_instance != 0)
860d4433c76SJason Ekstrand 			return -EINVAL;
861d4433c76SJason Ekstrand 
862d4433c76SJason Ekstrand 		sseu = &pc->legacy_rcs_sseu;
863d4433c76SJason Ekstrand 	}
864d4433c76SJason Ekstrand 
8651a9c4db4SMichał Winiarski 	ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
866d4433c76SJason Ekstrand 	if (ret)
867d4433c76SJason Ekstrand 		return ret;
868d4433c76SJason Ekstrand 
869d4433c76SJason Ekstrand 	args->size = sizeof(user_sseu);
870d4433c76SJason Ekstrand 
871d4433c76SJason Ekstrand 	return 0;
872d4433c76SJason Ekstrand }
873d4433c76SJason Ekstrand 
874d4433c76SJason Ekstrand static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
875d4433c76SJason Ekstrand 			       struct i915_gem_proto_context *pc,
876d4433c76SJason Ekstrand 			       struct drm_i915_gem_context_param *args)
877d4433c76SJason Ekstrand {
878d4433c76SJason Ekstrand 	int ret = 0;
879d4433c76SJason Ekstrand 
880d4433c76SJason Ekstrand 	switch (args->param) {
881d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
882d4433c76SJason Ekstrand 		if (args->size)
883d4433c76SJason Ekstrand 			ret = -EINVAL;
884d4433c76SJason Ekstrand 		else if (args->value)
885d4433c76SJason Ekstrand 			pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
886d4433c76SJason Ekstrand 		else
887d4433c76SJason Ekstrand 			pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
888d4433c76SJason Ekstrand 		break;
889d4433c76SJason Ekstrand 
890d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_BANNABLE:
891d4433c76SJason Ekstrand 		if (args->size)
892d4433c76SJason Ekstrand 			ret = -EINVAL;
893d4433c76SJason Ekstrand 		else if (!capable(CAP_SYS_ADMIN) && !args->value)
894d4433c76SJason Ekstrand 			ret = -EPERM;
895d4433c76SJason Ekstrand 		else if (args->value)
896d4433c76SJason Ekstrand 			pc->user_flags |= BIT(UCONTEXT_BANNABLE);
897d3ac8d42SDaniele Ceraolo Spurio 		else if (pc->uses_protected_content)
898d3ac8d42SDaniele Ceraolo Spurio 			ret = -EPERM;
899d4433c76SJason Ekstrand 		else
900d4433c76SJason Ekstrand 			pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
901d4433c76SJason Ekstrand 		break;
902d4433c76SJason Ekstrand 
903d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_RECOVERABLE:
904d4433c76SJason Ekstrand 		if (args->size)
905d4433c76SJason Ekstrand 			ret = -EINVAL;
906d3ac8d42SDaniele Ceraolo Spurio 		else if (!args->value)
907d4433c76SJason Ekstrand 			pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
908d3ac8d42SDaniele Ceraolo Spurio 		else if (pc->uses_protected_content)
909d3ac8d42SDaniele Ceraolo Spurio 			ret = -EPERM;
910d3ac8d42SDaniele Ceraolo Spurio 		else
911d3ac8d42SDaniele Ceraolo Spurio 			pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
912d4433c76SJason Ekstrand 		break;
913d4433c76SJason Ekstrand 
914d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_PRIORITY:
915d4433c76SJason Ekstrand 		ret = validate_priority(fpriv->dev_priv, args);
916d4433c76SJason Ekstrand 		if (!ret)
917d4433c76SJason Ekstrand 			pc->sched.priority = args->value;
918d4433c76SJason Ekstrand 		break;
919d4433c76SJason Ekstrand 
920d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_SSEU:
921d4433c76SJason Ekstrand 		ret = set_proto_ctx_sseu(fpriv, pc, args);
922d4433c76SJason Ekstrand 		break;
923d4433c76SJason Ekstrand 
924d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_VM:
925d4433c76SJason Ekstrand 		ret = set_proto_ctx_vm(fpriv, pc, args);
926d4433c76SJason Ekstrand 		break;
927d4433c76SJason Ekstrand 
928d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_ENGINES:
929d4433c76SJason Ekstrand 		ret = set_proto_ctx_engines(fpriv, pc, args);
930d4433c76SJason Ekstrand 		break;
931d4433c76SJason Ekstrand 
932d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_PERSISTENCE:
933d4433c76SJason Ekstrand 		if (args->size)
934d4433c76SJason Ekstrand 			ret = -EINVAL;
935d4433c76SJason Ekstrand 		else
936d4433c76SJason Ekstrand 			ret = proto_context_set_persistence(fpriv->dev_priv, pc, args->value);
937d4433c76SJason Ekstrand 		break;
938d4433c76SJason Ekstrand 
939d3ac8d42SDaniele Ceraolo Spurio 	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
940d3ac8d42SDaniele Ceraolo Spurio 		ret = proto_context_set_protected(fpriv->dev_priv, pc,
941d3ac8d42SDaniele Ceraolo Spurio 						  args->value);
942d3ac8d42SDaniele Ceraolo Spurio 		break;
943d3ac8d42SDaniele Ceraolo Spurio 
944d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
945d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_BAN_PERIOD:
946d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_RINGSIZE:
947d4433c76SJason Ekstrand 	default:
948d4433c76SJason Ekstrand 		ret = -EINVAL;
949d4433c76SJason Ekstrand 		break;
950d4433c76SJason Ekstrand 	}
951d4433c76SJason Ekstrand 
952d4433c76SJason Ekstrand 	return ret;
953d4433c76SJason Ekstrand }
954d4433c76SJason Ekstrand 
955263ae12cSJason Ekstrand static int intel_context_set_gem(struct intel_context *ce,
956263ae12cSJason Ekstrand 				 struct i915_gem_context *ctx,
957263ae12cSJason Ekstrand 				 struct intel_sseu sseu)
958e6ba7648SChris Wilson {
959263ae12cSJason Ekstrand 	int ret = 0;
960263ae12cSJason Ekstrand 
9616a8679c0SChris Wilson 	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
9626a8679c0SChris Wilson 	RCU_INIT_POINTER(ce->gem_context, ctx);
963e6ba7648SChris Wilson 
964e5e32171SMatthew Brost 	GEM_BUG_ON(intel_context_is_pinned(ce));
96574e4b909SJason Ekstrand 	ce->ring_size = SZ_16K;
966e6ba7648SChris Wilson 
967e6ba7648SChris Wilson 	i915_vm_put(ce->vm);
9680483a301SDaniel Vetter 	ce->vm = i915_gem_context_get_eb_vm(ctx);
969e6ba7648SChris Wilson 
970e6ba7648SChris Wilson 	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
9714dbd3944SMatthew Brost 	    intel_engine_has_timeslices(ce->engine) &&
9724dbd3944SMatthew Brost 	    intel_engine_has_semaphores(ce->engine))
973e6ba7648SChris Wilson 		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
974e8dbb566STvrtko Ursulin 
9751a839e01SLucas De Marchi 	if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
976677db6adSJason Ekstrand 	    ctx->i915->params.request_timeout_ms) {
977677db6adSJason Ekstrand 		unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
978677db6adSJason Ekstrand 
979677db6adSJason Ekstrand 		intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
980677db6adSJason Ekstrand 	}
981263ae12cSJason Ekstrand 
982263ae12cSJason Ekstrand 	/* A valid SSEU has no zero fields */
983263ae12cSJason Ekstrand 	if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
984263ae12cSJason Ekstrand 		ret = intel_context_reconfigure_sseu(ce, sseu);
985263ae12cSJason Ekstrand 
986263ae12cSJason Ekstrand 	return ret;
987e6ba7648SChris Wilson }
988e6ba7648SChris Wilson 
989e5e32171SMatthew Brost static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
990e5e32171SMatthew Brost {
991e5e32171SMatthew Brost 	while (count--) {
992e5e32171SMatthew Brost 		struct intel_context *ce = e->engines[count], *child;
993e5e32171SMatthew Brost 
994e5e32171SMatthew Brost 		if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
995e5e32171SMatthew Brost 			continue;
996e5e32171SMatthew Brost 
997e5e32171SMatthew Brost 		for_each_child(ce, child)
998e5e32171SMatthew Brost 			intel_context_unpin(child);
999e5e32171SMatthew Brost 		intel_context_unpin(ce);
1000e5e32171SMatthew Brost 	}
1001e5e32171SMatthew Brost }
1002e5e32171SMatthew Brost 
1003e5e32171SMatthew Brost static void unpin_engines(struct i915_gem_engines *e)
1004e5e32171SMatthew Brost {
1005e5e32171SMatthew Brost 	__unpin_engines(e, e->num_engines);
1006e5e32171SMatthew Brost }
1007e5e32171SMatthew Brost 
100810be98a7SChris Wilson static void __free_engines(struct i915_gem_engines *e, unsigned int count)
100910be98a7SChris Wilson {
101010be98a7SChris Wilson 	while (count--) {
101110be98a7SChris Wilson 		if (!e->engines[count])
101210be98a7SChris Wilson 			continue;
101310be98a7SChris Wilson 
101410be98a7SChris Wilson 		intel_context_put(e->engines[count]);
101510be98a7SChris Wilson 	}
101610be98a7SChris Wilson 	kfree(e);
101710be98a7SChris Wilson }
101810be98a7SChris Wilson 
101910be98a7SChris Wilson static void free_engines(struct i915_gem_engines *e)
102010be98a7SChris Wilson {
102110be98a7SChris Wilson 	__free_engines(e, e->num_engines);
102210be98a7SChris Wilson }
102310be98a7SChris Wilson 
1024155ab883SChris Wilson static void free_engines_rcu(struct rcu_head *rcu)
102510be98a7SChris Wilson {
1026130a95e9SChris Wilson 	struct i915_gem_engines *engines =
1027130a95e9SChris Wilson 		container_of(rcu, struct i915_gem_engines, rcu);
1028130a95e9SChris Wilson 
1029130a95e9SChris Wilson 	i915_sw_fence_fini(&engines->fence);
1030130a95e9SChris Wilson 	free_engines(engines);
103110be98a7SChris Wilson }
103210be98a7SChris Wilson 
10338399eec8STvrtko Ursulin static void accumulate_runtime(struct i915_drm_client *client,
10348399eec8STvrtko Ursulin 			       struct i915_gem_engines *engines)
10358399eec8STvrtko Ursulin {
10368399eec8STvrtko Ursulin 	struct i915_gem_engines_iter it;
10378399eec8STvrtko Ursulin 	struct intel_context *ce;
10388399eec8STvrtko Ursulin 
10398399eec8STvrtko Ursulin 	if (!client)
10408399eec8STvrtko Ursulin 		return;
10418399eec8STvrtko Ursulin 
10428399eec8STvrtko Ursulin 	/* Transfer accumulated runtime to the parent drm client. */
10438399eec8STvrtko Ursulin 	for_each_gem_engine(ce, engines, it) {
10448399eec8STvrtko Ursulin 		unsigned int class = ce->engine->uabi_class;
10458399eec8STvrtko Ursulin 
10468399eec8STvrtko Ursulin 		GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime));
10478399eec8STvrtko Ursulin 		atomic64_add(intel_context_get_total_runtime_ns(ce),
10488399eec8STvrtko Ursulin 			     &client->past_runtime[class]);
10498399eec8STvrtko Ursulin 	}
10508399eec8STvrtko Ursulin }
10518399eec8STvrtko Ursulin 
105244505168SMatthew Brost static int
105370c96e39SChris Wilson engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
105470c96e39SChris Wilson {
105570c96e39SChris Wilson 	struct i915_gem_engines *engines =
105670c96e39SChris Wilson 		container_of(fence, typeof(*engines), fence);
10578399eec8STvrtko Ursulin 	struct i915_gem_context *ctx = engines->ctx;
105870c96e39SChris Wilson 
105970c96e39SChris Wilson 	switch (state) {
106070c96e39SChris Wilson 	case FENCE_COMPLETE:
106170c96e39SChris Wilson 		if (!list_empty(&engines->link)) {
106270c96e39SChris Wilson 			unsigned long flags;
106370c96e39SChris Wilson 
106470c96e39SChris Wilson 			spin_lock_irqsave(&ctx->stale.lock, flags);
106570c96e39SChris Wilson 			list_del(&engines->link);
106670c96e39SChris Wilson 			spin_unlock_irqrestore(&ctx->stale.lock, flags);
106770c96e39SChris Wilson 		}
10688399eec8STvrtko Ursulin 		accumulate_runtime(ctx->client, engines);
10698399eec8STvrtko Ursulin 		i915_gem_context_put(ctx);
10708399eec8STvrtko Ursulin 
107170c96e39SChris Wilson 		break;
107270c96e39SChris Wilson 
107370c96e39SChris Wilson 	case FENCE_FREE:
107470c96e39SChris Wilson 		init_rcu_head(&engines->rcu);
107570c96e39SChris Wilson 		call_rcu(&engines->rcu, free_engines_rcu);
107670c96e39SChris Wilson 		break;
107770c96e39SChris Wilson 	}
107870c96e39SChris Wilson 
107970c96e39SChris Wilson 	return NOTIFY_DONE;
108070c96e39SChris Wilson }
108170c96e39SChris Wilson 
108270c96e39SChris Wilson static struct i915_gem_engines *alloc_engines(unsigned int count)
108370c96e39SChris Wilson {
108470c96e39SChris Wilson 	struct i915_gem_engines *e;
108570c96e39SChris Wilson 
108670c96e39SChris Wilson 	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
108770c96e39SChris Wilson 	if (!e)
108870c96e39SChris Wilson 		return NULL;
108970c96e39SChris Wilson 
109070c96e39SChris Wilson 	i915_sw_fence_init(&e->fence, engines_notify);
109170c96e39SChris Wilson 	return e;
109270c96e39SChris Wilson }
109370c96e39SChris Wilson 
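/*
 * default_engines() builds the legacy engine map for a context: one
 * intel_context per physical engine that exposes a legacy_idx, stored at
 * that index, with the requested SSEU configuration applied to the
 * render engine only.
 */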
1094263ae12cSJason Ekstrand static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
1095263ae12cSJason Ekstrand 						struct intel_sseu rcs_sseu)
109610be98a7SChris Wilson {
10971a9c4db4SMichał Winiarski 	const struct intel_gt *gt = to_gt(ctx->i915);
109810be98a7SChris Wilson 	struct intel_engine_cs *engine;
109907a635a8SJason Ekstrand 	struct i915_gem_engines *e, *err;
110010be98a7SChris Wilson 	enum intel_engine_id id;
110110be98a7SChris Wilson 
110270c96e39SChris Wilson 	e = alloc_engines(I915_NUM_ENGINES);
110310be98a7SChris Wilson 	if (!e)
110410be98a7SChris Wilson 		return ERR_PTR(-ENOMEM);
110510be98a7SChris Wilson 
1106f1c4d157SChris Wilson 	for_each_engine(engine, gt, id) {
110710be98a7SChris Wilson 		struct intel_context *ce;
1108263ae12cSJason Ekstrand 		struct intel_sseu sseu = {};
1109263ae12cSJason Ekstrand 		int ret;
111010be98a7SChris Wilson 
1111a50134b1STvrtko Ursulin 		if (engine->legacy_idx == INVALID_ENGINE)
1112a50134b1STvrtko Ursulin 			continue;
1113a50134b1STvrtko Ursulin 
1114a50134b1STvrtko Ursulin 		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
1115a50134b1STvrtko Ursulin 		GEM_BUG_ON(e->engines[engine->legacy_idx]);
1116a50134b1STvrtko Ursulin 
1117e6ba7648SChris Wilson 		ce = intel_context_create(engine);
111810be98a7SChris Wilson 		if (IS_ERR(ce)) {
111907a635a8SJason Ekstrand 			err = ERR_CAST(ce);
112007a635a8SJason Ekstrand 			goto free_engines;
112110be98a7SChris Wilson 		}
112210be98a7SChris Wilson 
1123a50134b1STvrtko Ursulin 		e->engines[engine->legacy_idx] = ce;
112407a635a8SJason Ekstrand 		e->num_engines = max(e->num_engines, engine->legacy_idx + 1);
1125263ae12cSJason Ekstrand 
1126263ae12cSJason Ekstrand 		if (engine->class == RENDER_CLASS)
1127263ae12cSJason Ekstrand 			sseu = rcs_sseu;
1128263ae12cSJason Ekstrand 
1129263ae12cSJason Ekstrand 		ret = intel_context_set_gem(ce, ctx, sseu);
1130263ae12cSJason Ekstrand 		if (ret) {
1131263ae12cSJason Ekstrand 			err = ERR_PTR(ret);
1132263ae12cSJason Ekstrand 			goto free_engines;
1133263ae12cSJason Ekstrand 		}
1134263ae12cSJason Ekstrand 
113510be98a7SChris Wilson 	}
113610be98a7SChris Wilson 
113710be98a7SChris Wilson 	return e;
113807a635a8SJason Ekstrand 
113907a635a8SJason Ekstrand free_engines:
114007a635a8SJason Ekstrand 	free_engines(e);
114107a635a8SJason Ekstrand 	return err;
114210be98a7SChris Wilson }
114310be98a7SChris Wilson 
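/*
 * perma_pin_contexts() pins a parallel parent context and all of its
 * children so that their rings are allocated up front; if any pin fails,
 * every pin taken so far is dropped again.
 */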
1144e5e32171SMatthew Brost static int perma_pin_contexts(struct intel_context *ce)
1145e5e32171SMatthew Brost {
1146e5e32171SMatthew Brost 	struct intel_context *child;
1147e5e32171SMatthew Brost 	int i = 0, j = 0, ret;
1148e5e32171SMatthew Brost 
1149e5e32171SMatthew Brost 	GEM_BUG_ON(!intel_context_is_parent(ce));
1150e5e32171SMatthew Brost 
1151e5e32171SMatthew Brost 	ret = intel_context_pin(ce);
1152e5e32171SMatthew Brost 	if (unlikely(ret))
1153e5e32171SMatthew Brost 		return ret;
1154e5e32171SMatthew Brost 
1155e5e32171SMatthew Brost 	for_each_child(ce, child) {
1156e5e32171SMatthew Brost 		ret = intel_context_pin(child);
1157e5e32171SMatthew Brost 		if (unlikely(ret))
1158e5e32171SMatthew Brost 			goto unwind;
1159e5e32171SMatthew Brost 		++i;
1160e5e32171SMatthew Brost 	}
1161e5e32171SMatthew Brost 
1162e5e32171SMatthew Brost 	set_bit(CONTEXT_PERMA_PIN, &ce->flags);
1163e5e32171SMatthew Brost 
1164e5e32171SMatthew Brost 	return 0;
1165e5e32171SMatthew Brost 
1166e5e32171SMatthew Brost unwind:
1167e5e32171SMatthew Brost 	intel_context_unpin(ce);
1168e5e32171SMatthew Brost 	for_each_child(ce, child) {
1169e5e32171SMatthew Brost 		if (j++ < i)
1170e5e32171SMatthew Brost 			intel_context_unpin(child);
1171e5e32171SMatthew Brost 		else
1172e5e32171SMatthew Brost 			break;
1173e5e32171SMatthew Brost 	}
1174e5e32171SMatthew Brost 
1175e5e32171SMatthew Brost 	return ret;
1176e5e32171SMatthew Brost }
1177e5e32171SMatthew Brost 
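/*
 * user_engines() instantiates the user-provided proto-engine
 * descriptors: physical engines become a plain intel_context, balanced
 * sets a virtual engine, and parallel sets a parent/child group which is
 * additionally perma-pinned. Each context (and any children) is then
 * bound to the GEM context via intel_context_set_gem().
 */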
1178d4433c76SJason Ekstrand static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
1179d4433c76SJason Ekstrand 					     unsigned int num_engines,
1180d4433c76SJason Ekstrand 					     struct i915_gem_proto_engine *pe)
1181d4433c76SJason Ekstrand {
1182d4433c76SJason Ekstrand 	struct i915_gem_engines *e, *err;
1183d4433c76SJason Ekstrand 	unsigned int n;
1184d4433c76SJason Ekstrand 
1185d4433c76SJason Ekstrand 	e = alloc_engines(num_engines);
118684edf537SMatthew Brost 	if (!e)
118784edf537SMatthew Brost 		return ERR_PTR(-ENOMEM);
118884edf537SMatthew Brost 	e->num_engines = num_engines;
118984edf537SMatthew Brost 
1190d4433c76SJason Ekstrand 	for (n = 0; n < num_engines; n++) {
1191e5e32171SMatthew Brost 		struct intel_context *ce, *child;
1192d4433c76SJason Ekstrand 		int ret;
1193d4433c76SJason Ekstrand 
1194d4433c76SJason Ekstrand 		switch (pe[n].type) {
1195d4433c76SJason Ekstrand 		case I915_GEM_ENGINE_TYPE_PHYSICAL:
1196d4433c76SJason Ekstrand 			ce = intel_context_create(pe[n].engine);
1197d4433c76SJason Ekstrand 			break;
1198d4433c76SJason Ekstrand 
1199d4433c76SJason Ekstrand 		case I915_GEM_ENGINE_TYPE_BALANCED:
120055612025SMatthew Brost 			ce = intel_engine_create_virtual(pe[n].siblings,
1201e5e32171SMatthew Brost 							 pe[n].num_siblings, 0);
1202e5e32171SMatthew Brost 			break;
1203e5e32171SMatthew Brost 
1204e5e32171SMatthew Brost 		case I915_GEM_ENGINE_TYPE_PARALLEL:
1205e5e32171SMatthew Brost 			ce = intel_engine_create_parallel(pe[n].siblings,
1206e5e32171SMatthew Brost 							  pe[n].num_siblings,
1207e5e32171SMatthew Brost 							  pe[n].width);
1208d4433c76SJason Ekstrand 			break;
1209d4433c76SJason Ekstrand 
1210d4433c76SJason Ekstrand 		case I915_GEM_ENGINE_TYPE_INVALID:
1211d4433c76SJason Ekstrand 		default:
1212d4433c76SJason Ekstrand 			GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
1213d4433c76SJason Ekstrand 			continue;
1214d4433c76SJason Ekstrand 		}
1215d4433c76SJason Ekstrand 
1216d4433c76SJason Ekstrand 		if (IS_ERR(ce)) {
1217d4433c76SJason Ekstrand 			err = ERR_CAST(ce);
1218d4433c76SJason Ekstrand 			goto free_engines;
1219d4433c76SJason Ekstrand 		}
1220d4433c76SJason Ekstrand 
1221d4433c76SJason Ekstrand 		e->engines[n] = ce;
1222d4433c76SJason Ekstrand 
1223d4433c76SJason Ekstrand 		ret = intel_context_set_gem(ce, ctx, pe->sseu);
1224d4433c76SJason Ekstrand 		if (ret) {
1225d4433c76SJason Ekstrand 			err = ERR_PTR(ret);
1226d4433c76SJason Ekstrand 			goto free_engines;
1227d4433c76SJason Ekstrand 		}
1228e5e32171SMatthew Brost 		for_each_child(ce, child) {
1229e5e32171SMatthew Brost 			ret = intel_context_set_gem(child, ctx, pe->sseu);
1230e5e32171SMatthew Brost 			if (ret) {
1231e5e32171SMatthew Brost 				err = ERR_PTR(ret);
1232e5e32171SMatthew Brost 				goto free_engines;
1233e5e32171SMatthew Brost 			}
1234e5e32171SMatthew Brost 		}
1235e5e32171SMatthew Brost 
1236e5e32171SMatthew Brost 		/*
1237e5e32171SMatthew Brost 		 * XXX: Must be done after calling intel_context_set_gem as that
1238e5e32171SMatthew Brost 		 * function changes the ring size. The ring is allocated when
1239e5e32171SMatthew Brost 		 * the context is pinned. If the ring size is changed after
1240e5e32171SMatthew Brost 		 * allocation we have a ring size mismatch, which will cause
1241e5e32171SMatthew Brost 		 * the context to hang. Presumably with a bit of reordering we
1242e5e32171SMatthew Brost 		 * could move the perma-pin step to the backend function
1243e5e32171SMatthew Brost 		 * intel_engine_create_parallel.
1244e5e32171SMatthew Brost 		 */
1245e5e32171SMatthew Brost 		if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
1246e5e32171SMatthew Brost 			ret = perma_pin_contexts(ce);
1247e5e32171SMatthew Brost 			if (ret) {
1248e5e32171SMatthew Brost 				err = ERR_PTR(ret);
1249e5e32171SMatthew Brost 				goto free_engines;
1250e5e32171SMatthew Brost 			}
1251e5e32171SMatthew Brost 		}
1252d4433c76SJason Ekstrand 	}
1253d4433c76SJason Ekstrand 
1254d4433c76SJason Ekstrand 	return e;
1255d4433c76SJason Ekstrand 
1256d4433c76SJason Ekstrand free_engines:
1257d4433c76SJason Ekstrand 	free_engines(e);
1258d4433c76SJason Ekstrand 	return err;
1259d4433c76SJason Ekstrand }
1260d4433c76SJason Ekstrand 
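/*
 * The final kref_put() does not tear the context down directly:
 * i915_gem_context_release() only queues release_work, and the teardown
 * of the syncobj, vm, pid and client references happens here.
 */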
126175eefd82SDaniel Vetter static void i915_gem_context_release_work(struct work_struct *work)
126210be98a7SChris Wilson {
126375eefd82SDaniel Vetter 	struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
126475eefd82SDaniel Vetter 						    release_work);
12658cf97637SDaniel Vetter 	struct i915_address_space *vm;
126610be98a7SChris Wilson 
1267f8246cf4SChris Wilson 	trace_i915_context_free(ctx);
1268f8246cf4SChris Wilson 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1269a4e7ccdaSChris Wilson 
1270c238980eSDaniel Vetter 	if (ctx->syncobj)
1271c238980eSDaniel Vetter 		drm_syncobj_put(ctx->syncobj);
1272c238980eSDaniel Vetter 
12739ec8795eSDaniel Vetter 	vm = ctx->vm;
12748cf97637SDaniel Vetter 	if (vm)
12758cf97637SDaniel Vetter 		i915_vm_put(vm);
12768cf97637SDaniel Vetter 
1277d3ac8d42SDaniele Ceraolo Spurio 	if (ctx->pxp_wakeref)
1278d3ac8d42SDaniele Ceraolo Spurio 		intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);
1279d3ac8d42SDaniele Ceraolo Spurio 
128043c50460STvrtko Ursulin 	if (ctx->client)
128143c50460STvrtko Ursulin 		i915_drm_client_put(ctx->client);
128243c50460STvrtko Ursulin 
128310be98a7SChris Wilson 	mutex_destroy(&ctx->engines_mutex);
1284f7ce8639SChris Wilson 	mutex_destroy(&ctx->lut_mutex);
128510be98a7SChris Wilson 
128610be98a7SChris Wilson 	put_pid(ctx->pid);
128710be98a7SChris Wilson 	mutex_destroy(&ctx->mutex);
128810be98a7SChris Wilson 
128910be98a7SChris Wilson 	kfree_rcu(ctx, rcu);
129010be98a7SChris Wilson }
129110be98a7SChris Wilson 
129275eefd82SDaniel Vetter void i915_gem_context_release(struct kref *ref)
129375eefd82SDaniel Vetter {
129475eefd82SDaniel Vetter 	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
129575eefd82SDaniel Vetter 
129675eefd82SDaniel Vetter 	queue_work(ctx->i915->wq, &ctx->release_work);
129775eefd82SDaniel Vetter }
129875eefd82SDaniel Vetter 
12992e0986a5SChris Wilson static inline struct i915_gem_engines *
13002e0986a5SChris Wilson __context_engines_static(const struct i915_gem_context *ctx)
13012e0986a5SChris Wilson {
13022e0986a5SChris Wilson 	return rcu_dereference_protected(ctx->engines, true);
13032e0986a5SChris Wilson }
13042e0986a5SChris Wilson 
13052e0986a5SChris Wilson static void __reset_context(struct i915_gem_context *ctx,
13062e0986a5SChris Wilson 			    struct intel_engine_cs *engine)
13072e0986a5SChris Wilson {
13082e0986a5SChris Wilson 	intel_gt_handle_error(engine->gt, engine->mask, 0,
13092e0986a5SChris Wilson 			      "context closure in %s", ctx->name);
13102e0986a5SChris Wilson }
13112e0986a5SChris Wilson 
13122e0986a5SChris Wilson static bool __cancel_engine(struct intel_engine_cs *engine)
13132e0986a5SChris Wilson {
13142e0986a5SChris Wilson 	/*
13152e0986a5SChris Wilson 	 * Send a "high priority pulse" down the engine to cause the
13162e0986a5SChris Wilson 	 * current request to be momentarily preempted. (If it fails to
13172e0986a5SChris Wilson 	 * be preempted, it will be reset.) As we have marked our context
13182e0986a5SChris Wilson 	 * as banned, any incomplete requests, including any currently
13192e0986a5SChris Wilson 	 * running, will be skipped following the preemption.
13202e0986a5SChris Wilson 	 *
13212e0986a5SChris Wilson 	 * If there is no hangchecking (one of the reasons why we try to
13222e0986a5SChris Wilson 	 * cancel the context) and no forced preemption, there may be no
13232e0986a5SChris Wilson 	 * means by which we reset the GPU and evict the persistent hog.
13242e0986a5SChris Wilson 	 * Ergo if we are unable to inject a preemptive pulse that can
13252e0986a5SChris Wilson 	 * kill the banned context, we fall back to doing a local reset
13262e0986a5SChris Wilson 	 * instead.
13272e0986a5SChris Wilson 	 */
1328651dabe2SChris Wilson 	return intel_engine_pulse(engine) == 0;
13292e0986a5SChris Wilson }
13302e0986a5SChris Wilson 
13314a317415SChris Wilson static struct intel_engine_cs *active_engine(struct intel_context *ce)
13324a317415SChris Wilson {
13334a317415SChris Wilson 	struct intel_engine_cs *engine = NULL;
13344a317415SChris Wilson 	struct i915_request *rq;
13354a317415SChris Wilson 
1336cc1557caSChris Wilson 	if (intel_context_has_inflight(ce))
1337cc1557caSChris Wilson 		return intel_context_inflight(ce);
1338cc1557caSChris Wilson 
13394a317415SChris Wilson 	if (!ce->timeline)
13404a317415SChris Wilson 		return NULL;
13414a317415SChris Wilson 
13423cfea8c9SChris Wilson 	/*
13433cfea8c9SChris Wilson 	 * rq->link is only SLAB_TYPESAFE_BY_RCU, so we need to hold a reference
13443cfea8c9SChris Wilson 	 * to the request to prevent it from being transferred to a new timeline
13453cfea8c9SChris Wilson 	 * (and onto a new timeline->requests list).
13463cfea8c9SChris Wilson 	 */
1347736e785fSChris Wilson 	rcu_read_lock();
13483cfea8c9SChris Wilson 	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
13493cfea8c9SChris Wilson 		bool found;
13503cfea8c9SChris Wilson 
13513cfea8c9SChris Wilson 		/* timeline is already completed up to this point? */
13523cfea8c9SChris Wilson 		if (!i915_request_get_rcu(rq))
13533cfea8c9SChris Wilson 			break;
13544a317415SChris Wilson 
13554a317415SChris Wilson 		/* Check with the backend if the request is inflight */
13563cfea8c9SChris Wilson 		found = true;
13573cfea8c9SChris Wilson 		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
13587dbc19daSTvrtko Ursulin 			found = i915_request_active_engine(rq, &engine);
13593cfea8c9SChris Wilson 
13603cfea8c9SChris Wilson 		i915_request_put(rq);
13613cfea8c9SChris Wilson 		if (found)
13624a317415SChris Wilson 			break;
13634a317415SChris Wilson 	}
1364736e785fSChris Wilson 	rcu_read_unlock();
13654a317415SChris Wilson 
13664a317415SChris Wilson 	return engine;
13674a317415SChris Wilson }
13684a317415SChris Wilson 
1369651dabe2SChris Wilson static void kill_engines(struct i915_gem_engines *engines, bool ban)
13702e0986a5SChris Wilson {
13712e0986a5SChris Wilson 	struct i915_gem_engines_iter it;
13722e0986a5SChris Wilson 	struct intel_context *ce;
13732e0986a5SChris Wilson 
13742e0986a5SChris Wilson 	/*
13752e0986a5SChris Wilson 	 * Map the user's engine back to the actual engines; one virtual
13762e0986a5SChris Wilson 	 * engine will be mapped to multiple engines, and using ctx->engine[]
13772e0986a5SChris Wilson 	 * the same engine may have multiple instances in the user's map.
13782e0986a5SChris Wilson 	 * However, we only care about pending requests, so only include
13792e0986a5SChris Wilson 	 * engines on which there are incomplete requests.
13802e0986a5SChris Wilson 	 */
138142fb60deSChris Wilson 	for_each_gem_engine(ce, engines, it) {
13822e0986a5SChris Wilson 		struct intel_engine_cs *engine;
13832e0986a5SChris Wilson 
1384ae8ac10dSMatthew Brost 		if (ban && intel_context_ban(ce, NULL))
13859f3ccd40SChris Wilson 			continue;
13869f3ccd40SChris Wilson 
13874a317415SChris Wilson 		/*
13884a317415SChris Wilson 		 * Check the current active state of this context; if we
13894a317415SChris Wilson 		 * are currently executing on the GPU we need to evict
13904a317415SChris Wilson 		 * ourselves. On the other hand, if we haven't yet been
13914a317415SChris Wilson 		 * submitted to the GPU or if everything is complete,
13924a317415SChris Wilson 		 * we have nothing to do.
13934a317415SChris Wilson 		 */
13944a317415SChris Wilson 		engine = active_engine(ce);
13952e0986a5SChris Wilson 
13962e0986a5SChris Wilson 		/* First attempt to gracefully cancel the context */
1397651dabe2SChris Wilson 		if (engine && !__cancel_engine(engine) && ban)
13982e0986a5SChris Wilson 			/*
13992e0986a5SChris Wilson 			 * If we are unable to send a preemptive pulse to bump
14002e0986a5SChris Wilson 			 * the context from the GPU, we have to resort to a full
14012e0986a5SChris Wilson 			 * reset. We hope the collateral damage is worth it.
14022e0986a5SChris Wilson 			 */
140342fb60deSChris Wilson 			__reset_context(engines->ctx, engine);
14042e0986a5SChris Wilson 	}
14052e0986a5SChris Wilson }
14062e0986a5SChris Wilson 
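/*
 * kill_context() walks the stale engine arrays of a closed context,
 * banning their contexts unless the GEM context is persistent and
 * hangcheck is enabled, and uses kill_engines() to preempt or reset
 * whatever is still running on the GPU.
 */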
1407651dabe2SChris Wilson static void kill_context(struct i915_gem_context *ctx)
140842fb60deSChris Wilson {
1409651dabe2SChris Wilson 	bool ban = (!i915_gem_context_is_persistent(ctx) ||
1410651dabe2SChris Wilson 		    !ctx->i915->params.enable_hangcheck);
141142fb60deSChris Wilson 	struct i915_gem_engines *pos, *next;
141242fb60deSChris Wilson 
1413130a95e9SChris Wilson 	spin_lock_irq(&ctx->stale.lock);
1414130a95e9SChris Wilson 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
141542fb60deSChris Wilson 	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
1416130a95e9SChris Wilson 		if (!i915_sw_fence_await(&pos->fence)) {
1417130a95e9SChris Wilson 			list_del_init(&pos->link);
141842fb60deSChris Wilson 			continue;
1419130a95e9SChris Wilson 		}
142042fb60deSChris Wilson 
1421130a95e9SChris Wilson 		spin_unlock_irq(&ctx->stale.lock);
142242fb60deSChris Wilson 
1423651dabe2SChris Wilson 		kill_engines(pos, ban);
142442fb60deSChris Wilson 
1425130a95e9SChris Wilson 		spin_lock_irq(&ctx->stale.lock);
1426130a95e9SChris Wilson 		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
142742fb60deSChris Wilson 		list_safe_reset_next(pos, next, link);
142842fb60deSChris Wilson 		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
142942fb60deSChris Wilson 
143042fb60deSChris Wilson 		i915_sw_fence_complete(&pos->fence);
143142fb60deSChris Wilson 	}
1432130a95e9SChris Wilson 	spin_unlock_irq(&ctx->stale.lock);
143342fb60deSChris Wilson }
143442fb60deSChris Wilson 
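/*
 * engines_idle_release() defers the release of an engines array until
 * all of its contexts have been scheduled out and retired: each context
 * is marked closed, any still-active context adds an await on
 * engines->fence, and the array is queued onto ctx->stale.engines (or
 * killed immediately if the context close already raced past us).
 */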
1435130a95e9SChris Wilson static void engines_idle_release(struct i915_gem_context *ctx,
1436130a95e9SChris Wilson 				 struct i915_gem_engines *engines)
1437130a95e9SChris Wilson {
1438130a95e9SChris Wilson 	struct i915_gem_engines_iter it;
1439130a95e9SChris Wilson 	struct intel_context *ce;
1440130a95e9SChris Wilson 
1441130a95e9SChris Wilson 	INIT_LIST_HEAD(&engines->link);
1442130a95e9SChris Wilson 
1443130a95e9SChris Wilson 	engines->ctx = i915_gem_context_get(ctx);
1444130a95e9SChris Wilson 
1445130a95e9SChris Wilson 	for_each_gem_engine(ce, engines, it) {
1446e6829625SChris Wilson 		int err;
1447130a95e9SChris Wilson 
1448130a95e9SChris Wilson 		/* serialises with execbuf */
1449207e4a71SChris Wilson 		set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
1450130a95e9SChris Wilson 		if (!intel_context_pin_if_active(ce))
1451130a95e9SChris Wilson 			continue;
1452130a95e9SChris Wilson 
1453e6829625SChris Wilson 		/* Wait until context is finally scheduled out and retired */
1454e6829625SChris Wilson 		err = i915_sw_fence_await_active(&engines->fence,
1455e6829625SChris Wilson 						 &ce->active,
1456e6829625SChris Wilson 						 I915_ACTIVE_AWAIT_BARRIER);
1457130a95e9SChris Wilson 		intel_context_unpin(ce);
1458e6829625SChris Wilson 		if (err)
1459130a95e9SChris Wilson 			goto kill;
1460130a95e9SChris Wilson 	}
1461130a95e9SChris Wilson 
1462130a95e9SChris Wilson 	spin_lock_irq(&ctx->stale.lock);
1463130a95e9SChris Wilson 	if (!i915_gem_context_is_closed(ctx))
1464130a95e9SChris Wilson 		list_add_tail(&engines->link, &ctx->stale.engines);
1465130a95e9SChris Wilson 	spin_unlock_irq(&ctx->stale.lock);
1466130a95e9SChris Wilson 
1467130a95e9SChris Wilson kill:
1468130a95e9SChris Wilson 	if (list_empty(&engines->link)) /* raced, already closed */
1469651dabe2SChris Wilson 		kill_engines(engines, true);
1470130a95e9SChris Wilson 
1471130a95e9SChris Wilson 	i915_sw_fence_commit(&engines->fence);
147242fb60deSChris Wilson }
147342fb60deSChris Wilson 
1474267c0126SChris Wilson static void set_closed_name(struct i915_gem_context *ctx)
1475267c0126SChris Wilson {
1476267c0126SChris Wilson 	char *s;
1477267c0126SChris Wilson 
1478267c0126SChris Wilson 	/* Replace '[]' with '<>' to indicate closed in debug prints */
1479267c0126SChris Wilson 
1480267c0126SChris Wilson 	s = strrchr(ctx->name, '[');
1481267c0126SChris Wilson 	if (!s)
1482267c0126SChris Wilson 		return;
1483267c0126SChris Wilson 
1484267c0126SChris Wilson 	*s = '<';
1485267c0126SChris Wilson 
1486267c0126SChris Wilson 	s = strchr(s + 1, ']');
1487267c0126SChris Wilson 	if (s)
1488267c0126SChris Wilson 		*s = '>';
1489267c0126SChris Wilson }
1490267c0126SChris Wilson 
149110be98a7SChris Wilson static void context_close(struct i915_gem_context *ctx)
149210be98a7SChris Wilson {
1493*49bd54b3STvrtko Ursulin 	struct i915_drm_client *client;
1494*49bd54b3STvrtko Ursulin 
1495130a95e9SChris Wilson 	/* Flush any concurrent set_engines() */
1496130a95e9SChris Wilson 	mutex_lock(&ctx->engines_mutex);
1497e5e32171SMatthew Brost 	unpin_engines(__context_engines_static(ctx));
1498130a95e9SChris Wilson 	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
14992850748eSChris Wilson 	i915_gem_context_set_closed(ctx);
1500130a95e9SChris Wilson 	mutex_unlock(&ctx->engines_mutex);
15012850748eSChris Wilson 
1502155ab883SChris Wilson 	mutex_lock(&ctx->mutex);
1503155ab883SChris Wilson 
1504130a95e9SChris Wilson 	set_closed_name(ctx);
1505130a95e9SChris Wilson 
150610be98a7SChris Wilson 	/*
150710be98a7SChris Wilson 	 * The LUT uses the VMA as a backpointer to unref the object,
150810be98a7SChris Wilson 	 * so we need to clear the LUT before we close all the VMA (inside
150910be98a7SChris Wilson 	 * the ppgtt).
151010be98a7SChris Wilson 	 */
151110be98a7SChris Wilson 	lut_close(ctx);
151210be98a7SChris Wilson 
1513e1a7ab4fSThomas Hellström 	ctx->file_priv = ERR_PTR(-EBADF);
1514e1a7ab4fSThomas Hellström 
1515f8246cf4SChris Wilson 	spin_lock(&ctx->i915->gem.contexts.lock);
1516f8246cf4SChris Wilson 	list_del(&ctx->link);
1517f8246cf4SChris Wilson 	spin_unlock(&ctx->i915->gem.contexts.lock);
1518f8246cf4SChris Wilson 
1519*49bd54b3STvrtko Ursulin 	client = ctx->client;
1520*49bd54b3STvrtko Ursulin 	if (client) {
1521*49bd54b3STvrtko Ursulin 		spin_lock(&client->ctx_lock);
1522*49bd54b3STvrtko Ursulin 		list_del_rcu(&ctx->client_link);
1523*49bd54b3STvrtko Ursulin 		spin_unlock(&client->ctx_lock);
1524*49bd54b3STvrtko Ursulin 	}
1525*49bd54b3STvrtko Ursulin 
1526155ab883SChris Wilson 	mutex_unlock(&ctx->mutex);
15272e0986a5SChris Wilson 
15282e0986a5SChris Wilson 	/*
15292e0986a5SChris Wilson 	 * If the user has disabled hangchecking, we cannot be sure that
15302e0986a5SChris Wilson 	 * the batches will ever complete after the context is closed,
15312e0986a5SChris Wilson 	 * keeping the context and all resources pinned forever. So in this
15322e0986a5SChris Wilson 	 * case we opt to forcibly kill off all remaining requests on
15332e0986a5SChris Wilson 	 * context close.
15342e0986a5SChris Wilson 	 */
15352e0986a5SChris Wilson 	kill_context(ctx);
15362e0986a5SChris Wilson 
153710be98a7SChris Wilson 	i915_gem_context_put(ctx);
153810be98a7SChris Wilson }
153910be98a7SChris Wilson 
1540a0e04715SChris Wilson static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
1541a0e04715SChris Wilson {
1542a0e04715SChris Wilson 	if (i915_gem_context_is_persistent(ctx) == state)
1543a0e04715SChris Wilson 		return 0;
1544a0e04715SChris Wilson 
1545a0e04715SChris Wilson 	if (state) {
1546a0e04715SChris Wilson 		/*
1547a0e04715SChris Wilson 		 * Only contexts that are short-lived [that will expire or be
1548a0e04715SChris Wilson 		 * reset] are allowed to survive past termination. We require
1549a0e04715SChris Wilson 		 * hangcheck to ensure that the persistent requests are healthy.
1550a0e04715SChris Wilson 		 */
15518a25c4beSJani Nikula 		if (!ctx->i915->params.enable_hangcheck)
1552a0e04715SChris Wilson 			return -EINVAL;
1553a0e04715SChris Wilson 
1554a0e04715SChris Wilson 		i915_gem_context_set_persistence(ctx);
1555a0e04715SChris Wilson 	} else {
1556a0e04715SChris Wilson 		/* To cancel a context we use "preempt-to-idle" */
1557a0e04715SChris Wilson 		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
1558a0e04715SChris Wilson 			return -ENODEV;
1559a0e04715SChris Wilson 
1560d1b9b5f1SChris Wilson 		/*
1561d1b9b5f1SChris Wilson 		 * If the cancel fails, we then need to reset, cleanly!
1562d1b9b5f1SChris Wilson 		 *
1563d1b9b5f1SChris Wilson 		 * If the per-engine reset fails, all hope is lost! We resort
1564d1b9b5f1SChris Wilson 		 * to a full GPU reset in that unlikely case, but realistically
1565d1b9b5f1SChris Wilson 		 * if the engine could not reset, the full reset does not fare
1566d1b9b5f1SChris Wilson 		 * much better. The damage has been done.
1567d1b9b5f1SChris Wilson 		 *
1568d1b9b5f1SChris Wilson 		 * However, if we cannot reset an engine by itself, we cannot
1569d1b9b5f1SChris Wilson 		 * clean up a hanging persistent context without causing
1570d1b9b5f1SChris Wilson 		 * collateral damage, and we should not pretend we can by
1571d1b9b5f1SChris Wilson 		 * exposing the interface.
1572d1b9b5f1SChris Wilson 		 */
15731a9c4db4SMichał Winiarski 		if (!intel_has_reset_engine(to_gt(ctx->i915)))
1574d1b9b5f1SChris Wilson 			return -ENODEV;
1575d1b9b5f1SChris Wilson 
1576a0e04715SChris Wilson 		i915_gem_context_clear_persistence(ctx);
1577a0e04715SChris Wilson 	}
1578a0e04715SChris Wilson 
1579a0e04715SChris Wilson 	return 0;
1580a0e04715SChris Wilson }
1581a0e04715SChris Wilson 
158210be98a7SChris Wilson static struct i915_gem_context *
1583a34857dcSJason Ekstrand i915_gem_create_context(struct drm_i915_private *i915,
1584a34857dcSJason Ekstrand 			const struct i915_gem_proto_context *pc)
158510be98a7SChris Wilson {
158610be98a7SChris Wilson 	struct i915_gem_context *ctx;
15870eee9977SJason Ekstrand 	struct i915_address_space *vm = NULL;
15880eee9977SJason Ekstrand 	struct i915_gem_engines *e;
15890eee9977SJason Ekstrand 	int err;
15900eee9977SJason Ekstrand 	int i;
159110be98a7SChris Wilson 
15920eee9977SJason Ekstrand 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
15930eee9977SJason Ekstrand 	if (!ctx)
15940eee9977SJason Ekstrand 		return ERR_PTR(-ENOMEM);
15950eee9977SJason Ekstrand 
15960eee9977SJason Ekstrand 	kref_init(&ctx->ref);
15970eee9977SJason Ekstrand 	ctx->i915 = i915;
15980eee9977SJason Ekstrand 	ctx->sched = pc->sched;
15990eee9977SJason Ekstrand 	mutex_init(&ctx->mutex);
16000eee9977SJason Ekstrand 	INIT_LIST_HEAD(&ctx->link);
160175eefd82SDaniel Vetter 	INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
16020eee9977SJason Ekstrand 
16030eee9977SJason Ekstrand 	spin_lock_init(&ctx->stale.lock);
16040eee9977SJason Ekstrand 	INIT_LIST_HEAD(&ctx->stale.engines);
160510be98a7SChris Wilson 
1606a34857dcSJason Ekstrand 	if (pc->vm) {
16070eee9977SJason Ekstrand 		vm = i915_vm_get(pc->vm);
1608a34857dcSJason Ekstrand 	} else if (HAS_FULL_PPGTT(i915)) {
1609ab53497bSChris Wilson 		struct i915_ppgtt *ppgtt;
161010be98a7SChris Wilson 
16111a9c4db4SMichał Winiarski 		ppgtt = i915_ppgtt_create(to_gt(i915), 0);
161210be98a7SChris Wilson 		if (IS_ERR(ppgtt)) {
1613baa89ba3SWambui Karuga 			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
161410be98a7SChris Wilson 				PTR_ERR(ppgtt));
16150eee9977SJason Ekstrand 			err = PTR_ERR(ppgtt);
16160eee9977SJason Ekstrand 			goto err_ctx;
16170eee9977SJason Ekstrand 		}
16180eee9977SJason Ekstrand 		vm = &ppgtt->vm;
16190eee9977SJason Ekstrand 	}
1620e1a7ab4fSThomas Hellström 	if (vm)
1621e1a7ab4fSThomas Hellström 		ctx->vm = vm;
162210be98a7SChris Wilson 
16230eee9977SJason Ekstrand 	mutex_init(&ctx->engines_mutex);
1624d4433c76SJason Ekstrand 	if (pc->num_user_engines >= 0) {
1625d4433c76SJason Ekstrand 		i915_gem_context_set_user_engines(ctx);
16260eee9977SJason Ekstrand 		e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
16270eee9977SJason Ekstrand 	} else {
16280eee9977SJason Ekstrand 		i915_gem_context_clear_user_engines(ctx);
16290eee9977SJason Ekstrand 		e = default_engines(ctx, pc->legacy_rcs_sseu);
1630d4433c76SJason Ekstrand 	}
16310eee9977SJason Ekstrand 	if (IS_ERR(e)) {
16320eee9977SJason Ekstrand 		err = PTR_ERR(e);
16330eee9977SJason Ekstrand 		goto err_vm;
16340eee9977SJason Ekstrand 	}
16350eee9977SJason Ekstrand 	RCU_INIT_POINTER(ctx->engines, e);
16360eee9977SJason Ekstrand 
16370eee9977SJason Ekstrand 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
16380eee9977SJason Ekstrand 	mutex_init(&ctx->lut_mutex);
16390eee9977SJason Ekstrand 
16400eee9977SJason Ekstrand 	/* NB: Mark all slices as needing a remap so that when the context first
16410eee9977SJason Ekstrand 	 * loads it will restore whatever remap state already exists. If there
16420eee9977SJason Ekstrand 	 * is no remap info, it will be a NOP. */
16430eee9977SJason Ekstrand 	ctx->remap_slice = ALL_L3_SLICES(i915);
16440eee9977SJason Ekstrand 
16450eee9977SJason Ekstrand 	ctx->user_flags = pc->user_flags;
16460eee9977SJason Ekstrand 
16470eee9977SJason Ekstrand 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
16480eee9977SJason Ekstrand 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
1649d4433c76SJason Ekstrand 
1650a34857dcSJason Ekstrand 	if (pc->single_timeline) {
16510eee9977SJason Ekstrand 		err = drm_syncobj_create(&ctx->syncobj,
165200dae4d3SJason Ekstrand 					 DRM_SYNCOBJ_CREATE_SIGNALED,
165300dae4d3SJason Ekstrand 					 NULL);
16540eee9977SJason Ekstrand 		if (err)
16550eee9977SJason Ekstrand 			goto err_engines;
165610be98a7SChris Wilson 	}
165710be98a7SChris Wilson 
1658d3ac8d42SDaniele Ceraolo Spurio 	if (pc->uses_protected_content) {
1659d3ac8d42SDaniele Ceraolo Spurio 		ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1660d3ac8d42SDaniele Ceraolo Spurio 		ctx->uses_protected_content = true;
1661d3ac8d42SDaniele Ceraolo Spurio 	}
1662d3ac8d42SDaniele Ceraolo Spurio 
166310be98a7SChris Wilson 	trace_i915_context_create(ctx);
166410be98a7SChris Wilson 
166510be98a7SChris Wilson 	return ctx;
16660eee9977SJason Ekstrand 
16670eee9977SJason Ekstrand err_engines:
16680eee9977SJason Ekstrand 	free_engines(e);
16690eee9977SJason Ekstrand err_vm:
16700eee9977SJason Ekstrand 	if (ctx->vm)
1671e1a7ab4fSThomas Hellström 		i915_vm_put(ctx->vm);
16720eee9977SJason Ekstrand err_ctx:
16730eee9977SJason Ekstrand 	kfree(ctx);
16740eee9977SJason Ekstrand 	return ERR_PTR(err);
167510be98a7SChris Wilson }
167610be98a7SChris Wilson 
1677a4e7ccdaSChris Wilson static void init_contexts(struct i915_gem_contexts *gc)
167810be98a7SChris Wilson {
1679a4e7ccdaSChris Wilson 	spin_lock_init(&gc->lock);
1680a4e7ccdaSChris Wilson 	INIT_LIST_HEAD(&gc->list);
168110be98a7SChris Wilson }
168210be98a7SChris Wilson 
1683e6ba7648SChris Wilson void i915_gem_init__contexts(struct drm_i915_private *i915)
168410be98a7SChris Wilson {
1685a4e7ccdaSChris Wilson 	init_contexts(&i915->gem.contexts);
168610be98a7SChris Wilson }
168710be98a7SChris Wilson 
1688a4c1cdd3SJason Ekstrand static void gem_context_register(struct i915_gem_context *ctx,
1689c100777cSTvrtko Ursulin 				 struct drm_i915_file_private *fpriv,
1690a4c1cdd3SJason Ekstrand 				 u32 id)
169110be98a7SChris Wilson {
1692eb4dedaeSChris Wilson 	struct drm_i915_private *i915 = ctx->i915;
1693a4c1cdd3SJason Ekstrand 	void *old;
169410be98a7SChris Wilson 
169510be98a7SChris Wilson 	ctx->file_priv = fpriv;
1696a4e7ccdaSChris Wilson 
169710be98a7SChris Wilson 	ctx->pid = get_task_pid(current, PIDTYPE_PID);
169843c50460STvrtko Ursulin 	ctx->client = i915_drm_client_get(fpriv->client);
169943c50460STvrtko Ursulin 
1700fc4f125dSChris Wilson 	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
170110be98a7SChris Wilson 		 current->comm, pid_nr(ctx->pid));
170210be98a7SChris Wilson 
170310be98a7SChris Wilson 	/* And finally expose ourselves to userspace via the idr */
1704a4c1cdd3SJason Ekstrand 	old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
1705a4c1cdd3SJason Ekstrand 	WARN_ON(old);
1706c100777cSTvrtko Ursulin 
1707*49bd54b3STvrtko Ursulin 	spin_lock(&ctx->client->ctx_lock);
1708*49bd54b3STvrtko Ursulin 	list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
1709*49bd54b3STvrtko Ursulin 	spin_unlock(&ctx->client->ctx_lock);
1710*49bd54b3STvrtko Ursulin 
1711eb4dedaeSChris Wilson 	spin_lock(&i915->gem.contexts.lock);
1712eb4dedaeSChris Wilson 	list_add_tail(&ctx->link, &i915->gem.contexts.list);
1713eb4dedaeSChris Wilson 	spin_unlock(&i915->gem.contexts.lock);
171410be98a7SChris Wilson }
171510be98a7SChris Wilson 
171610be98a7SChris Wilson int i915_gem_context_open(struct drm_i915_private *i915,
171710be98a7SChris Wilson 			  struct drm_file *file)
171810be98a7SChris Wilson {
171910be98a7SChris Wilson 	struct drm_i915_file_private *file_priv = file->driver_priv;
1720a34857dcSJason Ekstrand 	struct i915_gem_proto_context *pc;
172110be98a7SChris Wilson 	struct i915_gem_context *ctx;
172210be98a7SChris Wilson 	int err;
172310be98a7SChris Wilson 
1724a4c1cdd3SJason Ekstrand 	mutex_init(&file_priv->proto_context_lock);
1725a4c1cdd3SJason Ekstrand 	xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC);
1726a4c1cdd3SJason Ekstrand 
1727a4c1cdd3SJason Ekstrand 	/* 0 reserved for the default context */
1728a4c1cdd3SJason Ekstrand 	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1);
1729c100777cSTvrtko Ursulin 
17305dbd2b7bSChris Wilson 	/* 0 reserved for invalid/unassigned ppgtt */
17315dbd2b7bSChris Wilson 	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
173210be98a7SChris Wilson 
1733a34857dcSJason Ekstrand 	pc = proto_context_create(i915, 0);
1734a34857dcSJason Ekstrand 	if (IS_ERR(pc)) {
1735a34857dcSJason Ekstrand 		err = PTR_ERR(pc);
1736a34857dcSJason Ekstrand 		goto err;
1737a34857dcSJason Ekstrand 	}
1738a34857dcSJason Ekstrand 
1739a34857dcSJason Ekstrand 	ctx = i915_gem_create_context(i915, pc);
1740d3ac8d42SDaniele Ceraolo Spurio 	proto_context_close(i915, pc);
174110be98a7SChris Wilson 	if (IS_ERR(ctx)) {
174210be98a7SChris Wilson 		err = PTR_ERR(ctx);
174310be98a7SChris Wilson 		goto err;
174410be98a7SChris Wilson 	}
174510be98a7SChris Wilson 
1746a4c1cdd3SJason Ekstrand 	gem_context_register(ctx, file_priv, 0);
174710be98a7SChris Wilson 
174810be98a7SChris Wilson 	return 0;
174910be98a7SChris Wilson 
175010be98a7SChris Wilson err:
17515dbd2b7bSChris Wilson 	xa_destroy(&file_priv->vm_xa);
1752c100777cSTvrtko Ursulin 	xa_destroy(&file_priv->context_xa);
1753a4c1cdd3SJason Ekstrand 	xa_destroy(&file_priv->proto_context_xa);
1754a4c1cdd3SJason Ekstrand 	mutex_destroy(&file_priv->proto_context_lock);
175510be98a7SChris Wilson 	return err;
175610be98a7SChris Wilson }
175710be98a7SChris Wilson 
175810be98a7SChris Wilson void i915_gem_context_close(struct drm_file *file)
175910be98a7SChris Wilson {
176010be98a7SChris Wilson 	struct drm_i915_file_private *file_priv = file->driver_priv;
1761a4c1cdd3SJason Ekstrand 	struct i915_gem_proto_context *pc;
17625dbd2b7bSChris Wilson 	struct i915_address_space *vm;
1763c100777cSTvrtko Ursulin 	struct i915_gem_context *ctx;
1764c100777cSTvrtko Ursulin 	unsigned long idx;
176510be98a7SChris Wilson 
1766a4c1cdd3SJason Ekstrand 	xa_for_each(&file_priv->proto_context_xa, idx, pc)
1767d3ac8d42SDaniele Ceraolo Spurio 		proto_context_close(file_priv->dev_priv, pc);
1768a4c1cdd3SJason Ekstrand 	xa_destroy(&file_priv->proto_context_xa);
1769a4c1cdd3SJason Ekstrand 	mutex_destroy(&file_priv->proto_context_lock);
1770a4c1cdd3SJason Ekstrand 
1771c100777cSTvrtko Ursulin 	xa_for_each(&file_priv->context_xa, idx, ctx)
1772c100777cSTvrtko Ursulin 		context_close(ctx);
1773c100777cSTvrtko Ursulin 	xa_destroy(&file_priv->context_xa);
177410be98a7SChris Wilson 
17755dbd2b7bSChris Wilson 	xa_for_each(&file_priv->vm_xa, idx, vm)
17765dbd2b7bSChris Wilson 		i915_vm_put(vm);
17775dbd2b7bSChris Wilson 	xa_destroy(&file_priv->vm_xa);
177810be98a7SChris Wilson }
177910be98a7SChris Wilson 
178010be98a7SChris Wilson int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
178110be98a7SChris Wilson 			     struct drm_file *file)
178210be98a7SChris Wilson {
178310be98a7SChris Wilson 	struct drm_i915_private *i915 = to_i915(dev);
178410be98a7SChris Wilson 	struct drm_i915_gem_vm_control *args = data;
178510be98a7SChris Wilson 	struct drm_i915_file_private *file_priv = file->driver_priv;
1786ab53497bSChris Wilson 	struct i915_ppgtt *ppgtt;
17875dbd2b7bSChris Wilson 	u32 id;
178810be98a7SChris Wilson 	int err;
178910be98a7SChris Wilson 
179010be98a7SChris Wilson 	if (!HAS_FULL_PPGTT(i915))
179110be98a7SChris Wilson 		return -ENODEV;
179210be98a7SChris Wilson 
179310be98a7SChris Wilson 	if (args->flags)
179410be98a7SChris Wilson 		return -EINVAL;
179510be98a7SChris Wilson 
17961a9c4db4SMichał Winiarski 	ppgtt = i915_ppgtt_create(to_gt(i915), 0);
179710be98a7SChris Wilson 	if (IS_ERR(ppgtt))
179810be98a7SChris Wilson 		return PTR_ERR(ppgtt);
179910be98a7SChris Wilson 
180010be98a7SChris Wilson 	if (args->extensions) {
180110be98a7SChris Wilson 		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
180210be98a7SChris Wilson 					   NULL, 0,
180310be98a7SChris Wilson 					   ppgtt);
180410be98a7SChris Wilson 		if (err)
180510be98a7SChris Wilson 			goto err_put;
180610be98a7SChris Wilson 	}
180710be98a7SChris Wilson 
18085dbd2b7bSChris Wilson 	err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
18095dbd2b7bSChris Wilson 		       xa_limit_32b, GFP_KERNEL);
181010be98a7SChris Wilson 	if (err)
181110be98a7SChris Wilson 		goto err_put;
181210be98a7SChris Wilson 
18135dbd2b7bSChris Wilson 	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
18145dbd2b7bSChris Wilson 	args->vm_id = id;
181510be98a7SChris Wilson 	return 0;
181610be98a7SChris Wilson 
181710be98a7SChris Wilson err_put:
1818e568ac38SChris Wilson 	i915_vm_put(&ppgtt->vm);
181910be98a7SChris Wilson 	return err;
182010be98a7SChris Wilson }
182110be98a7SChris Wilson 
182210be98a7SChris Wilson int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
182310be98a7SChris Wilson 			      struct drm_file *file)
182410be98a7SChris Wilson {
182510be98a7SChris Wilson 	struct drm_i915_file_private *file_priv = file->driver_priv;
182610be98a7SChris Wilson 	struct drm_i915_gem_vm_control *args = data;
1827e568ac38SChris Wilson 	struct i915_address_space *vm;
182810be98a7SChris Wilson 
182910be98a7SChris Wilson 	if (args->flags)
183010be98a7SChris Wilson 		return -EINVAL;
183110be98a7SChris Wilson 
183210be98a7SChris Wilson 	if (args->extensions)
183310be98a7SChris Wilson 		return -EINVAL;
183410be98a7SChris Wilson 
18355dbd2b7bSChris Wilson 	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1836e568ac38SChris Wilson 	if (!vm)
183710be98a7SChris Wilson 		return -ENOENT;
183810be98a7SChris Wilson 
1839e568ac38SChris Wilson 	i915_vm_put(vm);
184010be98a7SChris Wilson 	return 0;
184110be98a7SChris Wilson }
184210be98a7SChris Wilson 
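/*
 * get_ppgtt() exports the context's ppgtt to userspace: a new vm_id is
 * allocated in the file's vm_xa, holding an extra reference on the
 * address space.
 */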
184310be98a7SChris Wilson static int get_ppgtt(struct drm_i915_file_private *file_priv,
184410be98a7SChris Wilson 		     struct i915_gem_context *ctx,
184510be98a7SChris Wilson 		     struct drm_i915_gem_context_param *args)
184610be98a7SChris Wilson {
1847e568ac38SChris Wilson 	struct i915_address_space *vm;
18485dbd2b7bSChris Wilson 	int err;
18495dbd2b7bSChris Wilson 	u32 id;
185010be98a7SChris Wilson 
1851a82a9979SDaniel Vetter 	if (!i915_gem_context_has_full_ppgtt(ctx))
185210be98a7SChris Wilson 		return -ENODEV;
185310be98a7SChris Wilson 
18549ec8795eSDaniel Vetter 	vm = ctx->vm;
18559ec8795eSDaniel Vetter 	GEM_BUG_ON(!vm);
185690211ea4SChris Wilson 
185790211ea4SChris Wilson 	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
18585dbd2b7bSChris Wilson 	if (err)
18599ec8795eSDaniel Vetter 		return err;
186010be98a7SChris Wilson 
1861e1a7ab4fSThomas Hellström 	i915_vm_get(vm);
186210be98a7SChris Wilson 
18635dbd2b7bSChris Wilson 	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
18645dbd2b7bSChris Wilson 	args->value = id;
186510be98a7SChris Wilson 	args->size = 0;
186610be98a7SChris Wilson 
18675dbd2b7bSChris Wilson 	return err;
186810be98a7SChris Wilson }
186910be98a7SChris Wilson 
187011ecbdddSLionel Landwerlin int
18710b6613c6SVenkata Sandeep Dhanalakota i915_gem_user_to_context_sseu(struct intel_gt *gt,
187210be98a7SChris Wilson 			      const struct drm_i915_gem_context_param_sseu *user,
187310be98a7SChris Wilson 			      struct intel_sseu *context)
187410be98a7SChris Wilson {
18750b6613c6SVenkata Sandeep Dhanalakota 	const struct sseu_dev_info *device = &gt->info.sseu;
18760b6613c6SVenkata Sandeep Dhanalakota 	struct drm_i915_private *i915 = gt->i915;
187710be98a7SChris Wilson 
187810be98a7SChris Wilson 	/* No zeros in any field. */
187910be98a7SChris Wilson 	if (!user->slice_mask || !user->subslice_mask ||
188010be98a7SChris Wilson 	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
188110be98a7SChris Wilson 		return -EINVAL;
188210be98a7SChris Wilson 
188310be98a7SChris Wilson 	/* Max > min. */
188410be98a7SChris Wilson 	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
188510be98a7SChris Wilson 		return -EINVAL;
188610be98a7SChris Wilson 
188710be98a7SChris Wilson 	/*
188810be98a7SChris Wilson 	 * Some future proofing on the types since the uAPI is wider than the
188910be98a7SChris Wilson 	 * current internal implementation.
189010be98a7SChris Wilson 	 */
189110be98a7SChris Wilson 	if (overflows_type(user->slice_mask, context->slice_mask) ||
189210be98a7SChris Wilson 	    overflows_type(user->subslice_mask, context->subslice_mask) ||
189310be98a7SChris Wilson 	    overflows_type(user->min_eus_per_subslice,
189410be98a7SChris Wilson 			   context->min_eus_per_subslice) ||
189510be98a7SChris Wilson 	    overflows_type(user->max_eus_per_subslice,
189610be98a7SChris Wilson 			   context->max_eus_per_subslice))
189710be98a7SChris Wilson 		return -EINVAL;
189810be98a7SChris Wilson 
189910be98a7SChris Wilson 	/* Check validity against hardware. */
190010be98a7SChris Wilson 	if (user->slice_mask & ~device->slice_mask)
190110be98a7SChris Wilson 		return -EINVAL;
190210be98a7SChris Wilson 
190310be98a7SChris Wilson 	if (user->subslice_mask & ~device->subslice_mask[0])
190410be98a7SChris Wilson 		return -EINVAL;
190510be98a7SChris Wilson 
190610be98a7SChris Wilson 	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
190710be98a7SChris Wilson 		return -EINVAL;
190810be98a7SChris Wilson 
190910be98a7SChris Wilson 	context->slice_mask = user->slice_mask;
191010be98a7SChris Wilson 	context->subslice_mask = user->subslice_mask;
191110be98a7SChris Wilson 	context->min_eus_per_subslice = user->min_eus_per_subslice;
191210be98a7SChris Wilson 	context->max_eus_per_subslice = user->max_eus_per_subslice;
191310be98a7SChris Wilson 
191410be98a7SChris Wilson 	/* Part specific restrictions. */
191540e1956eSLucas De Marchi 	if (GRAPHICS_VER(i915) == 11) {
191610be98a7SChris Wilson 		unsigned int hw_s = hweight8(device->slice_mask);
191710be98a7SChris Wilson 		unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
191810be98a7SChris Wilson 		unsigned int req_s = hweight8(context->slice_mask);
191910be98a7SChris Wilson 		unsigned int req_ss = hweight8(context->subslice_mask);
192010be98a7SChris Wilson 
192110be98a7SChris Wilson 		/*
192210be98a7SChris Wilson 		 * Only full subslice enablement is possible if more than one
192310be98a7SChris Wilson 		 * slice is turned on.
192410be98a7SChris Wilson 		 */
192510be98a7SChris Wilson 		if (req_s > 1 && req_ss != hw_ss_per_s)
192610be98a7SChris Wilson 			return -EINVAL;
192710be98a7SChris Wilson 
192810be98a7SChris Wilson 		/*
192910be98a7SChris Wilson 		 * If more than four (SScount bitfield limit) subslices are
193010be98a7SChris Wilson 		 * requested then the number has to be even.
193110be98a7SChris Wilson 		 */
193210be98a7SChris Wilson 		if (req_ss > 4 && (req_ss & 1))
193310be98a7SChris Wilson 			return -EINVAL;
193410be98a7SChris Wilson 
193510be98a7SChris Wilson 		/*
193610be98a7SChris Wilson 		 * If only one slice is enabled and subslice count is below the
193710be98a7SChris Wilson 		 * device full enablement, it must be at most half of all the
193810be98a7SChris Wilson 		 * available subslices.
193910be98a7SChris Wilson 		 */
194010be98a7SChris Wilson 		if (req_s == 1 && req_ss < hw_ss_per_s &&
194110be98a7SChris Wilson 		    req_ss > (hw_ss_per_s / 2))
194210be98a7SChris Wilson 			return -EINVAL;
194310be98a7SChris Wilson 
194410be98a7SChris Wilson 		/* ABI restriction - VME use case only. */
194510be98a7SChris Wilson 
194610be98a7SChris Wilson 		/* All slices or one slice only. */
194710be98a7SChris Wilson 		if (req_s != 1 && req_s != hw_s)
194810be98a7SChris Wilson 			return -EINVAL;
194910be98a7SChris Wilson 
195010be98a7SChris Wilson 		/*
195110be98a7SChris Wilson 		 * Half subslices or full enablement only when one slice is
195210be98a7SChris Wilson 		 * enabled.
195310be98a7SChris Wilson 		 */
195410be98a7SChris Wilson 		if (req_s == 1 &&
195510be98a7SChris Wilson 		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
195610be98a7SChris Wilson 			return -EINVAL;
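		/*
		 * e.g. with a single slice of eight subslices, the checks
		 * above leave eight or four enabled subslices as the only
		 * valid requests.
		 */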
195710be98a7SChris Wilson 
195810be98a7SChris Wilson 		/* No EU configuration changes. */
195910be98a7SChris Wilson 		if ((user->min_eus_per_subslice !=
196010be98a7SChris Wilson 		     device->max_eus_per_subslice) ||
196110be98a7SChris Wilson 		    (user->max_eus_per_subslice !=
196210be98a7SChris Wilson 		     device->max_eus_per_subslice))
196310be98a7SChris Wilson 			return -EINVAL;
196410be98a7SChris Wilson 	}
196510be98a7SChris Wilson 
196610be98a7SChris Wilson 	return 0;
196710be98a7SChris Wilson }
196810be98a7SChris Wilson 
196910be98a7SChris Wilson static int set_sseu(struct i915_gem_context *ctx,
197010be98a7SChris Wilson 		    struct drm_i915_gem_context_param *args)
197110be98a7SChris Wilson {
197210be98a7SChris Wilson 	struct drm_i915_private *i915 = ctx->i915;
197310be98a7SChris Wilson 	struct drm_i915_gem_context_param_sseu user_sseu;
197410be98a7SChris Wilson 	struct intel_context *ce;
197510be98a7SChris Wilson 	struct intel_sseu sseu;
197610be98a7SChris Wilson 	unsigned long lookup;
197710be98a7SChris Wilson 	int ret;
197810be98a7SChris Wilson 
197910be98a7SChris Wilson 	if (args->size < sizeof(user_sseu))
198010be98a7SChris Wilson 		return -EINVAL;
198110be98a7SChris Wilson 
198240e1956eSLucas De Marchi 	if (GRAPHICS_VER(i915) != 11)
198310be98a7SChris Wilson 		return -ENODEV;
198410be98a7SChris Wilson 
198510be98a7SChris Wilson 	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
198610be98a7SChris Wilson 			   sizeof(user_sseu)))
198710be98a7SChris Wilson 		return -EFAULT;
198810be98a7SChris Wilson 
198910be98a7SChris Wilson 	if (user_sseu.rsvd)
199010be98a7SChris Wilson 		return -EINVAL;
199110be98a7SChris Wilson 
199210be98a7SChris Wilson 	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
199310be98a7SChris Wilson 		return -EINVAL;
199410be98a7SChris Wilson 
199510be98a7SChris Wilson 	lookup = 0;
199610be98a7SChris Wilson 	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
199710be98a7SChris Wilson 		lookup |= LOOKUP_USER_INDEX;
199810be98a7SChris Wilson 
199910be98a7SChris Wilson 	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
200010be98a7SChris Wilson 	if (IS_ERR(ce))
200110be98a7SChris Wilson 		return PTR_ERR(ce);
200210be98a7SChris Wilson 
200310be98a7SChris Wilson 	/* Only render engine supports RPCS configuration. */
200410be98a7SChris Wilson 	if (ce->engine->class != RENDER_CLASS) {
200510be98a7SChris Wilson 		ret = -ENODEV;
200610be98a7SChris Wilson 		goto out_ce;
200710be98a7SChris Wilson 	}
200810be98a7SChris Wilson 
20090b6613c6SVenkata Sandeep Dhanalakota 	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
201010be98a7SChris Wilson 	if (ret)
201110be98a7SChris Wilson 		goto out_ce;
201210be98a7SChris Wilson 
201310be98a7SChris Wilson 	ret = intel_context_reconfigure_sseu(ce, sseu);
201410be98a7SChris Wilson 	if (ret)
201510be98a7SChris Wilson 		goto out_ce;
201610be98a7SChris Wilson 
201710be98a7SChris Wilson 	args->size = sizeof(user_sseu);
201810be98a7SChris Wilson 
201910be98a7SChris Wilson out_ce:
202010be98a7SChris Wilson 	intel_context_put(ce);
202110be98a7SChris Wilson 	return ret;
202210be98a7SChris Wilson }
202310be98a7SChris Wilson 
202410be98a7SChris Wilson static int
2025a0e04715SChris Wilson set_persistence(struct i915_gem_context *ctx,
2026a0e04715SChris Wilson 		const struct drm_i915_gem_context_param *args)
2027a0e04715SChris Wilson {
2028a0e04715SChris Wilson 	if (args->size)
2029a0e04715SChris Wilson 		return -EINVAL;
2030a0e04715SChris Wilson 
2031a0e04715SChris Wilson 	return __context_set_persistence(ctx, args->value);
2032a0e04715SChris Wilson }
2033a0e04715SChris Wilson 
20340f100b70SChris Wilson static int set_priority(struct i915_gem_context *ctx,
20350f100b70SChris Wilson 			const struct drm_i915_gem_context_param *args)
20360f100b70SChris Wilson {
2037b9709057SDaniel Vetter 	struct i915_gem_engines_iter it;
2038b9709057SDaniel Vetter 	struct intel_context *ce;
2039aaa5957cSJason Ekstrand 	int err;
20400f100b70SChris Wilson 
2041aaa5957cSJason Ekstrand 	err = validate_priority(ctx->i915, args);
2042aaa5957cSJason Ekstrand 	if (err)
2043aaa5957cSJason Ekstrand 		return err;
20440f100b70SChris Wilson 
2045aaa5957cSJason Ekstrand 	ctx->sched.priority = args->value;
2046b9709057SDaniel Vetter 
2047b9709057SDaniel Vetter 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2048b9709057SDaniel Vetter 		if (!intel_engine_has_timeslices(ce->engine))
2049b9709057SDaniel Vetter 			continue;
2050b9709057SDaniel Vetter 
2051b9709057SDaniel Vetter 		if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
2052b9709057SDaniel Vetter 		    intel_engine_has_semaphores(ce->engine))
2053b9709057SDaniel Vetter 			intel_context_set_use_semaphores(ce);
2054b9709057SDaniel Vetter 		else
2055b9709057SDaniel Vetter 			intel_context_clear_use_semaphores(ce);
2056b9709057SDaniel Vetter 	}
2057b9709057SDaniel Vetter 	i915_gem_context_unlock_engines(ctx);
20580f100b70SChris Wilson 
20590f100b70SChris Wilson 	return 0;
20600f100b70SChris Wilson }
20610f100b70SChris Wilson 
2062d3ac8d42SDaniele Ceraolo Spurio static int get_protected(struct i915_gem_context *ctx,
2063d3ac8d42SDaniele Ceraolo Spurio 			 struct drm_i915_gem_context_param *args)
2064d3ac8d42SDaniele Ceraolo Spurio {
2065d3ac8d42SDaniele Ceraolo Spurio 	args->size = 0;
2066d3ac8d42SDaniele Ceraolo Spurio 	args->value = i915_gem_context_uses_protected_content(ctx);
2067d3ac8d42SDaniele Ceraolo Spurio 
2068d3ac8d42SDaniele Ceraolo Spurio 	return 0;
2069d3ac8d42SDaniele Ceraolo Spurio }
2070d3ac8d42SDaniele Ceraolo Spurio 
207110be98a7SChris Wilson static int ctx_setparam(struct drm_i915_file_private *fpriv,
207210be98a7SChris Wilson 			struct i915_gem_context *ctx,
207310be98a7SChris Wilson 			struct drm_i915_gem_context_param *args)
207410be98a7SChris Wilson {
207510be98a7SChris Wilson 	int ret = 0;
207610be98a7SChris Wilson 
207710be98a7SChris Wilson 	switch (args->param) {
207810be98a7SChris Wilson 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
207910be98a7SChris Wilson 		if (args->size)
208010be98a7SChris Wilson 			ret = -EINVAL;
208110be98a7SChris Wilson 		else if (args->value)
208210be98a7SChris Wilson 			i915_gem_context_set_no_error_capture(ctx);
208310be98a7SChris Wilson 		else
208410be98a7SChris Wilson 			i915_gem_context_clear_no_error_capture(ctx);
208510be98a7SChris Wilson 		break;
208610be98a7SChris Wilson 
208710be98a7SChris Wilson 	case I915_CONTEXT_PARAM_BANNABLE:
208810be98a7SChris Wilson 		if (args->size)
208910be98a7SChris Wilson 			ret = -EINVAL;
209010be98a7SChris Wilson 		else if (!capable(CAP_SYS_ADMIN) && !args->value)
209110be98a7SChris Wilson 			ret = -EPERM;
209210be98a7SChris Wilson 		else if (args->value)
209310be98a7SChris Wilson 			i915_gem_context_set_bannable(ctx);
2094d3ac8d42SDaniele Ceraolo Spurio 		else if (i915_gem_context_uses_protected_content(ctx))
2095d3ac8d42SDaniele Ceraolo Spurio 			ret = -EPERM; /* can't clear this for protected contexts */
209610be98a7SChris Wilson 		else
209710be98a7SChris Wilson 			i915_gem_context_clear_bannable(ctx);
209810be98a7SChris Wilson 		break;
209910be98a7SChris Wilson 
210010be98a7SChris Wilson 	case I915_CONTEXT_PARAM_RECOVERABLE:
210110be98a7SChris Wilson 		if (args->size)
210210be98a7SChris Wilson 			ret = -EINVAL;
2103d3ac8d42SDaniele Ceraolo Spurio 		else if (!args->value)
210410be98a7SChris Wilson 			i915_gem_context_clear_recoverable(ctx);
2105d3ac8d42SDaniele Ceraolo Spurio 		else if (i915_gem_context_uses_protected_content(ctx))
2106d3ac8d42SDaniele Ceraolo Spurio 			ret = -EPERM; /* can't set this for protected contexts */
2107d3ac8d42SDaniele Ceraolo Spurio 		else
2108d3ac8d42SDaniele Ceraolo Spurio 			i915_gem_context_set_recoverable(ctx);
210910be98a7SChris Wilson 		break;
211010be98a7SChris Wilson 
211110be98a7SChris Wilson 	case I915_CONTEXT_PARAM_PRIORITY:
21120f100b70SChris Wilson 		ret = set_priority(ctx, args);
211310be98a7SChris Wilson 		break;
211410be98a7SChris Wilson 
211510be98a7SChris Wilson 	case I915_CONTEXT_PARAM_SSEU:
211610be98a7SChris Wilson 		ret = set_sseu(ctx, args);
211710be98a7SChris Wilson 		break;
211810be98a7SChris Wilson 
2119a0e04715SChris Wilson 	case I915_CONTEXT_PARAM_PERSISTENCE:
2120a0e04715SChris Wilson 		ret = set_persistence(ctx, args);
2121a0e04715SChris Wilson 		break;
2122a0e04715SChris Wilson 
2123d3ac8d42SDaniele Ceraolo Spurio 	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
21246ff6d61dSJason Ekstrand 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
212510be98a7SChris Wilson 	case I915_CONTEXT_PARAM_BAN_PERIOD:
2126fe4751c3SJason Ekstrand 	case I915_CONTEXT_PARAM_RINGSIZE:
2127ccbc1b97SJason Ekstrand 	case I915_CONTEXT_PARAM_VM:
2128d9d29c74SJason Ekstrand 	case I915_CONTEXT_PARAM_ENGINES:
212910be98a7SChris Wilson 	default:
213010be98a7SChris Wilson 		ret = -EINVAL;
213110be98a7SChris Wilson 		break;
213210be98a7SChris Wilson 	}
213310be98a7SChris Wilson 
213410be98a7SChris Wilson 	return ret;
213510be98a7SChris Wilson }
213610be98a7SChris Wilson 
213710be98a7SChris Wilson struct create_ext {
2138d4433c76SJason Ekstrand 	struct i915_gem_proto_context *pc;
213910be98a7SChris Wilson 	struct drm_i915_file_private *fpriv;
214010be98a7SChris Wilson };
214110be98a7SChris Wilson 
214210be98a7SChris Wilson static int create_setparam(struct i915_user_extension __user *ext, void *data)
214310be98a7SChris Wilson {
214410be98a7SChris Wilson 	struct drm_i915_gem_context_create_ext_setparam local;
214510be98a7SChris Wilson 	const struct create_ext *arg = data;
214610be98a7SChris Wilson 
214710be98a7SChris Wilson 	if (copy_from_user(&local, ext, sizeof(local)))
214810be98a7SChris Wilson 		return -EFAULT;
214910be98a7SChris Wilson 
215010be98a7SChris Wilson 	if (local.param.ctx_id)
215110be98a7SChris Wilson 		return -EINVAL;
215210be98a7SChris Wilson 
2153d4433c76SJason Ekstrand 	return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
215410be98a7SChris Wilson }
215510be98a7SChris Wilson 
21564a766ae4SJason Ekstrand static int invalid_ext(struct i915_user_extension __user *ext, void *data)
215710be98a7SChris Wilson {
215810be98a7SChris Wilson 	return -EINVAL;
215910be98a7SChris Wilson }
216010be98a7SChris Wilson 
216110be98a7SChris Wilson static const i915_user_extension_fn create_extensions[] = {
216210be98a7SChris Wilson 	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
21634a766ae4SJason Ekstrand 	[I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
216410be98a7SChris Wilson };
216510be98a7SChris Wilson 
216610be98a7SChris Wilson static bool client_is_banned(struct drm_i915_file_private *file_priv)
216710be98a7SChris Wilson {
216810be98a7SChris Wilson 	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
216910be98a7SChris Wilson }
217010be98a7SChris Wilson 
2171a4c1cdd3SJason Ekstrand static inline struct i915_gem_context *
2172a4c1cdd3SJason Ekstrand __context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2173a4c1cdd3SJason Ekstrand {
2174a4c1cdd3SJason Ekstrand 	struct i915_gem_context *ctx;
2175a4c1cdd3SJason Ekstrand 
2176a4c1cdd3SJason Ekstrand 	rcu_read_lock();
2177a4c1cdd3SJason Ekstrand 	ctx = xa_load(&file_priv->context_xa, id);
2178a4c1cdd3SJason Ekstrand 	if (ctx && !kref_get_unless_zero(&ctx->ref))
2179a4c1cdd3SJason Ekstrand 		ctx = NULL;
2180a4c1cdd3SJason Ekstrand 	rcu_read_unlock();
2181a4c1cdd3SJason Ekstrand 
2182a4c1cdd3SJason Ekstrand 	return ctx;
2183a4c1cdd3SJason Ekstrand }
2184a4c1cdd3SJason Ekstrand 
2185a4c1cdd3SJason Ekstrand static struct i915_gem_context *
2186a4c1cdd3SJason Ekstrand finalize_create_context_locked(struct drm_i915_file_private *file_priv,
2187a4c1cdd3SJason Ekstrand 			       struct i915_gem_proto_context *pc, u32 id)
2188a4c1cdd3SJason Ekstrand {
2189a4c1cdd3SJason Ekstrand 	struct i915_gem_context *ctx;
2190a4c1cdd3SJason Ekstrand 	void *old;
2191a4c1cdd3SJason Ekstrand 
2192a4c1cdd3SJason Ekstrand 	lockdep_assert_held(&file_priv->proto_context_lock);
2193a4c1cdd3SJason Ekstrand 
2194a4c1cdd3SJason Ekstrand 	ctx = i915_gem_create_context(file_priv->dev_priv, pc);
2195a4c1cdd3SJason Ekstrand 	if (IS_ERR(ctx))
2196a4c1cdd3SJason Ekstrand 		return ctx;
2197a4c1cdd3SJason Ekstrand 
2198a4c1cdd3SJason Ekstrand 	gem_context_register(ctx, file_priv, id);
2199a4c1cdd3SJason Ekstrand 
2200a4c1cdd3SJason Ekstrand 	old = xa_erase(&file_priv->proto_context_xa, id);
2201a4c1cdd3SJason Ekstrand 	GEM_BUG_ON(old != pc);
2202d3ac8d42SDaniele Ceraolo Spurio 	proto_context_close(file_priv->dev_priv, pc);
2203a4c1cdd3SJason Ekstrand 
2204a4c1cdd3SJason Ekstrand 	/* One for the xarray and one for the caller */
2205a4c1cdd3SJason Ekstrand 	return i915_gem_context_get(ctx);
2206a4c1cdd3SJason Ekstrand }
2207a4c1cdd3SJason Ekstrand 
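/*
 * Look up a context by ID, lazily finalizing it on first use. The fast path
 * is the lockless lookup in context_xa; on a miss the proto_context_lock is
 * taken, the lookup retried (another thread may have finalized the context
 * meanwhile), and only then is a pending proto-context turned into a real
 * context.
 */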
2208a4c1cdd3SJason Ekstrand struct i915_gem_context *
2209a4c1cdd3SJason Ekstrand i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2210a4c1cdd3SJason Ekstrand {
2211a4c1cdd3SJason Ekstrand 	struct i915_gem_proto_context *pc;
2212a4c1cdd3SJason Ekstrand 	struct i915_gem_context *ctx;
2213a4c1cdd3SJason Ekstrand 
2214a4c1cdd3SJason Ekstrand 	ctx = __context_lookup(file_priv, id);
2215a4c1cdd3SJason Ekstrand 	if (ctx)
2216a4c1cdd3SJason Ekstrand 		return ctx;
2217a4c1cdd3SJason Ekstrand 
2218a4c1cdd3SJason Ekstrand 	mutex_lock(&file_priv->proto_context_lock);
2219a4c1cdd3SJason Ekstrand 	/* Try one more time under the lock */
2220a4c1cdd3SJason Ekstrand 	ctx = __context_lookup(file_priv, id);
2221a4c1cdd3SJason Ekstrand 	if (!ctx) {
2222a4c1cdd3SJason Ekstrand 		pc = xa_load(&file_priv->proto_context_xa, id);
2223a4c1cdd3SJason Ekstrand 		if (!pc)
2224a4c1cdd3SJason Ekstrand 			ctx = ERR_PTR(-ENOENT);
2225a4c1cdd3SJason Ekstrand 		else
2226a4c1cdd3SJason Ekstrand 			ctx = finalize_create_context_locked(file_priv, pc, id);
2227a4c1cdd3SJason Ekstrand 	}
2228a4c1cdd3SJason Ekstrand 	mutex_unlock(&file_priv->proto_context_lock);
2229a4c1cdd3SJason Ekstrand 
2230a4c1cdd3SJason Ekstrand 	return ctx;
2231a4c1cdd3SJason Ekstrand }
2232a4c1cdd3SJason Ekstrand 
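/*
 * GEM_CONTEXT_CREATE ioctl. A proto-context is built first and any SETPARAM
 * extensions are applied to it; on graphics version 13 and later the context
 * is finalized immediately, while older platforms only register the
 * proto-context and rely on i915_gem_context_lookup() to create the real
 * context lazily on first use.
 */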
223310be98a7SChris Wilson int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
223410be98a7SChris Wilson 				  struct drm_file *file)
223510be98a7SChris Wilson {
223610be98a7SChris Wilson 	struct drm_i915_private *i915 = to_i915(dev);
223710be98a7SChris Wilson 	struct drm_i915_gem_context_create_ext *args = data;
223810be98a7SChris Wilson 	struct create_ext ext_data;
223910be98a7SChris Wilson 	int ret;
2240c100777cSTvrtko Ursulin 	u32 id;
224110be98a7SChris Wilson 
224210be98a7SChris Wilson 	if (!DRIVER_CAPS(i915)->has_logical_contexts)
224310be98a7SChris Wilson 		return -ENODEV;
224410be98a7SChris Wilson 
224510be98a7SChris Wilson 	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
224610be98a7SChris Wilson 		return -EINVAL;
224710be98a7SChris Wilson 
22481a9c4db4SMichał Winiarski 	ret = intel_gt_terminally_wedged(to_gt(i915));
224910be98a7SChris Wilson 	if (ret)
225010be98a7SChris Wilson 		return ret;
225110be98a7SChris Wilson 
225210be98a7SChris Wilson 	ext_data.fpriv = file->driver_priv;
225310be98a7SChris Wilson 	if (client_is_banned(ext_data.fpriv)) {
2254baa89ba3SWambui Karuga 		drm_dbg(&i915->drm,
2255baa89ba3SWambui Karuga 			"client %s[%d] banned from creating ctx\n",
2256ba16a48aSTvrtko Ursulin 			current->comm, task_pid_nr(current));
225710be98a7SChris Wilson 		return -EIO;
225810be98a7SChris Wilson 	}
225910be98a7SChris Wilson 
2260d4433c76SJason Ekstrand 	ext_data.pc = proto_context_create(i915, args->flags);
2261d4433c76SJason Ekstrand 	if (IS_ERR(ext_data.pc))
2262d4433c76SJason Ekstrand 		return PTR_ERR(ext_data.pc);
226310be98a7SChris Wilson 
226410be98a7SChris Wilson 	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
226510be98a7SChris Wilson 		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
226610be98a7SChris Wilson 					   create_extensions,
226710be98a7SChris Wilson 					   ARRAY_SIZE(create_extensions),
226810be98a7SChris Wilson 					   &ext_data);
2269a4c1cdd3SJason Ekstrand 		if (ret)
2270a4c1cdd3SJason Ekstrand 			goto err_pc;
227110be98a7SChris Wilson 	}
227210be98a7SChris Wilson 
2273ca06f936SJason Ekstrand 	if (GRAPHICS_VER(i915) > 12) {
2274ca06f936SJason Ekstrand 		struct i915_gem_context *ctx;
2275ca06f936SJason Ekstrand 
2276ca06f936SJason Ekstrand 		/* Get ourselves a context ID */
2277ca06f936SJason Ekstrand 		ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
2278ca06f936SJason Ekstrand 			       xa_limit_32b, GFP_KERNEL);
2279ca06f936SJason Ekstrand 		if (ret)
2280ca06f936SJason Ekstrand 			goto err_pc;
2281ca06f936SJason Ekstrand 
2282ca06f936SJason Ekstrand 		ctx = i915_gem_create_context(i915, ext_data.pc);
2283ca06f936SJason Ekstrand 		if (IS_ERR(ctx)) {
2284ca06f936SJason Ekstrand 			ret = PTR_ERR(ctx);
2285ca06f936SJason Ekstrand 			goto err_ctx;
2286ca06f936SJason Ekstrand 		}
2287ca06f936SJason Ekstrand 
2288d3ac8d42SDaniele Ceraolo Spurio 		proto_context_close(i915, ext_data.pc);
2289ca06f936SJason Ekstrand 		gem_context_register(ctx, ext_data.fpriv, id);
2290ca06f936SJason Ekstrand 	} else {
2291a4c1cdd3SJason Ekstrand 		ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
229210be98a7SChris Wilson 		if (ret < 0)
2293a4c1cdd3SJason Ekstrand 			goto err_pc;
2294ca06f936SJason Ekstrand 	}
229510be98a7SChris Wilson 
2296c100777cSTvrtko Ursulin 	args->ctx_id = id;
2297baa89ba3SWambui Karuga 	drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
229810be98a7SChris Wilson 
229910be98a7SChris Wilson 	return 0;
230010be98a7SChris Wilson 
err_ctx:
	/* Drop the context ID reserved for the context we failed to create */
	xa_erase(&ext_data.fpriv->context_xa, id);
2301a4c1cdd3SJason Ekstrand err_pc:
2302d3ac8d42SDaniele Ceraolo Spurio 	proto_context_close(i915, ext_data.pc);
230310be98a7SChris Wilson 	return ret;
230410be98a7SChris Wilson }
230510be98a7SChris Wilson 
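/*
 * GEM_CONTEXT_DESTROY ioctl. The ID may still name an unfinalized
 * proto-context, so both xarrays are erased under the proto_context_lock;
 * exactly one of the two erases is expected to succeed.
 */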
230610be98a7SChris Wilson int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
230710be98a7SChris Wilson 				   struct drm_file *file)
230810be98a7SChris Wilson {
230910be98a7SChris Wilson 	struct drm_i915_gem_context_destroy *args = data;
231010be98a7SChris Wilson 	struct drm_i915_file_private *file_priv = file->driver_priv;
2311a4c1cdd3SJason Ekstrand 	struct i915_gem_proto_context *pc;
231210be98a7SChris Wilson 	struct i915_gem_context *ctx;
231310be98a7SChris Wilson 
231410be98a7SChris Wilson 	if (args->pad != 0)
231510be98a7SChris Wilson 		return -EINVAL;
231610be98a7SChris Wilson 
231710be98a7SChris Wilson 	if (!args->ctx_id)
231810be98a7SChris Wilson 		return -ENOENT;
231910be98a7SChris Wilson 
2320a4c1cdd3SJason Ekstrand 	/* We need to hold the proto-context lock here to prevent races
2321a4c1cdd3SJason Ekstrand 	 * with finalize_create_context_locked().
2322a4c1cdd3SJason Ekstrand 	 */
2323a4c1cdd3SJason Ekstrand 	mutex_lock(&file_priv->proto_context_lock);
2324c100777cSTvrtko Ursulin 	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2325a4c1cdd3SJason Ekstrand 	pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
2326a4c1cdd3SJason Ekstrand 	mutex_unlock(&file_priv->proto_context_lock);
232710be98a7SChris Wilson 
2328a4c1cdd3SJason Ekstrand 	if (!ctx && !pc)
2329a4c1cdd3SJason Ekstrand 		return -ENOENT;
2330a4c1cdd3SJason Ekstrand 	GEM_WARN_ON(ctx && pc);
2331a4c1cdd3SJason Ekstrand 
2332a4c1cdd3SJason Ekstrand 	if (pc)
2333d3ac8d42SDaniele Ceraolo Spurio 		proto_context_close(file_priv->dev_priv, pc);
2334a4c1cdd3SJason Ekstrand 
2335a4c1cdd3SJason Ekstrand 	if (ctx)
233610be98a7SChris Wilson 		context_close(ctx);
2337a4c1cdd3SJason Ekstrand 
233810be98a7SChris Wilson 	return 0;
233910be98a7SChris Wilson }
234010be98a7SChris Wilson 
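/*
 * Report the SSEU configuration of one engine in the context. Passing
 * size == 0 is a pure size query: the call succeeds and only reports the
 * struct size userspace must supply next time.
 *
 * A rough userspace sketch (hypothetical fd/ctx_id variables, render engine
 * 0, error handling omitted):
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = { I915_ENGINE_CLASS_RENDER, 0 },
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
 */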
234110be98a7SChris Wilson static int get_sseu(struct i915_gem_context *ctx,
234210be98a7SChris Wilson 		    struct drm_i915_gem_context_param *args)
234310be98a7SChris Wilson {
234410be98a7SChris Wilson 	struct drm_i915_gem_context_param_sseu user_sseu;
234510be98a7SChris Wilson 	struct intel_context *ce;
234610be98a7SChris Wilson 	unsigned long lookup;
234710be98a7SChris Wilson 	int err;
234810be98a7SChris Wilson 
234910be98a7SChris Wilson 	if (args->size == 0)
235010be98a7SChris Wilson 		goto out;
235110be98a7SChris Wilson 	else if (args->size < sizeof(user_sseu))
235210be98a7SChris Wilson 		return -EINVAL;
235310be98a7SChris Wilson 
235410be98a7SChris Wilson 	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
235510be98a7SChris Wilson 			   sizeof(user_sseu)))
235610be98a7SChris Wilson 		return -EFAULT;
235710be98a7SChris Wilson 
235810be98a7SChris Wilson 	if (user_sseu.rsvd)
235910be98a7SChris Wilson 		return -EINVAL;
236010be98a7SChris Wilson 
236110be98a7SChris Wilson 	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
236210be98a7SChris Wilson 		return -EINVAL;
236310be98a7SChris Wilson 
236410be98a7SChris Wilson 	lookup = 0;
236510be98a7SChris Wilson 	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
236610be98a7SChris Wilson 		lookup |= LOOKUP_USER_INDEX;
236710be98a7SChris Wilson 
236810be98a7SChris Wilson 	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
236910be98a7SChris Wilson 	if (IS_ERR(ce))
237010be98a7SChris Wilson 		return PTR_ERR(ce);
237110be98a7SChris Wilson 
237210be98a7SChris Wilson 	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
237310be98a7SChris Wilson 	if (err) {
237410be98a7SChris Wilson 		intel_context_put(ce);
237510be98a7SChris Wilson 		return err;
237610be98a7SChris Wilson 	}
237710be98a7SChris Wilson 
237810be98a7SChris Wilson 	user_sseu.slice_mask = ce->sseu.slice_mask;
237910be98a7SChris Wilson 	user_sseu.subslice_mask = ce->sseu.subslice_mask;
238010be98a7SChris Wilson 	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
238110be98a7SChris Wilson 	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
238210be98a7SChris Wilson 
238310be98a7SChris Wilson 	intel_context_unlock_pinned(ce);
238410be98a7SChris Wilson 	intel_context_put(ce);
238510be98a7SChris Wilson 
238610be98a7SChris Wilson 	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
238710be98a7SChris Wilson 			 sizeof(user_sseu)))
238810be98a7SChris Wilson 		return -EFAULT;
238910be98a7SChris Wilson 
239010be98a7SChris Wilson out:
239110be98a7SChris Wilson 	args->size = sizeof(user_sseu);
239210be98a7SChris Wilson 
239310be98a7SChris Wilson 	return 0;
239410be98a7SChris Wilson }
239510be98a7SChris Wilson 
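/*
 * GEM_CONTEXT_GETPARAM ioctl. Most parameters can be queried here (for
 * example I915_CONTEXT_PARAM_GTT_SIZE returns the total size of the
 * context's address space, in bytes, in the value field); parameters that
 * can no longer be queried (NO_ZEROMAP, BAN_PERIOD, ENGINES, RINGSIZE) are
 * rejected with -EINVAL.
 */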
239610be98a7SChris Wilson int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
239710be98a7SChris Wilson 				    struct drm_file *file)
239810be98a7SChris Wilson {
239910be98a7SChris Wilson 	struct drm_i915_file_private *file_priv = file->driver_priv;
240010be98a7SChris Wilson 	struct drm_i915_gem_context_param *args = data;
240110be98a7SChris Wilson 	struct i915_gem_context *ctx;
240224fad29eSDaniel Vetter 	struct i915_address_space *vm;
240310be98a7SChris Wilson 	int ret = 0;
240410be98a7SChris Wilson 
240510be98a7SChris Wilson 	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2406046d1660SJason Ekstrand 	if (IS_ERR(ctx))
2407046d1660SJason Ekstrand 		return PTR_ERR(ctx);
240810be98a7SChris Wilson 
240910be98a7SChris Wilson 	switch (args->param) {
241010be98a7SChris Wilson 	case I915_CONTEXT_PARAM_GTT_SIZE:
241110be98a7SChris Wilson 		args->size = 0;
241224fad29eSDaniel Vetter 		vm = i915_gem_context_get_eb_vm(ctx);
241324fad29eSDaniel Vetter 		args->value = vm->total;
241424fad29eSDaniel Vetter 		i915_vm_put(vm);
241524fad29eSDaniel Vetter 
241610be98a7SChris Wilson 		break;
241710be98a7SChris Wilson 
241810be98a7SChris Wilson 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
241910be98a7SChris Wilson 		args->size = 0;
242010be98a7SChris Wilson 		args->value = i915_gem_context_no_error_capture(ctx);
242110be98a7SChris Wilson 		break;
242210be98a7SChris Wilson 
242310be98a7SChris Wilson 	case I915_CONTEXT_PARAM_BANNABLE:
242410be98a7SChris Wilson 		args->size = 0;
242510be98a7SChris Wilson 		args->value = i915_gem_context_is_bannable(ctx);
242610be98a7SChris Wilson 		break;
242710be98a7SChris Wilson 
242810be98a7SChris Wilson 	case I915_CONTEXT_PARAM_RECOVERABLE:
242910be98a7SChris Wilson 		args->size = 0;
243010be98a7SChris Wilson 		args->value = i915_gem_context_is_recoverable(ctx);
243110be98a7SChris Wilson 		break;
243210be98a7SChris Wilson 
243310be98a7SChris Wilson 	case I915_CONTEXT_PARAM_PRIORITY:
243410be98a7SChris Wilson 		args->size = 0;
2435eb5c10cbSChris Wilson 		args->value = ctx->sched.priority;
243610be98a7SChris Wilson 		break;
243710be98a7SChris Wilson 
243810be98a7SChris Wilson 	case I915_CONTEXT_PARAM_SSEU:
243910be98a7SChris Wilson 		ret = get_sseu(ctx, args);
244010be98a7SChris Wilson 		break;
244110be98a7SChris Wilson 
244210be98a7SChris Wilson 	case I915_CONTEXT_PARAM_VM:
244310be98a7SChris Wilson 		ret = get_ppgtt(file_priv, ctx, args);
244410be98a7SChris Wilson 		break;
244510be98a7SChris Wilson 
2446a0e04715SChris Wilson 	case I915_CONTEXT_PARAM_PERSISTENCE:
2447a0e04715SChris Wilson 		args->size = 0;
2448a0e04715SChris Wilson 		args->value = i915_gem_context_is_persistent(ctx);
2449a0e04715SChris Wilson 		break;
2450a0e04715SChris Wilson 
2451d3ac8d42SDaniele Ceraolo Spurio 	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2452d3ac8d42SDaniele Ceraolo Spurio 		ret = get_protected(ctx, args);
2453d3ac8d42SDaniele Ceraolo Spurio 		break;
2454d3ac8d42SDaniele Ceraolo Spurio 
24556ff6d61dSJason Ekstrand 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
245610be98a7SChris Wilson 	case I915_CONTEXT_PARAM_BAN_PERIOD:
2457c7a71fc8SJason Ekstrand 	case I915_CONTEXT_PARAM_ENGINES:
2458fe4751c3SJason Ekstrand 	case I915_CONTEXT_PARAM_RINGSIZE:
245910be98a7SChris Wilson 	default:
246010be98a7SChris Wilson 		ret = -EINVAL;
246110be98a7SChris Wilson 		break;
246210be98a7SChris Wilson 	}
246310be98a7SChris Wilson 
246410be98a7SChris Wilson 	i915_gem_context_put(ctx);
246510be98a7SChris Wilson 	return ret;
246610be98a7SChris Wilson }
246710be98a7SChris Wilson 
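/*
 * GEM_CONTEXT_SETPARAM ioctl. If the ID still names an unfinalized
 * proto-context (only expected before graphics version 13) the parameter is
 * applied to it under the proto_context_lock; otherwise it is applied to the
 * live context via ctx_setparam().
 */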
246810be98a7SChris Wilson int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
246910be98a7SChris Wilson 				    struct drm_file *file)
247010be98a7SChris Wilson {
247110be98a7SChris Wilson 	struct drm_i915_file_private *file_priv = file->driver_priv;
247210be98a7SChris Wilson 	struct drm_i915_gem_context_param *args = data;
2473a4c1cdd3SJason Ekstrand 	struct i915_gem_proto_context *pc;
247410be98a7SChris Wilson 	struct i915_gem_context *ctx;
2475a4c1cdd3SJason Ekstrand 	int ret = 0;
247610be98a7SChris Wilson 
2477a4c1cdd3SJason Ekstrand 	mutex_lock(&file_priv->proto_context_lock);
2478a4c1cdd3SJason Ekstrand 	ctx = __context_lookup(file_priv, args->ctx_id);
2479a4c1cdd3SJason Ekstrand 	if (!ctx) {
2480a4c1cdd3SJason Ekstrand 		pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
2481ca06f936SJason Ekstrand 		if (pc) {
2482ca06f936SJason Ekstrand 			/* Contexts should be finalized inside
2483ca06f936SJason Ekstrand 			 * GEM_CONTEXT_CREATE starting with graphics
2484ca06f936SJason Ekstrand 			 * version 13.
2485ca06f936SJason Ekstrand 			 */
2486ca06f936SJason Ekstrand 			WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
2487a4c1cdd3SJason Ekstrand 			ret = set_proto_ctx_param(file_priv, pc, args);
2488ca06f936SJason Ekstrand 		} else {
2489a4c1cdd3SJason Ekstrand 			ret = -ENOENT;
2490a4c1cdd3SJason Ekstrand 		}
2491ca06f936SJason Ekstrand 	}
2492a4c1cdd3SJason Ekstrand 	mutex_unlock(&file_priv->proto_context_lock);
249310be98a7SChris Wilson 
2494a4c1cdd3SJason Ekstrand 	if (ctx) {
249510be98a7SChris Wilson 		ret = ctx_setparam(file_priv, ctx, args);
249610be98a7SChris Wilson 		i915_gem_context_put(ctx);
2497a4c1cdd3SJason Ekstrand 	}
2498a4c1cdd3SJason Ekstrand 
249910be98a7SChris Wilson 	return ret;
250010be98a7SChris Wilson }
250110be98a7SChris Wilson 
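/*
 * I915_GET_RESET_STATS ioctl. batch_active is taken from ctx->guilty_count
 * (hangs this context was blamed for) and batch_pending from
 * ctx->active_count (hangs it was involved in without being at fault), while
 * the global reset_count is only reported to CAP_SYS_ADMIN.
 */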
250210be98a7SChris Wilson int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
250310be98a7SChris Wilson 				       void *data, struct drm_file *file)
250410be98a7SChris Wilson {
2505a4e7ccdaSChris Wilson 	struct drm_i915_private *i915 = to_i915(dev);
250610be98a7SChris Wilson 	struct drm_i915_reset_stats *args = data;
250710be98a7SChris Wilson 	struct i915_gem_context *ctx;
250810be98a7SChris Wilson 
250910be98a7SChris Wilson 	if (args->flags || args->pad)
251010be98a7SChris Wilson 		return -EINVAL;
251110be98a7SChris Wilson 
2512a4839cb1SJason Ekstrand 	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
2513046d1660SJason Ekstrand 	if (IS_ERR(ctx))
2514046d1660SJason Ekstrand 		return PTR_ERR(ctx);
251510be98a7SChris Wilson 
251610be98a7SChris Wilson 	/*
251710be98a7SChris Wilson 	 * We opt for unserialised reads here. This may result in tearing
251810be98a7SChris Wilson 	 * in the extremely unlikely event of a GPU hang occurring on this
251910be98a7SChris Wilson 	 * context while we are querying its hangstats. If we need that extra
252010be98a7SChris Wilson 	 * layer of protection, we should wrap the hangstats with a seqlock.
252110be98a7SChris Wilson 	 */
252210be98a7SChris Wilson 
252310be98a7SChris Wilson 	if (capable(CAP_SYS_ADMIN))
2524a4e7ccdaSChris Wilson 		args->reset_count = i915_reset_count(&i915->gpu_error);
252510be98a7SChris Wilson 	else
252610be98a7SChris Wilson 		args->reset_count = 0;
252710be98a7SChris Wilson 
252810be98a7SChris Wilson 	args->batch_active = atomic_read(&ctx->guilty_count);
252910be98a7SChris Wilson 	args->batch_pending = atomic_read(&ctx->active_count);
253010be98a7SChris Wilson 
2531a4839cb1SJason Ekstrand 	i915_gem_context_put(ctx);
2532a4839cb1SJason Ekstrand 	return 0;
253310be98a7SChris Wilson }
253410be98a7SChris Wilson 
253510be98a7SChris Wilson /* GEM context-engines iterator: for_each_gem_engine(); skips empty engine slots */
253610be98a7SChris Wilson struct intel_context *
253710be98a7SChris Wilson i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
253810be98a7SChris Wilson {
253910be98a7SChris Wilson 	const struct i915_gem_engines *e = it->engines;
254010be98a7SChris Wilson 	struct intel_context *ctx;
254110be98a7SChris Wilson 
2542130a95e9SChris Wilson 	if (unlikely(!e))
2543130a95e9SChris Wilson 		return NULL;
2544130a95e9SChris Wilson 
254510be98a7SChris Wilson 	do {
254610be98a7SChris Wilson 		if (it->idx >= e->num_engines)
254710be98a7SChris Wilson 			return NULL;
254810be98a7SChris Wilson 
254910be98a7SChris Wilson 		ctx = e->engines[it->idx++];
255010be98a7SChris Wilson 	} while (!ctx);
255110be98a7SChris Wilson 
255210be98a7SChris Wilson 	return ctx;
255310be98a7SChris Wilson }
255410be98a7SChris Wilson 
255510be98a7SChris Wilson #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
255610be98a7SChris Wilson #include "selftests/mock_context.c"
255710be98a7SChris Wilson #include "selftests/i915_gem_context.c"
255810be98a7SChris Wilson #endif
255910be98a7SChris Wilson 
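/*
 * slab_luts is the module-lifetime cache backing struct i915_lut_handle,
 * the entries of the per-context GEM handle lookup tables.
 */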
2560a6270d1dSDaniel Vetter void i915_gem_context_module_exit(void)
256110be98a7SChris Wilson {
2562a6270d1dSDaniel Vetter 	kmem_cache_destroy(slab_luts);
256310be98a7SChris Wilson }
256410be98a7SChris Wilson 
2565a6270d1dSDaniel Vetter int __init i915_gem_context_module_init(void)
256610be98a7SChris Wilson {
2567a6270d1dSDaniel Vetter 	slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2568a6270d1dSDaniel Vetter 	if (!slab_luts)
256910be98a7SChris Wilson 		return -ENOMEM;
257010be98a7SChris Wilson 
257110be98a7SChris Wilson 	return 0;
257210be98a7SChris Wilson }
2573