110be98a7SChris Wilson /*
210be98a7SChris Wilson  * SPDX-License-Identifier: MIT
310be98a7SChris Wilson  *
410be98a7SChris Wilson  * Copyright © 2011-2012 Intel Corporation
510be98a7SChris Wilson  */
610be98a7SChris Wilson 
710be98a7SChris Wilson /*
810be98a7SChris Wilson  * This file implements HW context support. On gen5+ a HW context consists of an
910be98a7SChris Wilson  * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
1210be98a7SChris Wilson  * something like a context does exist for the media ring, the code only
1310be98a7SChris Wilson  * supports contexts for the render ring.
1410be98a7SChris Wilson  *
1510be98a7SChris Wilson  * In software, there is a distinction between contexts created by the user,
1610be98a7SChris Wilson  * and the default HW context. The default HW context is used by GPU clients
1710be98a7SChris Wilson  * that do not request setup of their own hardware context. The default
1810be98a7SChris Wilson  * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
2010be98a7SChris Wilson  * The default context only exists to give the GPU some offset to load as the
2110be98a7SChris Wilson  * current to invoke a save of the context we actually care about. In fact, the
2210be98a7SChris Wilson  * code could likely be constructed, albeit in a more complicated fashion, to
2310be98a7SChris Wilson  * never use the default context, though that limits the driver's ability to
2410be98a7SChris Wilson  * swap out, and/or destroy other contexts.
2510be98a7SChris Wilson  *
2610be98a7SChris Wilson  * All other contexts are created as a request by the GPU client. These contexts
2710be98a7SChris Wilson  * store GPU state, and thus allow GPU clients to not re-emit state (and
2810be98a7SChris Wilson  * potentially query certain state) at any time. The kernel driver makes
2910be98a7SChris Wilson  * certain that the appropriate commands are inserted.
3010be98a7SChris Wilson  *
3110be98a7SChris Wilson  * The context life cycle is semi-complicated in that context BOs may live
3210be98a7SChris Wilson  * longer than the context itself because of the way the hardware, and object
3310be98a7SChris Wilson  * tracking works. Below is a very crude representation of the state machine
3410be98a7SChris Wilson  * describing the context life.
3510be98a7SChris Wilson  *                                         refcount     pincount     active
3610be98a7SChris Wilson  * S0: initial state                          0            0           0
3710be98a7SChris Wilson  * S1: context created                        1            0           0
3810be98a7SChris Wilson  * S2: context is currently running           2            1           X
3910be98a7SChris Wilson  * S3: GPU referenced, but not current        2            0           1
4010be98a7SChris Wilson  * S4: context is current, but destroyed      1            1           0
4110be98a7SChris Wilson  * S5: like S3, but destroyed                 1            0           1
4210be98a7SChris Wilson  *
4310be98a7SChris Wilson  * The most common (but not all) transitions:
4410be98a7SChris Wilson  * S0->S1: client creates a context
4510be98a7SChris Wilson  * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
4710be98a7SChris Wilson  * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
4910be98a7SChris Wilson  * S2->S4: context destroy called with current context
5010be98a7SChris Wilson  * S3->S5->S0: destroy path
5110be98a7SChris Wilson  * S4->S5->S0: destroy path on current context
5210be98a7SChris Wilson  *
5310be98a7SChris Wilson  * There are two confusing terms used above:
5410be98a7SChris Wilson  *  The "current context" means the context which is currently running on the
5510be98a7SChris Wilson  *  GPU. The GPU has loaded its state already and has stored away the gtt
5610be98a7SChris Wilson  *  offset of the BO. The GPU is not actively referencing the data at this
5710be98a7SChris Wilson  *  offset, but it will on the next context switch. The only way to avoid this
5810be98a7SChris Wilson  *  is to do a GPU reset.
5910be98a7SChris Wilson  *
 *  An "active context" is one which was previously the "current context" and is
6110be98a7SChris Wilson  *  on the active list waiting for the next context switch to occur. Until this
6210be98a7SChris Wilson  *  happens, the object must remain at the same gtt offset. It is therefore
6310be98a7SChris Wilson  *  possible to destroy a context, but it is still active.
6410be98a7SChris Wilson  *
6510be98a7SChris Wilson  */
6610be98a7SChris Wilson 
67e9b67ec2SJani Nikula #include <linux/highmem.h>
6810be98a7SChris Wilson #include <linux/log2.h>
6910be98a7SChris Wilson #include <linux/nospec.h>
7010be98a7SChris Wilson 
715f2ec909SJani Nikula #include <drm/drm_cache.h>
7200dae4d3SJason Ekstrand #include <drm/drm_syncobj.h>
7300dae4d3SJason Ekstrand 
742c86e55dSMatthew Auld #include "gt/gen6_ppgtt.h"
759f3ccd40SChris Wilson #include "gt/intel_context.h"
7688be76cdSChris Wilson #include "gt/intel_context_param.h"
772e0986a5SChris Wilson #include "gt/intel_engine_heartbeat.h"
78750e76b4SChris Wilson #include "gt/intel_engine_user.h"
7945233ab2SChris Wilson #include "gt/intel_gpu_commands.h"
802871ea85SChris Wilson #include "gt/intel_ring.h"
8110be98a7SChris Wilson 
82d3ac8d42SDaniele Ceraolo Spurio #include "pxp/intel_pxp.h"
83d3ac8d42SDaniele Ceraolo Spurio 
845472b3f2SJani Nikula #include "i915_file_private.h"
8510be98a7SChris Wilson #include "i915_gem_context.h"
8610be98a7SChris Wilson #include "i915_trace.h"
8710be98a7SChris Wilson #include "i915_user_extensions.h"
8810be98a7SChris Wilson 
8910be98a7SChris Wilson #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
9010be98a7SChris Wilson 
91a6270d1dSDaniel Vetter static struct kmem_cache *slab_luts;
9210be98a7SChris Wilson 
/* Allocate a handle->vma lookup-table entry from the dedicated slab cache. */
struct i915_lut_handle *i915_lut_handle_alloc(void)
{
	return kmem_cache_alloc(slab_luts, GFP_KERNEL);
}
9710be98a7SChris Wilson 
i915_lut_handle_free(struct i915_lut_handle * lut)9810be98a7SChris Wilson void i915_lut_handle_free(struct i915_lut_handle *lut)
9910be98a7SChris Wilson {
100a6270d1dSDaniel Vetter 	return kmem_cache_free(slab_luts, lut);
10110be98a7SChris Wilson }
10210be98a7SChris Wilson 
/*
 * Tear down the context's handle->vma lookup table.
 *
 * For every vma still present in ctx->handles_vma, locate the matching
 * i915_lut_handle on the object's lut_list, unlink it, drop the
 * radix-tree slot and release the references the LUT held on the vma
 * and its backing object.
 */
static void lut_close(struct i915_gem_context *ctx)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	mutex_lock(&ctx->lut_mutex);
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_lut_handle *lut;

		/* Skip objects whose last reference is already being dropped. */
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		/* Find the LUT entry for this (ctx, handle) pair, if any. */
		spin_lock(&obj->lut_lock);
		list_for_each_entry(lut, &obj->lut_list, obj_link) {
			if (lut->ctx != ctx)
				continue;

			if (lut->handle != iter.index)
				continue;

			list_del(&lut->obj_link);
			break;
		}
		spin_unlock(&obj->lut_lock);

		/* True iff the loop above found (and unlinked) an entry. */
		if (&lut->obj_link != &obj->lut_list) {
			i915_lut_handle_free(lut);
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
			i915_vma_close(vma);
			/* Drop the reference the LUT entry owned. */
			i915_gem_object_put(obj);
		}

		/* Drop the temporary reference taken above. */
		i915_gem_object_put(obj);
	}
	rcu_read_unlock();
	mutex_unlock(&ctx->lut_mutex);
}
14310be98a7SChris Wilson 
/*
 * Resolve a userspace engine specification to an intel_context.
 *
 * With LOOKUP_USER_INDEX set, @ci->engine_instance indexes the context's
 * user-supplied engine map; otherwise @ci is a (class, instance) pair
 * resolved against the physical engines. The flag must agree with
 * whether the context actually has a user engine map, else -EINVAL.
 */
static struct intel_context *
lookup_user_engine(struct i915_gem_context *ctx,
		   unsigned long flags,
		   const struct i915_engine_class_instance *ci)
#define LOOKUP_USER_INDEX BIT(0)
{
	int idx;

	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
		return ERR_PTR(-EINVAL);

	if (!i915_gem_context_user_engines(ctx)) {
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(ctx->i915,
						  ci->engine_class,
						  ci->engine_instance);
		if (!engine)
			return ERR_PTR(-EINVAL);

		/* Map the physical engine onto its legacy ABI slot. */
		idx = engine->legacy_idx;
	} else {
		idx = ci->engine_instance;
	}

	return i915_gem_context_get_engine(ctx, idx);
}
17110be98a7SChris Wilson 
validate_priority(struct drm_i915_private * i915,const struct drm_i915_gem_context_param * args)172aaa5957cSJason Ekstrand static int validate_priority(struct drm_i915_private *i915,
173aaa5957cSJason Ekstrand 			     const struct drm_i915_gem_context_param *args)
174aaa5957cSJason Ekstrand {
175aaa5957cSJason Ekstrand 	s64 priority = args->value;
176aaa5957cSJason Ekstrand 
177aaa5957cSJason Ekstrand 	if (args->size)
178aaa5957cSJason Ekstrand 		return -EINVAL;
179aaa5957cSJason Ekstrand 
180aaa5957cSJason Ekstrand 	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
181aaa5957cSJason Ekstrand 		return -ENODEV;
182aaa5957cSJason Ekstrand 
183aaa5957cSJason Ekstrand 	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
184aaa5957cSJason Ekstrand 	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
185aaa5957cSJason Ekstrand 		return -EINVAL;
186aaa5957cSJason Ekstrand 
187aaa5957cSJason Ekstrand 	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
188aaa5957cSJason Ekstrand 	    !capable(CAP_SYS_NICE))
189aaa5957cSJason Ekstrand 		return -EPERM;
190aaa5957cSJason Ekstrand 
191aaa5957cSJason Ekstrand 	return 0;
192aaa5957cSJason Ekstrand }
193aaa5957cSJason Ekstrand 
/*
 * Free a proto-context and everything it owns: the PXP runtime-pm
 * wakeref (if protected content was enabled), the VM reference, the
 * user-engine array (including each entry's siblings array), and
 * finally the proto-context itself.
 */
static void proto_context_close(struct drm_i915_private *i915,
				struct i915_gem_proto_context *pc)
{
	int i;

	if (pc->pxp_wakeref)
		intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
	if (pc->vm)
		i915_vm_put(pc->vm);
	if (pc->user_engines) {
		for (i = 0; i < pc->num_user_engines; i++)
			kfree(pc->user_engines[i].siblings);
		kfree(pc->user_engines);
	}
	kfree(pc);
}
210a34857dcSJason Ekstrand 
/*
 * Set or clear UCONTEXT_PERSISTENCE on a proto-context.
 *
 * Returns 0 on success; -EINVAL if persistence is requested while
 * hangcheck is disabled; -ENODEV if non-persistence cannot be honoured
 * (no preemption capability or no per-engine reset).
 */
static int proto_context_set_persistence(struct drm_i915_private *i915,
					 struct i915_gem_proto_context *pc,
					 bool persist)
{
	if (persist) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are healthy.
		 */
		if (!i915->params.enable_hangcheck)
			return -EINVAL;

		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
	} else {
		/* To cancel a context we use "preempt-to-idle" */
		if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
			return -ENODEV;

		/*
		 * If the cancel fails, we then need to reset, cleanly!
		 *
		 * If the per-engine reset fails, all hope is lost! We resort
		 * to a full GPU reset in that unlikely case, but realistically
		 * if the engine could not reset, the full reset does not fare
		 * much better. The damage has been done.
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * cleanup a hanging persistent context without causing
		 * collateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(to_gt(i915)))
			return -ENODEV;

		pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
	}

	return 0;
}
251d4433c76SJason Ekstrand 
/*
 * Enable or disable protected (PXP) content on a proto-context.
 *
 * Enabling requires PXP to be available and the context to be bannable
 * and non-recoverable; it also takes a runtime-pm wakeref (released in
 * proto_context_close()) and kicks off a PXP session if none is active.
 * Returns 0 on success, -ENODEV or -EPERM on constraint failure.
 */
static int proto_context_set_protected(struct drm_i915_private *i915,
				       struct i915_gem_proto_context *pc,
				       bool protected)
{
	int ret = 0;

	if (!protected) {
		pc->uses_protected_content = false;
	} else if (!intel_pxp_is_enabled(i915->pxp)) {
		ret = -ENODEV;
	} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
		   !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
		ret = -EPERM;
	} else {
		pc->uses_protected_content = true;

		/*
		 * protected context usage requires the PXP session to be up,
		 * which in turn requires the device to be active.
		 */
		pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);

		if (!intel_pxp_is_active(i915->pxp))
			ret = intel_pxp_start(i915->pxp);
	}

	return ret;
}
280d3ac8d42SDaniele Ceraolo Spurio 
/*
 * Allocate a proto-context with default flags: bannable, recoverable,
 * persistent (when hangcheck is enabled) and normal priority. The only
 * creation flag honoured here is SINGLE_TIMELINE, which requires
 * execlists. Returns the new proto-context or an ERR_PTR.
 */
static struct i915_gem_proto_context *
proto_context_create(struct drm_i915_private *i915, unsigned int flags)
{
	struct i915_gem_proto_context *pc, *err;

	pc = kzalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return ERR_PTR(-ENOMEM);

	/* -1 means "no user engine map configured". */
	pc->num_user_engines = -1;
	pc->user_engines = NULL;
	pc->user_flags = BIT(UCONTEXT_BANNABLE) |
			 BIT(UCONTEXT_RECOVERABLE);
	if (i915->params.enable_hangcheck)
		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
	pc->sched.priority = I915_PRIORITY_NORMAL;

	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
		if (!HAS_EXECLISTS(i915)) {
			err = ERR_PTR(-EINVAL);
			goto proto_close;
		}
		pc->single_timeline = true;
	}

	return pc;

proto_close:
	proto_context_close(i915, pc);
	return err;
}
312a34857dcSJason Ekstrand 
/*
 * Reserve an id in context_xa and publish the proto-context under the
 * same id in proto_context_xa. The NULL placeholder in context_xa keeps
 * the id claimed until a finalized context replaces it. Caller must
 * hold fpriv->proto_context_lock.
 */
static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
					 struct i915_gem_proto_context *pc,
					 u32 *id)
{
	int ret;
	void *old;

	lockdep_assert_held(&fpriv->proto_context_lock);

	ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
	if (xa_is_err(old)) {
		/* Unwind the id reservation on failure. */
		xa_erase(&fpriv->context_xa, *id);
		return xa_err(old);
	}
	/* The freshly allocated id cannot already map to a proto-context. */
	WARN_ON(old);

	return 0;
}
335a4c1cdd3SJason Ekstrand 
/*
 * Locked wrapper around proto_context_register_locked(): takes the
 * per-file proto_context_lock for the duration of the registration.
 */
static int proto_context_register(struct drm_i915_file_private *fpriv,
				  struct i915_gem_proto_context *pc,
				  u32 *id)
{
	int err;

	mutex_lock(&fpriv->proto_context_lock);
	err = proto_context_register_locked(fpriv, pc, id);
	mutex_unlock(&fpriv->proto_context_lock);

	return err;
}
348a4c1cdd3SJason Ekstrand 
/*
 * Look up a VM by its user-visible id and return it with an extra
 * reference held, or NULL if the id is unknown. The kref is taken
 * under the xarray lock so the VM cannot disappear between the load
 * and the get.
 */
static struct i915_address_space *
i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_address_space *vm;

	xa_lock(&file_priv->vm_xa);
	vm = xa_load(&file_priv->vm_xa, id);
	if (vm)
		kref_get(&vm->ref);
	xa_unlock(&file_priv->vm_xa);

	return vm;
}
362d83d5298SJani Nikula 
/*
 * CONTEXT_PARAM_VM: attach a user-created ppGTT VM to the
 * proto-context, replacing (and releasing) any VM set previously.
 * The VM id must fit in the low 32 bits of args->value.
 */
static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
			    struct i915_gem_proto_context *pc,
			    const struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = fpriv->i915;
	struct i915_address_space *vm;

	if (args->size)
		return -EINVAL;

	if (!HAS_FULL_PPGTT(i915))
		return -ENODEV;

	/* VM ids are 32-bit; anything wider cannot name a VM. */
	if (upper_32_bits(args->value))
		return -ENOENT;

	/* Takes a reference on success; ownership moves to pc->vm below. */
	vm = i915_gem_vm_lookup(fpriv, args->value);
	if (!vm)
		return -ENOENT;

	if (pc->vm)
		i915_vm_put(pc->vm);
	pc->vm = vm;

	return 0;
}
389d4433c76SJason Ekstrand 
/* Shared cursor passed to the CONTEXT_PARAM_ENGINES extension handlers. */
struct set_proto_ctx_engines {
	struct drm_i915_private *i915;
	unsigned num_engines;	/* length of the engines array below */
	struct i915_gem_proto_engine *engines;
};
395d4433c76SJason Ekstrand 
/*
 * I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE: configure slot @engine_index
 * of the proto-engine array as a load-balanced (virtual) engine over
 * the user-supplied sibling list. A single sibling degenerates into a
 * plain physical engine; zero siblings leaves the slot untouched.
 */
static int
set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
			      void *data)
{
	struct i915_context_engines_load_balance __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	struct intel_engine_cs **siblings;
	u16 num_siblings, idx;
	unsigned int n;
	int err;

	if (!HAS_EXECLISTS(i915))
		return -ENODEV;

	if (get_user(idx, &ext->engine_index))
		return -EFAULT;

	if (idx >= set->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			idx, set->num_engines);
		return -EINVAL;
	}

	/* Clamp speculation on the user-controlled index (Spectre v1). */
	idx = array_index_nospec(idx, set->num_engines);
	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", idx);
		return -EEXIST;
	}

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	/* Reserved fields must be zero. */
	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	err = check_user_mbz(&ext->mbz64);
	if (err)
		return err;

	if (num_siblings == 0)
		return 0;

	siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
	if (!siblings)
		return -ENOMEM;

	for (n = 0; n < num_siblings; n++) {
		struct i915_engine_class_instance ci;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
			err = -EFAULT;
			goto err_siblings;
		}

		siblings[n] = intel_engine_lookup_user(i915,
						       ci.engine_class,
						       ci.engine_instance);
		if (!siblings[n]) {
			drm_dbg(&i915->drm,
				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			err = -EINVAL;
			goto err_siblings;
		}
	}

	if (num_siblings == 1) {
		/* One sibling is just that physical engine; no balancing. */
		set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
		set->engines[idx].engine = siblings[0];
		kfree(siblings);
	} else {
		/* The proto-engine takes ownership of the siblings array. */
		set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
		set->engines[idx].num_siblings = num_siblings;
		set->engines[idx].siblings = siblings;
	}

	return 0;

err_siblings:
	kfree(siblings);

	return err;
}
483d4433c76SJason Ekstrand 
/*
 * I915_CONTEXT_ENGINES_EXT_BOND: validate a bonding request for the
 * virtual engine at @virtual_index. Only pre-gen12 platforms (plus
 * TGL/RKL/ADL-S) and execlists submission accept this extension; note
 * that the master and bond engines are only looked up and validated
 * here — no bonding state is recorded in the proto-engine.
 */
static int
set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_bond __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	struct i915_engine_class_instance ci;
	struct intel_engine_cs *master;
	u16 idx, num_bonds;
	int err, n;

	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
	    !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
		drm_dbg(&i915->drm,
			"Bonding not supported on this platform\n");
		return -ENODEV;
	}

	if (get_user(idx, &ext->virtual_index))
		return -EFAULT;

	if (idx >= set->num_engines) {
		drm_dbg(&i915->drm,
			"Invalid index for virtual engine: %d >= %d\n",
			idx, set->num_engines);
		return -EINVAL;
	}

	/* Clamp speculation on the user-controlled index (Spectre v1). */
	idx = array_index_nospec(idx, set->num_engines);
	if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
		return -EINVAL;
	}

	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
		drm_dbg(&i915->drm,
			"Bonding with virtual engines not allowed\n");
		return -EINVAL;
	}

	/* Reserved fields must be zero. */
	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
		return -EFAULT;

	master = intel_engine_lookup_user(i915,
					  ci.engine_class,
					  ci.engine_instance);
	if (!master) {
		drm_dbg(&i915->drm,
			"Unrecognised master engine: { class:%u, instance:%u }\n",
			ci.engine_class, ci.engine_instance);
		return -EINVAL;
	}

	if (intel_engine_uses_guc(master)) {
		drm_dbg(&i915->drm, "bonding extension not supported with GuC submission");
		return -ENODEV;
	}

	if (get_user(num_bonds, &ext->num_bonds))
		return -EFAULT;

	/* Each bond target is validated; the lookup result is discarded. */
	for (n = 0; n < num_bonds; n++) {
		struct intel_engine_cs *bond;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
			return -EFAULT;

		bond = intel_engine_lookup_user(i915,
						ci.engine_class,
						ci.engine_instance);
		if (!bond) {
			drm_dbg(&i915->drm,
				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
				n, ci.engine_class, ci.engine_instance);
			return -EINVAL;
		}
	}

	return 0;
}
575d4433c76SJason Ekstrand 
576e5e32171SMatthew Brost static int
set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user * base,void * data)577e5e32171SMatthew Brost set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
578e5e32171SMatthew Brost 				      void *data)
579e5e32171SMatthew Brost {
580e5e32171SMatthew Brost 	struct i915_context_engines_parallel_submit __user *ext =
581e5e32171SMatthew Brost 		container_of_user(base, typeof(*ext), base);
582e5e32171SMatthew Brost 	const struct set_proto_ctx_engines *set = data;
583e5e32171SMatthew Brost 	struct drm_i915_private *i915 = set->i915;
5840f9d36afSMatthew Brost 	struct i915_engine_class_instance prev_engine;
585e5e32171SMatthew Brost 	u64 flags;
586e5e32171SMatthew Brost 	int err = 0, n, i, j;
587e5e32171SMatthew Brost 	u16 slot, width, num_siblings;
588e5e32171SMatthew Brost 	struct intel_engine_cs **siblings = NULL;
589e5e32171SMatthew Brost 	intel_engine_mask_t prev_mask;
590e5e32171SMatthew Brost 
591e5e32171SMatthew Brost 	if (get_user(slot, &ext->engine_index))
592e5e32171SMatthew Brost 		return -EFAULT;
593e5e32171SMatthew Brost 
594e5e32171SMatthew Brost 	if (get_user(width, &ext->width))
595e5e32171SMatthew Brost 		return -EFAULT;
596e5e32171SMatthew Brost 
597e5e32171SMatthew Brost 	if (get_user(num_siblings, &ext->num_siblings))
598e5e32171SMatthew Brost 		return -EFAULT;
599e5e32171SMatthew Brost 
600a88afcfaSMatthew Brost 	if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) &&
601a88afcfaSMatthew Brost 	    num_siblings != 1) {
602a88afcfaSMatthew Brost 		drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",
603a88afcfaSMatthew Brost 			num_siblings);
604a88afcfaSMatthew Brost 		return -EINVAL;
605a88afcfaSMatthew Brost 	}
606a88afcfaSMatthew Brost 
607e5e32171SMatthew Brost 	if (slot >= set->num_engines) {
608e5e32171SMatthew Brost 		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
609e5e32171SMatthew Brost 			slot, set->num_engines);
610e5e32171SMatthew Brost 		return -EINVAL;
611e5e32171SMatthew Brost 	}
612e5e32171SMatthew Brost 
613e5e32171SMatthew Brost 	if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
614e5e32171SMatthew Brost 		drm_dbg(&i915->drm,
615e5e32171SMatthew Brost 			"Invalid placement[%d], already occupied\n", slot);
616e5e32171SMatthew Brost 		return -EINVAL;
617e5e32171SMatthew Brost 	}
618e5e32171SMatthew Brost 
619e5e32171SMatthew Brost 	if (get_user(flags, &ext->flags))
620e5e32171SMatthew Brost 		return -EFAULT;
621e5e32171SMatthew Brost 
622e5e32171SMatthew Brost 	if (flags) {
623e5e32171SMatthew Brost 		drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
624e5e32171SMatthew Brost 		return -EINVAL;
625e5e32171SMatthew Brost 	}
626e5e32171SMatthew Brost 
627e5e32171SMatthew Brost 	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
628e5e32171SMatthew Brost 		err = check_user_mbz(&ext->mbz64[n]);
629e5e32171SMatthew Brost 		if (err)
630e5e32171SMatthew Brost 			return err;
631e5e32171SMatthew Brost 	}
632e5e32171SMatthew Brost 
633e5e32171SMatthew Brost 	if (width < 2) {
634e5e32171SMatthew Brost 		drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
635e5e32171SMatthew Brost 		return -EINVAL;
636e5e32171SMatthew Brost 	}
637e5e32171SMatthew Brost 
638e5e32171SMatthew Brost 	if (num_siblings < 1) {
639e5e32171SMatthew Brost 		drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
640e5e32171SMatthew Brost 			num_siblings);
641e5e32171SMatthew Brost 		return -EINVAL;
642e5e32171SMatthew Brost 	}
643e5e32171SMatthew Brost 
644e5e32171SMatthew Brost 	siblings = kmalloc_array(num_siblings * width,
645e5e32171SMatthew Brost 				 sizeof(*siblings),
646e5e32171SMatthew Brost 				 GFP_KERNEL);
647e5e32171SMatthew Brost 	if (!siblings)
648e5e32171SMatthew Brost 		return -ENOMEM;
649e5e32171SMatthew Brost 
650e5e32171SMatthew Brost 	/* Create contexts / engines */
651e5e32171SMatthew Brost 	for (i = 0; i < width; ++i) {
652e5e32171SMatthew Brost 		intel_engine_mask_t current_mask = 0;
653e5e32171SMatthew Brost 
654e5e32171SMatthew Brost 		for (j = 0; j < num_siblings; ++j) {
655e5e32171SMatthew Brost 			struct i915_engine_class_instance ci;
656e5e32171SMatthew Brost 
657e5e32171SMatthew Brost 			n = i * num_siblings + j;
658e5e32171SMatthew Brost 			if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
659e5e32171SMatthew Brost 				err = -EFAULT;
660e5e32171SMatthew Brost 				goto out_err;
661e5e32171SMatthew Brost 			}
662e5e32171SMatthew Brost 
663e5e32171SMatthew Brost 			siblings[n] =
664e5e32171SMatthew Brost 				intel_engine_lookup_user(i915, ci.engine_class,
665e5e32171SMatthew Brost 							 ci.engine_instance);
666e5e32171SMatthew Brost 			if (!siblings[n]) {
667e5e32171SMatthew Brost 				drm_dbg(&i915->drm,
668e5e32171SMatthew Brost 					"Invalid sibling[%d]: { class:%d, inst:%d }\n",
669e5e32171SMatthew Brost 					n, ci.engine_class, ci.engine_instance);
670e5e32171SMatthew Brost 				err = -EINVAL;
671e5e32171SMatthew Brost 				goto out_err;
672e5e32171SMatthew Brost 			}
673e5e32171SMatthew Brost 
674e393e2aaSMatthew Brost 			/*
675e393e2aaSMatthew Brost 			 * We don't support breadcrumb handshake on these
676e393e2aaSMatthew Brost 			 * classes
677e393e2aaSMatthew Brost 			 */
678e393e2aaSMatthew Brost 			if (siblings[n]->class == RENDER_CLASS ||
679e393e2aaSMatthew Brost 			    siblings[n]->class == COMPUTE_CLASS) {
680e393e2aaSMatthew Brost 				err = -EINVAL;
681e393e2aaSMatthew Brost 				goto out_err;
682e393e2aaSMatthew Brost 			}
683e393e2aaSMatthew Brost 
684e5e32171SMatthew Brost 			if (n) {
685e5e32171SMatthew Brost 				if (prev_engine.engine_class !=
686e5e32171SMatthew Brost 				    ci.engine_class) {
687e5e32171SMatthew Brost 					drm_dbg(&i915->drm,
688e5e32171SMatthew Brost 						"Mismatched class %d, %d\n",
689e5e32171SMatthew Brost 						prev_engine.engine_class,
690e5e32171SMatthew Brost 						ci.engine_class);
691e5e32171SMatthew Brost 					err = -EINVAL;
692e5e32171SMatthew Brost 					goto out_err;
693e5e32171SMatthew Brost 				}
694e5e32171SMatthew Brost 			}
695e5e32171SMatthew Brost 
696e5e32171SMatthew Brost 			prev_engine = ci;
697e5e32171SMatthew Brost 			current_mask |= siblings[n]->logical_mask;
698e5e32171SMatthew Brost 		}
699e5e32171SMatthew Brost 
700e5e32171SMatthew Brost 		if (i > 0) {
701e5e32171SMatthew Brost 			if (current_mask != prev_mask << 1) {
702e5e32171SMatthew Brost 				drm_dbg(&i915->drm,
703e5e32171SMatthew Brost 					"Non contiguous logical mask 0x%x, 0x%x\n",
704e5e32171SMatthew Brost 					prev_mask, current_mask);
705e5e32171SMatthew Brost 				err = -EINVAL;
706e5e32171SMatthew Brost 				goto out_err;
707e5e32171SMatthew Brost 			}
708e5e32171SMatthew Brost 		}
709e5e32171SMatthew Brost 		prev_mask = current_mask;
710e5e32171SMatthew Brost 	}
711e5e32171SMatthew Brost 
712e5e32171SMatthew Brost 	set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
713e5e32171SMatthew Brost 	set->engines[slot].num_siblings = num_siblings;
714e5e32171SMatthew Brost 	set->engines[slot].width = width;
715e5e32171SMatthew Brost 	set->engines[slot].siblings = siblings;
716e5e32171SMatthew Brost 
717e5e32171SMatthew Brost 	return 0;
718e5e32171SMatthew Brost 
719e5e32171SMatthew Brost out_err:
720e5e32171SMatthew Brost 	kfree(siblings);
721e5e32171SMatthew Brost 
722e5e32171SMatthew Brost 	return err;
723e5e32171SMatthew Brost }
724e5e32171SMatthew Brost 
/*
 * Dispatch table for the extension chain of I915_CONTEXT_PARAM_ENGINES,
 * indexed by the uAPI extension id found in each i915_user_extension.
 */
static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
	[I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
	[I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
		set_proto_ctx_engines_parallel_submit,
};
731d4433c76SJason Ekstrand 
/*
 * Handler for I915_CONTEXT_PARAM_ENGINES on a proto-context: replace the
 * implicit legacy engine map with an explicit user-supplied engine array,
 * then run any chained extensions (load-balance, bond, parallel-submit)
 * against it. May only be done once per proto-context.
 *
 * On success ownership of the engine array moves to @pc (user_engines);
 * on failure everything allocated here is released and an errno returned.
 */
static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
				 struct i915_gem_proto_context *pc,
				 const struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = fpriv->i915;
	struct set_proto_ctx_engines set = { .i915 = i915 };
	struct i915_context_param_engines __user *user =
		u64_to_user_ptr(args->value);
	unsigned int n;
	u64 extensions;
	int err;

	/* num_user_engines < 0 means "not yet set"; reject a second set. */
	if (pc->num_user_engines >= 0) {
		drm_dbg(&i915->drm, "Cannot set engines twice");
		return -EINVAL;
	}

	/* args->size must be the header plus a whole number of entries. */
	if (args->size < sizeof(*user) ||
	    !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
			args->size);
		return -EINVAL;
	}

	set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
	/* RING_MASK has no shift so we can use it directly here */
	if (set.num_engines > I915_EXEC_RING_MASK + 1)
		return -EINVAL;

	set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
	if (!set.engines)
		return -ENOMEM;

	for (n = 0; n < set.num_engines; n++) {
		struct i915_engine_class_instance ci;
		struct intel_engine_cs *engine;

		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
			kfree(set.engines);
			return -EFAULT;
		}

		memset(&set.engines[n], 0, sizeof(set.engines[n]));

		/* INVALID/INVALID_NONE marks an intentional hole in the map. */
		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
			continue;

		engine = intel_engine_lookup_user(i915,
						  ci.engine_class,
						  ci.engine_instance);
		if (!engine) {
			drm_dbg(&i915->drm,
				"Invalid engine[%d]: { class:%d, instance:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			kfree(set.engines);
			return -ENOENT;
		}

		set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
		set.engines[n].engine = engine;
	}

	/* Walk the user's extension chain; -EFAULT if we can't even read it. */
	err = -EFAULT;
	if (!get_user(extensions, &user->extensions))
		err = i915_user_extensions(u64_to_user_ptr(extensions),
					   set_proto_ctx_engines_extensions,
					   ARRAY_SIZE(set_proto_ctx_engines_extensions),
					   &set);
	if (err) {
		/*
		 * NOTE(review): an extension that succeeded before a later
		 * one failed may have attached siblings arrays to entries of
		 * set.engines[]; kfree() of the outer array alone looks like
		 * it could leak those — verify against the extension handlers.
		 */
		kfree(set.engines);
		return err;
	}

	pc->num_user_engines = set.num_engines;
	pc->user_engines = set.engines;

	return 0;
}
811d4433c76SJason Ekstrand 
/*
 * Handler for I915_CONTEXT_PARAM_SSEU on a proto-context: record a user
 * slice/subslice/EU configuration to be applied when the real context is
 * created. Only supported on GRAPHICS_VER 11 and only for the render class.
 *
 * The ENGINE_INDEX flag selects between the explicit user engine map
 * (engine_instance is then an index into it) and the legacy render engine.
 */
static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
			      struct i915_gem_proto_context *pc,
			      struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = fpriv->i915;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_sseu *sseu;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (GRAPHICS_VER(i915) != 11)
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	/* ENGINE_INDEX is mandatory iff a user engine map was installed. */
	if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
		return -EINVAL;

	if (pc->num_user_engines >= 0) {
		int idx = user_sseu.engine.engine_instance;
		struct i915_gem_proto_engine *pe;

		if (idx >= pc->num_user_engines)
			return -EINVAL;

		/* Clamp the index against Spectre-v1 speculation. */
		idx = array_index_nospec(idx, pc->num_user_engines);
		pe = &pc->user_engines[idx];

		/* Only render engine supports RPCS configuration. */
		/*
		 * NOTE(review): pe->engine is dereferenced here; for
		 * non-PHYSICAL proto engines (balanced/parallel) it is not
		 * obvious from this file that ->engine is non-NULL — verify.
		 */
		if (pe->engine->class != RENDER_CLASS)
			return -EINVAL;

		sseu = &pe->sseu;
	} else {
		/* Only render engine supports RPCS configuration. */
		if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
			return -EINVAL;

		/* There is only one render engine */
		if (user_sseu.engine.engine_instance != 0)
			return -EINVAL;

		sseu = &pc->legacy_rcs_sseu;
	}

	ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
	if (ret)
		return ret;

	/* Report back how much of the argument we consumed. */
	args->size = sizeof(user_sseu);

	return 0;
}
875d4433c76SJason Ekstrand 
/*
 * Apply a single CONTEXT_SETPARAM to a proto-context (i.e. before the real
 * i915_gem_context exists). Each case validates args->size/value and either
 * updates pc->user_flags or delegates to a dedicated handler. Returns 0 or
 * a negative errno; unknown and removed params return -EINVAL.
 */
static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
			       struct i915_gem_proto_context *pc,
			       struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
		else
			pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		/* Only CAP_SYS_ADMIN may opt out of banning... */
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			pc->user_flags |= BIT(UCONTEXT_BANNABLE);
		/* ...and protected-content contexts must stay bannable. */
		else if (pc->uses_protected_content)
			ret = -EPERM;
		else
			pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!args->value)
			pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
		/* Protected-content contexts cannot be made recoverable. */
		else if (pc->uses_protected_content)
			ret = -EPERM;
		else
			pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		ret = validate_priority(fpriv->i915, args);
		if (!ret)
			pc->sched.priority = args->value;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_proto_ctx_sseu(fpriv, pc, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = set_proto_ctx_vm(fpriv, pc, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = set_proto_ctx_engines(fpriv, pc, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		if (args->size)
			ret = -EINVAL;
		else
			ret = proto_context_set_persistence(fpriv->i915, pc,
							    args->value);
		break;

	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
		ret = proto_context_set_protected(fpriv->i915, pc,
						  args->value);
		break;

	/* Removed uAPI params: deliberately rejected, not silently ignored. */
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_RINGSIZE:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
957d4433c76SJason Ekstrand 
/*
 * Bind a freshly created intel_context to its owning GEM context: publish
 * the back-pointer, size the ring, adopt the context's VM and scheduling
 * attributes, and apply any requested SSEU configuration.
 *
 * Must be called before the context is pinned (asserted below).
 * Returns 0 or the error from intel_context_reconfigure_sseu().
 */
static int intel_context_set_gem(struct intel_context *ce,
				 struct i915_gem_context *ctx,
				 struct intel_sseu sseu)
{
	int ret = 0;

	/* Publish the GEM context pointer for RCU readers exactly once. */
	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
	RCU_INIT_POINTER(ce->gem_context, ctx);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	/* Compute engines get a larger ring than the default 16K. */
	if (ce->engine->class == COMPUTE_CLASS)
		ce->ring_size = SZ_512K;
	else
		ce->ring_size = SZ_16K;

	/* Swap the engine's default vm for the GEM context's execbuf vm. */
	i915_vm_put(ce->vm);
	ce->vm = i915_gem_context_get_eb_vm(ctx);

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
	    intel_engine_has_timeslices(ce->engine) &&
	    intel_engine_has_semaphores(ce->engine))
		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);

	/* Optional per-request watchdog, converted from ms to us. */
	if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
	    ctx->i915->params.request_timeout_ms) {
		unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;

		intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
	}

	/* A valid SSEU has no zero fields */
	if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
		ret = intel_context_reconfigure_sseu(ce, sseu);

	return ret;
}
995e6ba7648SChris Wilson 
__unpin_engines(struct i915_gem_engines * e,unsigned int count)996e5e32171SMatthew Brost static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
997e5e32171SMatthew Brost {
998e5e32171SMatthew Brost 	while (count--) {
999e5e32171SMatthew Brost 		struct intel_context *ce = e->engines[count], *child;
1000e5e32171SMatthew Brost 
1001e5e32171SMatthew Brost 		if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
1002e5e32171SMatthew Brost 			continue;
1003e5e32171SMatthew Brost 
1004e5e32171SMatthew Brost 		for_each_child(ce, child)
1005e5e32171SMatthew Brost 			intel_context_unpin(child);
1006e5e32171SMatthew Brost 		intel_context_unpin(ce);
1007e5e32171SMatthew Brost 	}
1008e5e32171SMatthew Brost }
1009e5e32171SMatthew Brost 
/* Drop the perma-pins held across all engines of @e. */
static void unpin_engines(struct i915_gem_engines *e)
{
	__unpin_engines(e, e->num_engines);
}
1014e5e32171SMatthew Brost 
__free_engines(struct i915_gem_engines * e,unsigned int count)101510be98a7SChris Wilson static void __free_engines(struct i915_gem_engines *e, unsigned int count)
101610be98a7SChris Wilson {
101710be98a7SChris Wilson 	while (count--) {
101810be98a7SChris Wilson 		if (!e->engines[count])
101910be98a7SChris Wilson 			continue;
102010be98a7SChris Wilson 
102110be98a7SChris Wilson 		intel_context_put(e->engines[count]);
102210be98a7SChris Wilson 	}
102310be98a7SChris Wilson 	kfree(e);
102410be98a7SChris Wilson }
102510be98a7SChris Wilson 
/* Free @e and every engine context it currently holds. */
static void free_engines(struct i915_gem_engines *e)
{
	__free_engines(e, e->num_engines);
}
103010be98a7SChris Wilson 
/* RCU callback: final teardown of an engines array after a grace period. */
static void free_engines_rcu(struct rcu_head *rcu)
{
	struct i915_gem_engines *engines =
		container_of(rcu, struct i915_gem_engines, rcu);

	/* The fence must be finalized before the memory is released. */
	i915_sw_fence_fini(&engines->fence);
	free_engines(engines);
}
103910be98a7SChris Wilson 
/*
 * Fold the busy time of each context in @engines into the owning DRM
 * client's per-class past_runtime counters, so that accounting survives
 * the engines array being torn down. No-op without a client.
 */
static void accumulate_runtime(struct i915_drm_client *client,
			       struct i915_gem_engines *engines)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	if (!client)
		return;

	/* Transfer accumulated runtime to the parent GEM context. */
	for_each_gem_engine(ce, engines, it) {
		unsigned int class = ce->engine->uabi_class;

		GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime));
		atomic64_add(intel_context_get_total_runtime_ns(ce),
			     &client->past_runtime[class]);
	}
}
10588399eec8STvrtko Ursulin 
/*
 * i915_sw_fence notifier driving the engines-array lifecycle:
 * FENCE_COMPLETE unlinks the array from the context's stale list, banks
 * its runtime into the client and drops the context reference;
 * FENCE_FREE defers the actual free to RCU via free_engines_rcu().
 */
static int
engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_gem_engines *engines =
		container_of(fence, typeof(*engines), fence);
	struct i915_gem_context *ctx = engines->ctx;

	switch (state) {
	case FENCE_COMPLETE:
		/* Unlink under the stale lock; list may already be empty. */
		if (!list_empty(&engines->link)) {
			unsigned long flags;

			spin_lock_irqsave(&ctx->stale.lock, flags);
			list_del(&engines->link);
			spin_unlock_irqrestore(&ctx->stale.lock, flags);
		}
		accumulate_runtime(ctx->client, engines);
		i915_gem_context_put(ctx);

		break;

	case FENCE_FREE:
		init_rcu_head(&engines->rcu);
		call_rcu(&engines->rcu, free_engines_rcu);
		break;
	}

	return NOTIFY_DONE;
}
108870c96e39SChris Wilson 
alloc_engines(unsigned int count)108970c96e39SChris Wilson static struct i915_gem_engines *alloc_engines(unsigned int count)
109070c96e39SChris Wilson {
109170c96e39SChris Wilson 	struct i915_gem_engines *e;
109270c96e39SChris Wilson 
109370c96e39SChris Wilson 	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
109470c96e39SChris Wilson 	if (!e)
109570c96e39SChris Wilson 		return NULL;
109670c96e39SChris Wilson 
109770c96e39SChris Wilson 	i915_sw_fence_init(&e->fence, engines_notify);
109870c96e39SChris Wilson 	return e;
109970c96e39SChris Wilson }
110070c96e39SChris Wilson 
/*
 * Build the legacy (implicit) engine map for a new GEM context: one
 * context per uABI engine that has a legacy ring index, placed at that
 * index, with @rcs_sseu applied to the render engine. Returns the array
 * or an ERR_PTR; on failure everything created so far is released.
 */
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
						struct intel_sseu rcs_sseu)
{
	const unsigned int max = I915_NUM_ENGINES;
	struct intel_engine_cs *engine;
	struct i915_gem_engines *e, *err;

	e = alloc_engines(max);
	if (!e)
		return ERR_PTR(-ENOMEM);

	for_each_uabi_engine(engine, ctx->i915) {
		struct intel_context *ce;
		struct intel_sseu sseu = {};
		int ret;

		/* Engines without a legacy ring slot are not mapped here. */
		if (engine->legacy_idx == INVALID_ENGINE)
			continue;

		GEM_BUG_ON(engine->legacy_idx >= max);
		GEM_BUG_ON(e->engines[engine->legacy_idx]);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = ERR_CAST(ce);
			goto free_engines;
		}

		e->engines[engine->legacy_idx] = ce;
		/* num_engines tracks the highest populated slot + 1. */
		e->num_engines = max(e->num_engines, engine->legacy_idx + 1);

		if (engine->class == RENDER_CLASS)
			sseu = rcs_sseu;

		ret = intel_context_set_gem(ce, ctx, sseu);
		if (ret) {
			err = ERR_PTR(ret);
			goto free_engines;
		}

	}

	return e;

free_engines:
	free_engines(e);
	return err;
}
114910be98a7SChris Wilson 
/*
 * Permanently pin a parent context and all of its children (used for
 * parallel contexts). On any child failure, unwind: unpin the parent and
 * exactly the @i children that were successfully pinned, in order.
 * Sets CONTEXT_PERMA_PIN on the parent on success; returns 0 or errno.
 */
static int perma_pin_contexts(struct intel_context *ce)
{
	struct intel_context *child;
	int i = 0, j = 0, ret;

	GEM_BUG_ON(!intel_context_is_parent(ce));

	ret = intel_context_pin(ce);
	if (unlikely(ret))
		return ret;

	for_each_child(ce, child) {
		ret = intel_context_pin(child);
		if (unlikely(ret))
			goto unwind;
		++i;	/* count children pinned so far, for the unwind */
	}

	set_bit(CONTEXT_PERMA_PIN, &ce->flags);

	return 0;

unwind:
	intel_context_unpin(ce);
	/* Only the first i children were pinned; stop once j reaches i. */
	for_each_child(ce, child) {
		if (j++ < i)
			intel_context_unpin(child);
		else
			break;
	}

	return ret;
}
1183e5e32171SMatthew Brost 
/*
 * user_engines - materialise a context's engine map from user proto-engines
 *
 * Walks the @num_engines proto-engine descriptors in @pe and creates the
 * matching intel_context for each slot: a physical engine context, a
 * virtual (load-balanced) context, or a parallel (multi-width) context.
 * Slots of type INVALID are skipped (left as set up by alloc_engines()).
 *
 * Returns the populated i915_gem_engines array, or an ERR_PTR on failure,
 * in which case every context created so far is released via free_engines().
 */
static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
					     unsigned int num_engines,
					     struct i915_gem_proto_engine *pe)
{
	struct i915_gem_engines *e, *err;
	unsigned int n;

	e = alloc_engines(num_engines);
	if (!e)
		return ERR_PTR(-ENOMEM);
	e->num_engines = num_engines;

	for (n = 0; n < num_engines; n++) {
		struct intel_context *ce, *child;
		int ret;

		switch (pe[n].type) {
		case I915_GEM_ENGINE_TYPE_PHYSICAL:
			ce = intel_context_create(pe[n].engine);
			break;

		case I915_GEM_ENGINE_TYPE_BALANCED:
			ce = intel_engine_create_virtual(pe[n].siblings,
							 pe[n].num_siblings, 0);
			break;

		case I915_GEM_ENGINE_TYPE_PARALLEL:
			ce = intel_engine_create_parallel(pe[n].siblings,
							  pe[n].num_siblings,
							  pe[n].width);
			break;

		case I915_GEM_ENGINE_TYPE_INVALID:
		default:
			GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
			continue;
		}

		if (IS_ERR(ce)) {
			err = ERR_CAST(ce);
			goto free_engines;
		}

		/* Ownership passes to @e; free_engines() releases it on error */
		e->engines[n] = ce;

		/*
		 * NOTE(review): this applies pe->sseu (entry 0's sseu) to
		 * every engine rather than pe[n].sseu -- confirm intentional.
		 */
		ret = intel_context_set_gem(ce, ctx, pe->sseu);
		if (ret) {
			err = ERR_PTR(ret);
			goto free_engines;
		}
		for_each_child(ce, child) {
			ret = intel_context_set_gem(child, ctx, pe->sseu);
			if (ret) {
				err = ERR_PTR(ret);
				goto free_engines;
			}
		}

		/*
		 * XXX: Must be done after calling intel_context_set_gem as that
		 * function changes the ring size. The ring is allocated when
		 * the context is pinned. If the ring size is changed after
		 * allocation we have a mismatch of the ring size and will cause
		 * the context to hang. Presumably with a bit of reordering we
		 * could move the perma-pin step to the backend function
		 * intel_engine_create_parallel.
		 */
		if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
			ret = perma_pin_contexts(ce);
			if (ret) {
				err = ERR_PTR(ret);
				goto free_engines;
			}
		}
	}

	return e;

free_engines:
	free_engines(e);
	return err;
}
1266d4433c76SJason Ekstrand 
/*
 * Deferred final teardown of a context, run on ctx->i915->wq once the last
 * kref is dropped (queued by i915_gem_context_release()).  The context must
 * already be closed; this drops everything the context still owns and then
 * frees the struct after an RCU grace period.
 */
static void i915_gem_context_release_work(struct work_struct *work)
{
	struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
						    release_work);
	struct i915_address_space *vm;

	trace_i915_context_free(ctx);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	/* Unlink from the device-global contexts list */
	spin_lock(&ctx->i915->gem.contexts.lock);
	list_del(&ctx->link);
	spin_unlock(&ctx->i915->gem.contexts.lock);

	if (ctx->syncobj)
		drm_syncobj_put(ctx->syncobj);

	vm = ctx->vm;
	if (vm)
		i915_vm_put(vm);

	/* Wakeref taken at creation for protected-content contexts */
	if (ctx->pxp_wakeref)
		intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);

	if (ctx->client)
		i915_drm_client_put(ctx->client);

	mutex_destroy(&ctx->engines_mutex);
	mutex_destroy(&ctx->lut_mutex);

	put_pid(ctx->pid);
	mutex_destroy(&ctx->mutex);

	/* RCU readers may still hold the ctx; free after a grace period */
	kfree_rcu(ctx, rcu);
}
130110be98a7SChris Wilson 
/*
 * Last-reference callback for ctx->ref.  The actual teardown is deferred to
 * i915_gem_context_release_work() on the driver workqueue -- presumably so
 * the final put may be made from contexts where the sleeping operations in
 * the worker are not permitted (TODO confirm against callers).
 */
void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);

	queue_work(ctx->i915->wq, &ctx->release_work);
}
130875eefd82SDaniel Vetter 
13092e0986a5SChris Wilson static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context * ctx)13102e0986a5SChris Wilson __context_engines_static(const struct i915_gem_context *ctx)
13112e0986a5SChris Wilson {
13122e0986a5SChris Wilson 	return rcu_dereference_protected(ctx->engines, true);
13132e0986a5SChris Wilson }
13142e0986a5SChris Wilson 
/* Evict @ctx's work by resetting @engine, logged as a context closure */
static void __reset_context(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	intel_gt_handle_error(engine->gt, engine->mask, 0,
			      "context closure in %s", ctx->name);
}
13212e0986a5SChris Wilson 
__cancel_engine(struct intel_engine_cs * engine)13222e0986a5SChris Wilson static bool __cancel_engine(struct intel_engine_cs *engine)
13232e0986a5SChris Wilson {
13242e0986a5SChris Wilson 	/*
13252e0986a5SChris Wilson 	 * Send a "high priority pulse" down the engine to cause the
13262e0986a5SChris Wilson 	 * current request to be momentarily preempted. (If it fails to
13272e0986a5SChris Wilson 	 * be preempted, it will be reset). As we have marked our context
13282e0986a5SChris Wilson 	 * as banned, any incomplete request, including any running, will
13292e0986a5SChris Wilson 	 * be skipped following the preemption.
13302e0986a5SChris Wilson 	 *
13312e0986a5SChris Wilson 	 * If there is no hangchecking (one of the reasons why we try to
13322e0986a5SChris Wilson 	 * cancel the context) and no forced preemption, there may be no
13332e0986a5SChris Wilson 	 * means by which we reset the GPU and evict the persistent hog.
13342e0986a5SChris Wilson 	 * Ergo if we are unable to inject a preemptive pulse that can
13352e0986a5SChris Wilson 	 * kill the banned context, we fallback to doing a local reset
13362e0986a5SChris Wilson 	 * instead.
13372e0986a5SChris Wilson 	 */
1338651dabe2SChris Wilson 	return intel_engine_pulse(engine) == 0;
13392e0986a5SChris Wilson }
13402e0986a5SChris Wilson 
/*
 * Find the engine on which @ce still has work in flight, or return NULL if
 * it is idle / not yet submitted.  Used when closing a context to decide
 * whether it must be preempted or reset off the hardware.
 */
static struct intel_engine_cs *active_engine(struct intel_context *ce)
{
	struct intel_engine_cs *engine = NULL;
	struct i915_request *rq;

	/* Fast path: the backend tracks the inflight engine directly */
	if (intel_context_has_inflight(ce))
		return intel_context_inflight(ce);

	if (!ce->timeline)
		return NULL;

	/*
	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
	 * to the request to prevent it being transferred to a new timeline
	 * (and onto a new timeline->requests list).
	 */
	rcu_read_lock();
	/* Walk newest-to-oldest; stop at the first resolved request */
	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
		bool found;

		/* timeline is already completed upto this point? */
		if (!i915_request_get_rcu(rq))
			break;

		/* Check with the backend if the request is inflight */
		found = true;
		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
			found = i915_request_active_engine(rq, &engine);

		i915_request_put(rq);
		if (found)
			break;
	}
	rcu_read_unlock();

	return engine;
}
13784a317415SChris Wilson 
137945c64ecfSTvrtko Ursulin static void
kill_engines(struct i915_gem_engines * engines,bool exit,bool persistent)138045c64ecfSTvrtko Ursulin kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
13812e0986a5SChris Wilson {
13822e0986a5SChris Wilson 	struct i915_gem_engines_iter it;
13832e0986a5SChris Wilson 	struct intel_context *ce;
13842e0986a5SChris Wilson 
13852e0986a5SChris Wilson 	/*
13862e0986a5SChris Wilson 	 * Map the user's engine back to the actual engines; one virtual
13872e0986a5SChris Wilson 	 * engine will be mapped to multiple engines, and using ctx->engine[]
13882e0986a5SChris Wilson 	 * the same engine may be have multiple instances in the user's map.
13892e0986a5SChris Wilson 	 * However, we only care about pending requests, so only include
13902e0986a5SChris Wilson 	 * engines on which there are incomplete requests.
13912e0986a5SChris Wilson 	 */
139242fb60deSChris Wilson 	for_each_gem_engine(ce, engines, it) {
13932e0986a5SChris Wilson 		struct intel_engine_cs *engine;
13942e0986a5SChris Wilson 
13950add082cSTvrtko Ursulin 		if ((exit || !persistent) && intel_context_revoke(ce))
139645c64ecfSTvrtko Ursulin 			continue; /* Already marked. */
13979f3ccd40SChris Wilson 
13984a317415SChris Wilson 		/*
13994a317415SChris Wilson 		 * Check the current active state of this context; if we
14004a317415SChris Wilson 		 * are currently executing on the GPU we need to evict
14014a317415SChris Wilson 		 * ourselves. On the other hand, if we haven't yet been
14024a317415SChris Wilson 		 * submitted to the GPU or if everything is complete,
14034a317415SChris Wilson 		 * we have nothing to do.
14044a317415SChris Wilson 		 */
14054a317415SChris Wilson 		engine = active_engine(ce);
14062e0986a5SChris Wilson 
14072e0986a5SChris Wilson 		/* First attempt to gracefully cancel the context */
140845c64ecfSTvrtko Ursulin 		if (engine && !__cancel_engine(engine) && (exit || !persistent))
14092e0986a5SChris Wilson 			/*
14102e0986a5SChris Wilson 			 * If we are unable to send a preemptive pulse to bump
14112e0986a5SChris Wilson 			 * the context from the GPU, we have to resort to a full
14122e0986a5SChris Wilson 			 * reset. We hope the collateral damage is worth it.
14132e0986a5SChris Wilson 			 */
141442fb60deSChris Wilson 			__reset_context(engines->ctx, engine);
14152e0986a5SChris Wilson 	}
14162e0986a5SChris Wilson }
14172e0986a5SChris Wilson 
/*
 * Cancel outstanding requests on every engines list still attached to the
 * (closed) @ctx.  Each stale list is kept alive across the unlocked
 * kill_engines() call by taking an i915_sw_fence_await() reference; entries
 * whose fence can no longer be acquired are merely unlinked.
 */
static void kill_context(struct i915_gem_context *ctx)
{
	struct i915_gem_engines *pos, *next;

	spin_lock_irq(&ctx->stale.lock);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
		/* Fence already on its way out? Just decouple the entry. */
		if (!i915_sw_fence_await(&pos->fence)) {
			list_del_init(&pos->link);
			continue;
		}

		/* Drop the lock around kill_engines() */
		spin_unlock_irq(&ctx->stale.lock);

		kill_engines(pos, !ctx->i915->params.enable_hangcheck,
			     i915_gem_context_is_persistent(ctx));

		spin_lock_irq(&ctx->stale.lock);
		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
		/* The list may have changed while unlocked; recompute next */
		list_safe_reset_next(pos, next, link);
		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */

		i915_sw_fence_complete(&pos->fence);
	}
	spin_unlock_irq(&ctx->stale.lock);
}
144442fb60deSChris Wilson 
/*
 * Retire an engines list that has been detached from @ctx: close each
 * context to stop further submission, and for any context still active,
 * make engines->fence wait until it is finally scheduled out and retired.
 * The list is then parked on ctx->stale.engines for kill_context() to
 * process; if we raced with the context already being closed (or an await
 * failed), the engines are killed immediately instead.
 */
static void engines_idle_release(struct i915_gem_context *ctx,
				 struct i915_gem_engines *engines)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	INIT_LIST_HEAD(&engines->link);

	/* Reference dropped when the engines list is finally freed */
	engines->ctx = i915_gem_context_get(ctx);

	for_each_gem_engine(ce, engines, it) {
		int err;

		/* serialises with execbuf */
		intel_context_close(ce);
		if (!intel_context_pin_if_active(ce))
			continue;

		/* Wait until context is finally scheduled out and retired */
		err = i915_sw_fence_await_active(&engines->fence,
						 &ce->active,
						 I915_ACTIVE_AWAIT_BARRIER);
		intel_context_unpin(ce);
		if (err)
			goto kill;
	}

	spin_lock_irq(&ctx->stale.lock);
	if (!i915_gem_context_is_closed(ctx))
		list_add_tail(&engines->link, &ctx->stale.engines);
	spin_unlock_irq(&ctx->stale.lock);

kill:
	if (list_empty(&engines->link)) /* raced, already closed */
		kill_engines(engines, true,
			     i915_gem_context_is_persistent(ctx));

	i915_sw_fence_commit(&engines->fence);
}
148442fb60deSChris Wilson 
set_closed_name(struct i915_gem_context * ctx)1485267c0126SChris Wilson static void set_closed_name(struct i915_gem_context *ctx)
1486267c0126SChris Wilson {
1487267c0126SChris Wilson 	char *s;
1488267c0126SChris Wilson 
1489267c0126SChris Wilson 	/* Replace '[]' with '<>' to indicate closed in debug prints */
1490267c0126SChris Wilson 
1491267c0126SChris Wilson 	s = strrchr(ctx->name, '[');
1492267c0126SChris Wilson 	if (!s)
1493267c0126SChris Wilson 		return;
1494267c0126SChris Wilson 
1495267c0126SChris Wilson 	*s = '<';
1496267c0126SChris Wilson 
1497267c0126SChris Wilson 	s = strchr(s + 1, ']');
1498267c0126SChris Wilson 	if (s)
1499267c0126SChris Wilson 		*s = '>';
1500267c0126SChris Wilson }
1501267c0126SChris Wilson 
/*
 * Close @ctx: release its engines, detach it from userspace visibility
 * (LUT, client list), forcibly cancel remaining requests where necessary,
 * and finally drop the creation reference.
 */
static void context_close(struct i915_gem_context *ctx)
{
	struct i915_drm_client *client;

	/* Flush any concurrent set_engines() */
	mutex_lock(&ctx->engines_mutex);
	unpin_engines(__context_engines_static(ctx));
	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
	i915_gem_context_set_closed(ctx);
	mutex_unlock(&ctx->engines_mutex);

	mutex_lock(&ctx->mutex);

	/* "name[pid]" becomes "name<pid>" in debug output */
	set_closed_name(ctx);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);

	/* Poison file_priv to catch further use through this context */
	ctx->file_priv = ERR_PTR(-EBADF);

	client = ctx->client;
	if (client) {
		spin_lock(&client->ctx_lock);
		list_del_rcu(&ctx->client_link);
		spin_unlock(&client->ctx_lock);
	}

	mutex_unlock(&ctx->mutex);

	/*
	 * If the user has disabled hangchecking, we can not be sure that
	 * the batches will ever complete after the context is closed,
	 * keeping the context and all resources pinned forever. So in this
	 * case we opt to forcibly kill off all remaining requests on
	 * context close.
	 */
	kill_context(ctx);

	i915_gem_context_put(ctx);
}
154610be98a7SChris Wilson 
/*
 * Set or clear the context's persistence flag.
 *
 * Returns 0 on success (or no change), -EINVAL if persistence is requested
 * while hangcheck is disabled, and -ENODEV if non-persistence cannot be
 * enforced (no preemption support, or no per-engine reset).
 */
static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
{
	if (i915_gem_context_is_persistent(ctx) == state)
		return 0;

	if (state) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are healthy.
		 */
		if (!ctx->i915->params.enable_hangcheck)
			return -EINVAL;

		i915_gem_context_set_persistence(ctx);
	} else {
		/* To cancel a context we use "preempt-to-idle" */
		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
			return -ENODEV;

		/*
		 * If the cancel fails, we then need to reset, cleanly!
		 *
		 * If the per-engine reset fails, all hope is lost! We resort
		 * to a full GPU reset in that unlikely case, but realistically
		 * if the engine could not reset, the full reset does not fare
		 * much better. The damage has been done.
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * cleanup a hanging persistent context without causing
		 * colateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(to_gt(ctx->i915)))
			return -ENODEV;

		i915_gem_context_clear_persistence(ctx);
	}

	return 0;
}
1588a0e04715SChris Wilson 
158910be98a7SChris Wilson static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private * i915,const struct i915_gem_proto_context * pc)1590a34857dcSJason Ekstrand i915_gem_create_context(struct drm_i915_private *i915,
1591a34857dcSJason Ekstrand 			const struct i915_gem_proto_context *pc)
159210be98a7SChris Wilson {
159310be98a7SChris Wilson 	struct i915_gem_context *ctx;
15940eee9977SJason Ekstrand 	struct i915_address_space *vm = NULL;
15950eee9977SJason Ekstrand 	struct i915_gem_engines *e;
15960eee9977SJason Ekstrand 	int err;
15970eee9977SJason Ekstrand 	int i;
159810be98a7SChris Wilson 
15990eee9977SJason Ekstrand 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
16000eee9977SJason Ekstrand 	if (!ctx)
16010eee9977SJason Ekstrand 		return ERR_PTR(-ENOMEM);
16020eee9977SJason Ekstrand 
16030eee9977SJason Ekstrand 	kref_init(&ctx->ref);
16040eee9977SJason Ekstrand 	ctx->i915 = i915;
16050eee9977SJason Ekstrand 	ctx->sched = pc->sched;
16060eee9977SJason Ekstrand 	mutex_init(&ctx->mutex);
16070eee9977SJason Ekstrand 	INIT_LIST_HEAD(&ctx->link);
160875eefd82SDaniel Vetter 	INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
16090eee9977SJason Ekstrand 
16100eee9977SJason Ekstrand 	spin_lock_init(&ctx->stale.lock);
16110eee9977SJason Ekstrand 	INIT_LIST_HEAD(&ctx->stale.engines);
161210be98a7SChris Wilson 
1613a34857dcSJason Ekstrand 	if (pc->vm) {
16140eee9977SJason Ekstrand 		vm = i915_vm_get(pc->vm);
1615a34857dcSJason Ekstrand 	} else if (HAS_FULL_PPGTT(i915)) {
1616ab53497bSChris Wilson 		struct i915_ppgtt *ppgtt;
161710be98a7SChris Wilson 
16181a9c4db4SMichał Winiarski 		ppgtt = i915_ppgtt_create(to_gt(i915), 0);
161910be98a7SChris Wilson 		if (IS_ERR(ppgtt)) {
1620baa89ba3SWambui Karuga 			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
162110be98a7SChris Wilson 				PTR_ERR(ppgtt));
16220eee9977SJason Ekstrand 			err = PTR_ERR(ppgtt);
16230eee9977SJason Ekstrand 			goto err_ctx;
16240eee9977SJason Ekstrand 		}
16250eee9977SJason Ekstrand 		vm = &ppgtt->vm;
16260eee9977SJason Ekstrand 	}
1627e1a7ab4fSThomas Hellström 	if (vm)
1628e1a7ab4fSThomas Hellström 		ctx->vm = vm;
162910be98a7SChris Wilson 
16300eee9977SJason Ekstrand 	mutex_init(&ctx->engines_mutex);
1631d4433c76SJason Ekstrand 	if (pc->num_user_engines >= 0) {
1632d4433c76SJason Ekstrand 		i915_gem_context_set_user_engines(ctx);
16330eee9977SJason Ekstrand 		e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
16340eee9977SJason Ekstrand 	} else {
16350eee9977SJason Ekstrand 		i915_gem_context_clear_user_engines(ctx);
16360eee9977SJason Ekstrand 		e = default_engines(ctx, pc->legacy_rcs_sseu);
1637d4433c76SJason Ekstrand 	}
16380eee9977SJason Ekstrand 	if (IS_ERR(e)) {
16390eee9977SJason Ekstrand 		err = PTR_ERR(e);
16400eee9977SJason Ekstrand 		goto err_vm;
16410eee9977SJason Ekstrand 	}
16420eee9977SJason Ekstrand 	RCU_INIT_POINTER(ctx->engines, e);
16430eee9977SJason Ekstrand 
16440eee9977SJason Ekstrand 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
16450eee9977SJason Ekstrand 	mutex_init(&ctx->lut_mutex);
16460eee9977SJason Ekstrand 
16470eee9977SJason Ekstrand 	/* NB: Mark all slices as needing a remap so that when the context first
16480eee9977SJason Ekstrand 	 * loads it will restore whatever remap state already exists. If there
16490eee9977SJason Ekstrand 	 * is no remap info, it will be a NOP. */
16500eee9977SJason Ekstrand 	ctx->remap_slice = ALL_L3_SLICES(i915);
16510eee9977SJason Ekstrand 
16520eee9977SJason Ekstrand 	ctx->user_flags = pc->user_flags;
16530eee9977SJason Ekstrand 
16540eee9977SJason Ekstrand 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
16550eee9977SJason Ekstrand 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
1656d4433c76SJason Ekstrand 
1657a34857dcSJason Ekstrand 	if (pc->single_timeline) {
16580eee9977SJason Ekstrand 		err = drm_syncobj_create(&ctx->syncobj,
165900dae4d3SJason Ekstrand 					 DRM_SYNCOBJ_CREATE_SIGNALED,
166000dae4d3SJason Ekstrand 					 NULL);
16610eee9977SJason Ekstrand 		if (err)
16620eee9977SJason Ekstrand 			goto err_engines;
166310be98a7SChris Wilson 	}
166410be98a7SChris Wilson 
1665d3ac8d42SDaniele Ceraolo Spurio 	if (pc->uses_protected_content) {
1666d3ac8d42SDaniele Ceraolo Spurio 		ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1667d3ac8d42SDaniele Ceraolo Spurio 		ctx->uses_protected_content = true;
1668d3ac8d42SDaniele Ceraolo Spurio 	}
1669d3ac8d42SDaniele Ceraolo Spurio 
167010be98a7SChris Wilson 	trace_i915_context_create(ctx);
167110be98a7SChris Wilson 
167210be98a7SChris Wilson 	return ctx;
16730eee9977SJason Ekstrand 
16740eee9977SJason Ekstrand err_engines:
16750eee9977SJason Ekstrand 	free_engines(e);
16760eee9977SJason Ekstrand err_vm:
16770eee9977SJason Ekstrand 	if (ctx->vm)
1678e1a7ab4fSThomas Hellström 		i915_vm_put(ctx->vm);
16790eee9977SJason Ekstrand err_ctx:
16800eee9977SJason Ekstrand 	kfree(ctx);
16810eee9977SJason Ekstrand 	return ERR_PTR(err);
168210be98a7SChris Wilson }
168310be98a7SChris Wilson 
/* One-time init of the device-global contexts bookkeeping */
static void init_contexts(struct i915_gem_contexts *gc)
{
	spin_lock_init(&gc->lock);
	INIT_LIST_HEAD(&gc->list);
}
168910be98a7SChris Wilson 
/* Driver-load hook: initialise the global GEM contexts list/lock */
void i915_gem_init__contexts(struct drm_i915_private *i915)
{
	init_contexts(&i915->gem.contexts);
}
169410be98a7SChris Wilson 
1695bed4b455SRob Clark /*
1696bed4b455SRob Clark  * Note that this implicitly consumes the ctx reference, by placing
1697bed4b455SRob Clark  * the ctx in the context_xa.
1698bed4b455SRob Clark  */
static void gem_context_register(struct i915_gem_context *ctx,
				 struct drm_i915_file_private *fpriv,
				 u32 id)
{
	struct drm_i915_private *i915 = ctx->i915;
	void *old;

	ctx->file_priv = fpriv;

	ctx->pid = get_task_pid(current, PIDTYPE_PID);
	ctx->client = i915_drm_client_get(fpriv->client);

	/* Debug name "comm[pid]"; set_closed_name() flips brackets on close */
	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
		 current->comm, pid_nr(ctx->pid));

	/* Link into the owning client's context list */
	spin_lock(&ctx->client->ctx_lock);
	list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
	spin_unlock(&ctx->client->ctx_lock);

	/* And into the device-global contexts list */
	spin_lock(&i915->gem.contexts.lock);
	list_add_tail(&ctx->link, &i915->gem.contexts.list);
	spin_unlock(&i915->gem.contexts.lock);

	/* And finally expose ourselves to userspace via the idr */
	old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
	WARN_ON(old);
}
172610be98a7SChris Wilson 
/*
 * Per-file open: initialise the file's proto-context, context and vm lookup
 * tables and create/register the default context under the reserved id 0.
 * Returns 0 or a negative error code.
 */
int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;
	int err;

	mutex_init(&file_priv->proto_context_lock);
	xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC);

	/* 0 reserved for the default context */
	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1);

	/* 0 reserved for invalid/unassigned ppgtt */
	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);

	/* The default context is built from an empty proto-context */
	pc = proto_context_create(i915, 0);
	if (IS_ERR(pc)) {
		err = PTR_ERR(pc);
		goto err;
	}

	ctx = i915_gem_create_context(i915, pc);
	proto_context_close(i915, pc);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err;
	}

	/* Consumes the ctx reference by placing it in context_xa */
	gem_context_register(ctx, file_priv, 0);

	return 0;

err:
	xa_destroy(&file_priv->vm_xa);
	xa_destroy(&file_priv->context_xa);
	xa_destroy(&file_priv->proto_context_xa);
	mutex_destroy(&file_priv->proto_context_lock);
	return err;
}
176810be98a7SChris Wilson 
/* Per-file teardown: release all proto-contexts, contexts and VMs */
void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_proto_context *pc;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	unsigned long idx;

	xa_for_each(&file_priv->proto_context_xa, idx, pc)
		proto_context_close(file_priv->i915, pc);
	xa_destroy(&file_priv->proto_context_xa);
	mutex_destroy(&file_priv->proto_context_lock);

	xa_for_each(&file_priv->context_xa, idx, ctx)
		context_close(ctx);
	xa_destroy(&file_priv->context_xa);

	xa_for_each(&file_priv->vm_xa, idx, vm)
		i915_vm_put(vm);
	xa_destroy(&file_priv->vm_xa);
}
179010be98a7SChris Wilson 
i915_gem_vm_create_ioctl(struct drm_device * dev,void * data,struct drm_file * file)179110be98a7SChris Wilson int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
179210be98a7SChris Wilson 			     struct drm_file *file)
179310be98a7SChris Wilson {
179410be98a7SChris Wilson 	struct drm_i915_private *i915 = to_i915(dev);
179510be98a7SChris Wilson 	struct drm_i915_gem_vm_control *args = data;
179610be98a7SChris Wilson 	struct drm_i915_file_private *file_priv = file->driver_priv;
1797ab53497bSChris Wilson 	struct i915_ppgtt *ppgtt;
17985dbd2b7bSChris Wilson 	u32 id;
179910be98a7SChris Wilson 	int err;
180010be98a7SChris Wilson 
180110be98a7SChris Wilson 	if (!HAS_FULL_PPGTT(i915))
180210be98a7SChris Wilson 		return -ENODEV;
180310be98a7SChris Wilson 
180410be98a7SChris Wilson 	if (args->flags)
180510be98a7SChris Wilson 		return -EINVAL;
180610be98a7SChris Wilson 
18071a9c4db4SMichał Winiarski 	ppgtt = i915_ppgtt_create(to_gt(i915), 0);
180810be98a7SChris Wilson 	if (IS_ERR(ppgtt))
180910be98a7SChris Wilson 		return PTR_ERR(ppgtt);
181010be98a7SChris Wilson 
181110be98a7SChris Wilson 	if (args->extensions) {
181210be98a7SChris Wilson 		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
181310be98a7SChris Wilson 					   NULL, 0,
181410be98a7SChris Wilson 					   ppgtt);
181510be98a7SChris Wilson 		if (err)
181610be98a7SChris Wilson 			goto err_put;
181710be98a7SChris Wilson 	}
181810be98a7SChris Wilson 
18195dbd2b7bSChris Wilson 	err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
18205dbd2b7bSChris Wilson 		       xa_limit_32b, GFP_KERNEL);
182110be98a7SChris Wilson 	if (err)
182210be98a7SChris Wilson 		goto err_put;
182310be98a7SChris Wilson 
18245dbd2b7bSChris Wilson 	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
18255dbd2b7bSChris Wilson 	args->vm_id = id;
182610be98a7SChris Wilson 	return 0;
182710be98a7SChris Wilson 
182810be98a7SChris Wilson err_put:
1829e568ac38SChris Wilson 	i915_vm_put(&ppgtt->vm);
183010be98a7SChris Wilson 	return err;
183110be98a7SChris Wilson }
183210be98a7SChris Wilson 
i915_gem_vm_destroy_ioctl(struct drm_device * dev,void * data,struct drm_file * file)183310be98a7SChris Wilson int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
183410be98a7SChris Wilson 			      struct drm_file *file)
183510be98a7SChris Wilson {
183610be98a7SChris Wilson 	struct drm_i915_file_private *file_priv = file->driver_priv;
183710be98a7SChris Wilson 	struct drm_i915_gem_vm_control *args = data;
1838e568ac38SChris Wilson 	struct i915_address_space *vm;
183910be98a7SChris Wilson 
184010be98a7SChris Wilson 	if (args->flags)
184110be98a7SChris Wilson 		return -EINVAL;
184210be98a7SChris Wilson 
184310be98a7SChris Wilson 	if (args->extensions)
184410be98a7SChris Wilson 		return -EINVAL;
184510be98a7SChris Wilson 
18465dbd2b7bSChris Wilson 	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1847e568ac38SChris Wilson 	if (!vm)
184810be98a7SChris Wilson 		return -ENOENT;
184910be98a7SChris Wilson 
1850e568ac38SChris Wilson 	i915_vm_put(vm);
185110be98a7SChris Wilson 	return 0;
185210be98a7SChris Wilson }
185310be98a7SChris Wilson 
get_ppgtt(struct drm_i915_file_private * file_priv,struct i915_gem_context * ctx,struct drm_i915_gem_context_param * args)185410be98a7SChris Wilson static int get_ppgtt(struct drm_i915_file_private *file_priv,
185510be98a7SChris Wilson 		     struct i915_gem_context *ctx,
185610be98a7SChris Wilson 		     struct drm_i915_gem_context_param *args)
185710be98a7SChris Wilson {
1858e568ac38SChris Wilson 	struct i915_address_space *vm;
18595dbd2b7bSChris Wilson 	int err;
18605dbd2b7bSChris Wilson 	u32 id;
186110be98a7SChris Wilson 
1862a82a9979SDaniel Vetter 	if (!i915_gem_context_has_full_ppgtt(ctx))
186310be98a7SChris Wilson 		return -ENODEV;
186410be98a7SChris Wilson 
18659ec8795eSDaniel Vetter 	vm = ctx->vm;
18669ec8795eSDaniel Vetter 	GEM_BUG_ON(!vm);
186790211ea4SChris Wilson 
186899343c46SRob Clark 	/*
186999343c46SRob Clark 	 * Get a reference for the allocated handle.  Once the handle is
187099343c46SRob Clark 	 * visible in the vm_xa table, userspace could try to close it
187199343c46SRob Clark 	 * from under our feet, so we need to hold the extra reference
187299343c46SRob Clark 	 * first.
187399343c46SRob Clark 	 */
1874e1a7ab4fSThomas Hellström 	i915_vm_get(vm);
187510be98a7SChris Wilson 
187699343c46SRob Clark 	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
187799343c46SRob Clark 	if (err) {
187899343c46SRob Clark 		i915_vm_put(vm);
187999343c46SRob Clark 		return err;
188099343c46SRob Clark 	}
188199343c46SRob Clark 
18825dbd2b7bSChris Wilson 	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
18835dbd2b7bSChris Wilson 	args->value = id;
188410be98a7SChris Wilson 	args->size = 0;
188510be98a7SChris Wilson 
18865dbd2b7bSChris Wilson 	return err;
188710be98a7SChris Wilson }
188810be98a7SChris Wilson 
188911ecbdddSLionel Landwerlin int
i915_gem_user_to_context_sseu(struct intel_gt * gt,const struct drm_i915_gem_context_param_sseu * user,struct intel_sseu * context)18900b6613c6SVenkata Sandeep Dhanalakota i915_gem_user_to_context_sseu(struct intel_gt *gt,
189110be98a7SChris Wilson 			      const struct drm_i915_gem_context_param_sseu *user,
189210be98a7SChris Wilson 			      struct intel_sseu *context)
189310be98a7SChris Wilson {
18940b6613c6SVenkata Sandeep Dhanalakota 	const struct sseu_dev_info *device = &gt->info.sseu;
18950b6613c6SVenkata Sandeep Dhanalakota 	struct drm_i915_private *i915 = gt->i915;
1896b87d3901SMatt Roper 	unsigned int dev_subslice_mask = intel_sseu_get_hsw_subslices(device, 0);
189710be98a7SChris Wilson 
189810be98a7SChris Wilson 	/* No zeros in any field. */
189910be98a7SChris Wilson 	if (!user->slice_mask || !user->subslice_mask ||
190010be98a7SChris Wilson 	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
190110be98a7SChris Wilson 		return -EINVAL;
190210be98a7SChris Wilson 
190310be98a7SChris Wilson 	/* Max > min. */
190410be98a7SChris Wilson 	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
190510be98a7SChris Wilson 		return -EINVAL;
190610be98a7SChris Wilson 
190710be98a7SChris Wilson 	/*
190810be98a7SChris Wilson 	 * Some future proofing on the types since the uAPI is wider than the
190910be98a7SChris Wilson 	 * current internal implementation.
191010be98a7SChris Wilson 	 */
191110be98a7SChris Wilson 	if (overflows_type(user->slice_mask, context->slice_mask) ||
191210be98a7SChris Wilson 	    overflows_type(user->subslice_mask, context->subslice_mask) ||
191310be98a7SChris Wilson 	    overflows_type(user->min_eus_per_subslice,
191410be98a7SChris Wilson 			   context->min_eus_per_subslice) ||
191510be98a7SChris Wilson 	    overflows_type(user->max_eus_per_subslice,
191610be98a7SChris Wilson 			   context->max_eus_per_subslice))
191710be98a7SChris Wilson 		return -EINVAL;
191810be98a7SChris Wilson 
191910be98a7SChris Wilson 	/* Check validity against hardware. */
192010be98a7SChris Wilson 	if (user->slice_mask & ~device->slice_mask)
192110be98a7SChris Wilson 		return -EINVAL;
192210be98a7SChris Wilson 
1923b87d3901SMatt Roper 	if (user->subslice_mask & ~dev_subslice_mask)
192410be98a7SChris Wilson 		return -EINVAL;
192510be98a7SChris Wilson 
192610be98a7SChris Wilson 	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
192710be98a7SChris Wilson 		return -EINVAL;
192810be98a7SChris Wilson 
192910be98a7SChris Wilson 	context->slice_mask = user->slice_mask;
193010be98a7SChris Wilson 	context->subslice_mask = user->subslice_mask;
193110be98a7SChris Wilson 	context->min_eus_per_subslice = user->min_eus_per_subslice;
193210be98a7SChris Wilson 	context->max_eus_per_subslice = user->max_eus_per_subslice;
193310be98a7SChris Wilson 
193410be98a7SChris Wilson 	/* Part specific restrictions. */
193540e1956eSLucas De Marchi 	if (GRAPHICS_VER(i915) == 11) {
193610be98a7SChris Wilson 		unsigned int hw_s = hweight8(device->slice_mask);
1937b87d3901SMatt Roper 		unsigned int hw_ss_per_s = hweight8(dev_subslice_mask);
193810be98a7SChris Wilson 		unsigned int req_s = hweight8(context->slice_mask);
193910be98a7SChris Wilson 		unsigned int req_ss = hweight8(context->subslice_mask);
194010be98a7SChris Wilson 
194110be98a7SChris Wilson 		/*
194210be98a7SChris Wilson 		 * Only full subslice enablement is possible if more than one
194310be98a7SChris Wilson 		 * slice is turned on.
194410be98a7SChris Wilson 		 */
194510be98a7SChris Wilson 		if (req_s > 1 && req_ss != hw_ss_per_s)
194610be98a7SChris Wilson 			return -EINVAL;
194710be98a7SChris Wilson 
194810be98a7SChris Wilson 		/*
194910be98a7SChris Wilson 		 * If more than four (SScount bitfield limit) subslices are
195010be98a7SChris Wilson 		 * requested then the number has to be even.
195110be98a7SChris Wilson 		 */
195210be98a7SChris Wilson 		if (req_ss > 4 && (req_ss & 1))
195310be98a7SChris Wilson 			return -EINVAL;
195410be98a7SChris Wilson 
195510be98a7SChris Wilson 		/*
195610be98a7SChris Wilson 		 * If only one slice is enabled and subslice count is below the
195710be98a7SChris Wilson 		 * device full enablement, it must be at most half of the all
195810be98a7SChris Wilson 		 * available subslices.
195910be98a7SChris Wilson 		 */
196010be98a7SChris Wilson 		if (req_s == 1 && req_ss < hw_ss_per_s &&
196110be98a7SChris Wilson 		    req_ss > (hw_ss_per_s / 2))
196210be98a7SChris Wilson 			return -EINVAL;
196310be98a7SChris Wilson 
196410be98a7SChris Wilson 		/* ABI restriction - VME use case only. */
196510be98a7SChris Wilson 
196610be98a7SChris Wilson 		/* All slices or one slice only. */
196710be98a7SChris Wilson 		if (req_s != 1 && req_s != hw_s)
196810be98a7SChris Wilson 			return -EINVAL;
196910be98a7SChris Wilson 
197010be98a7SChris Wilson 		/*
197110be98a7SChris Wilson 		 * Half subslices or full enablement only when one slice is
197210be98a7SChris Wilson 		 * enabled.
197310be98a7SChris Wilson 		 */
197410be98a7SChris Wilson 		if (req_s == 1 &&
197510be98a7SChris Wilson 		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
197610be98a7SChris Wilson 			return -EINVAL;
197710be98a7SChris Wilson 
197810be98a7SChris Wilson 		/* No EU configuration changes. */
197910be98a7SChris Wilson 		if ((user->min_eus_per_subslice !=
198010be98a7SChris Wilson 		     device->max_eus_per_subslice) ||
198110be98a7SChris Wilson 		    (user->max_eus_per_subslice !=
198210be98a7SChris Wilson 		     device->max_eus_per_subslice))
198310be98a7SChris Wilson 			return -EINVAL;
198410be98a7SChris Wilson 	}
198510be98a7SChris Wilson 
198610be98a7SChris Wilson 	return 0;
198710be98a7SChris Wilson }
198810be98a7SChris Wilson 
set_sseu(struct i915_gem_context * ctx,struct drm_i915_gem_context_param * args)198910be98a7SChris Wilson static int set_sseu(struct i915_gem_context *ctx,
199010be98a7SChris Wilson 		    struct drm_i915_gem_context_param *args)
199110be98a7SChris Wilson {
199210be98a7SChris Wilson 	struct drm_i915_private *i915 = ctx->i915;
199310be98a7SChris Wilson 	struct drm_i915_gem_context_param_sseu user_sseu;
199410be98a7SChris Wilson 	struct intel_context *ce;
199510be98a7SChris Wilson 	struct intel_sseu sseu;
199610be98a7SChris Wilson 	unsigned long lookup;
199710be98a7SChris Wilson 	int ret;
199810be98a7SChris Wilson 
199910be98a7SChris Wilson 	if (args->size < sizeof(user_sseu))
200010be98a7SChris Wilson 		return -EINVAL;
200110be98a7SChris Wilson 
200240e1956eSLucas De Marchi 	if (GRAPHICS_VER(i915) != 11)
200310be98a7SChris Wilson 		return -ENODEV;
200410be98a7SChris Wilson 
200510be98a7SChris Wilson 	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
200610be98a7SChris Wilson 			   sizeof(user_sseu)))
200710be98a7SChris Wilson 		return -EFAULT;
200810be98a7SChris Wilson 
200910be98a7SChris Wilson 	if (user_sseu.rsvd)
201010be98a7SChris Wilson 		return -EINVAL;
201110be98a7SChris Wilson 
201210be98a7SChris Wilson 	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
201310be98a7SChris Wilson 		return -EINVAL;
201410be98a7SChris Wilson 
201510be98a7SChris Wilson 	lookup = 0;
201610be98a7SChris Wilson 	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
201710be98a7SChris Wilson 		lookup |= LOOKUP_USER_INDEX;
201810be98a7SChris Wilson 
201910be98a7SChris Wilson 	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
202010be98a7SChris Wilson 	if (IS_ERR(ce))
202110be98a7SChris Wilson 		return PTR_ERR(ce);
202210be98a7SChris Wilson 
202310be98a7SChris Wilson 	/* Only render engine supports RPCS configuration. */
202410be98a7SChris Wilson 	if (ce->engine->class != RENDER_CLASS) {
202510be98a7SChris Wilson 		ret = -ENODEV;
202610be98a7SChris Wilson 		goto out_ce;
202710be98a7SChris Wilson 	}
202810be98a7SChris Wilson 
20290b6613c6SVenkata Sandeep Dhanalakota 	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
203010be98a7SChris Wilson 	if (ret)
203110be98a7SChris Wilson 		goto out_ce;
203210be98a7SChris Wilson 
203310be98a7SChris Wilson 	ret = intel_context_reconfigure_sseu(ce, sseu);
203410be98a7SChris Wilson 	if (ret)
203510be98a7SChris Wilson 		goto out_ce;
203610be98a7SChris Wilson 
203710be98a7SChris Wilson 	args->size = sizeof(user_sseu);
203810be98a7SChris Wilson 
203910be98a7SChris Wilson out_ce:
204010be98a7SChris Wilson 	intel_context_put(ce);
204110be98a7SChris Wilson 	return ret;
204210be98a7SChris Wilson }
204310be98a7SChris Wilson 
204410be98a7SChris Wilson static int
set_persistence(struct i915_gem_context * ctx,const struct drm_i915_gem_context_param * args)2045a0e04715SChris Wilson set_persistence(struct i915_gem_context *ctx,
2046a0e04715SChris Wilson 		const struct drm_i915_gem_context_param *args)
2047a0e04715SChris Wilson {
2048a0e04715SChris Wilson 	if (args->size)
2049a0e04715SChris Wilson 		return -EINVAL;
2050a0e04715SChris Wilson 
2051a0e04715SChris Wilson 	return __context_set_persistence(ctx, args->value);
2052a0e04715SChris Wilson }
2053a0e04715SChris Wilson 
set_priority(struct i915_gem_context * ctx,const struct drm_i915_gem_context_param * args)20540f100b70SChris Wilson static int set_priority(struct i915_gem_context *ctx,
20550f100b70SChris Wilson 			const struct drm_i915_gem_context_param *args)
20560f100b70SChris Wilson {
2057b9709057SDaniel Vetter 	struct i915_gem_engines_iter it;
2058b9709057SDaniel Vetter 	struct intel_context *ce;
2059aaa5957cSJason Ekstrand 	int err;
20600f100b70SChris Wilson 
2061aaa5957cSJason Ekstrand 	err = validate_priority(ctx->i915, args);
2062aaa5957cSJason Ekstrand 	if (err)
2063aaa5957cSJason Ekstrand 		return err;
20640f100b70SChris Wilson 
2065aaa5957cSJason Ekstrand 	ctx->sched.priority = args->value;
2066b9709057SDaniel Vetter 
2067b9709057SDaniel Vetter 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2068b9709057SDaniel Vetter 		if (!intel_engine_has_timeslices(ce->engine))
2069b9709057SDaniel Vetter 			continue;
2070b9709057SDaniel Vetter 
2071b9709057SDaniel Vetter 		if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
2072b9709057SDaniel Vetter 		    intel_engine_has_semaphores(ce->engine))
2073b9709057SDaniel Vetter 			intel_context_set_use_semaphores(ce);
2074b9709057SDaniel Vetter 		else
2075b9709057SDaniel Vetter 			intel_context_clear_use_semaphores(ce);
2076b9709057SDaniel Vetter 	}
2077b9709057SDaniel Vetter 	i915_gem_context_unlock_engines(ctx);
20780f100b70SChris Wilson 
20790f100b70SChris Wilson 	return 0;
20800f100b70SChris Wilson }
20810f100b70SChris Wilson 
get_protected(struct i915_gem_context * ctx,struct drm_i915_gem_context_param * args)2082d3ac8d42SDaniele Ceraolo Spurio static int get_protected(struct i915_gem_context *ctx,
2083d3ac8d42SDaniele Ceraolo Spurio 			 struct drm_i915_gem_context_param *args)
2084d3ac8d42SDaniele Ceraolo Spurio {
2085d3ac8d42SDaniele Ceraolo Spurio 	args->size = 0;
2086d3ac8d42SDaniele Ceraolo Spurio 	args->value = i915_gem_context_uses_protected_content(ctx);
2087d3ac8d42SDaniele Ceraolo Spurio 
2088d3ac8d42SDaniele Ceraolo Spurio 	return 0;
2089d3ac8d42SDaniele Ceraolo Spurio }
2090d3ac8d42SDaniele Ceraolo Spurio 
ctx_setparam(struct drm_i915_file_private * fpriv,struct i915_gem_context * ctx,struct drm_i915_gem_context_param * args)209110be98a7SChris Wilson static int ctx_setparam(struct drm_i915_file_private *fpriv,
209210be98a7SChris Wilson 			struct i915_gem_context *ctx,
209310be98a7SChris Wilson 			struct drm_i915_gem_context_param *args)
209410be98a7SChris Wilson {
209510be98a7SChris Wilson 	int ret = 0;
209610be98a7SChris Wilson 
209710be98a7SChris Wilson 	switch (args->param) {
209810be98a7SChris Wilson 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
209910be98a7SChris Wilson 		if (args->size)
210010be98a7SChris Wilson 			ret = -EINVAL;
210110be98a7SChris Wilson 		else if (args->value)
210210be98a7SChris Wilson 			i915_gem_context_set_no_error_capture(ctx);
210310be98a7SChris Wilson 		else
210410be98a7SChris Wilson 			i915_gem_context_clear_no_error_capture(ctx);
210510be98a7SChris Wilson 		break;
210610be98a7SChris Wilson 
210710be98a7SChris Wilson 	case I915_CONTEXT_PARAM_BANNABLE:
210810be98a7SChris Wilson 		if (args->size)
210910be98a7SChris Wilson 			ret = -EINVAL;
211010be98a7SChris Wilson 		else if (!capable(CAP_SYS_ADMIN) && !args->value)
211110be98a7SChris Wilson 			ret = -EPERM;
211210be98a7SChris Wilson 		else if (args->value)
211310be98a7SChris Wilson 			i915_gem_context_set_bannable(ctx);
2114d3ac8d42SDaniele Ceraolo Spurio 		else if (i915_gem_context_uses_protected_content(ctx))
2115d3ac8d42SDaniele Ceraolo Spurio 			ret = -EPERM; /* can't clear this for protected contexts */
211610be98a7SChris Wilson 		else
211710be98a7SChris Wilson 			i915_gem_context_clear_bannable(ctx);
211810be98a7SChris Wilson 		break;
211910be98a7SChris Wilson 
212010be98a7SChris Wilson 	case I915_CONTEXT_PARAM_RECOVERABLE:
212110be98a7SChris Wilson 		if (args->size)
212210be98a7SChris Wilson 			ret = -EINVAL;
2123d3ac8d42SDaniele Ceraolo Spurio 		else if (!args->value)
212410be98a7SChris Wilson 			i915_gem_context_clear_recoverable(ctx);
2125d3ac8d42SDaniele Ceraolo Spurio 		else if (i915_gem_context_uses_protected_content(ctx))
2126d3ac8d42SDaniele Ceraolo Spurio 			ret = -EPERM; /* can't set this for protected contexts */
2127d3ac8d42SDaniele Ceraolo Spurio 		else
2128d3ac8d42SDaniele Ceraolo Spurio 			i915_gem_context_set_recoverable(ctx);
212910be98a7SChris Wilson 		break;
213010be98a7SChris Wilson 
213110be98a7SChris Wilson 	case I915_CONTEXT_PARAM_PRIORITY:
21320f100b70SChris Wilson 		ret = set_priority(ctx, args);
213310be98a7SChris Wilson 		break;
213410be98a7SChris Wilson 
213510be98a7SChris Wilson 	case I915_CONTEXT_PARAM_SSEU:
213610be98a7SChris Wilson 		ret = set_sseu(ctx, args);
213710be98a7SChris Wilson 		break;
213810be98a7SChris Wilson 
2139a0e04715SChris Wilson 	case I915_CONTEXT_PARAM_PERSISTENCE:
2140a0e04715SChris Wilson 		ret = set_persistence(ctx, args);
2141a0e04715SChris Wilson 		break;
2142a0e04715SChris Wilson 
2143d3ac8d42SDaniele Ceraolo Spurio 	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
21446ff6d61dSJason Ekstrand 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
214510be98a7SChris Wilson 	case I915_CONTEXT_PARAM_BAN_PERIOD:
2146fe4751c3SJason Ekstrand 	case I915_CONTEXT_PARAM_RINGSIZE:
2147ccbc1b97SJason Ekstrand 	case I915_CONTEXT_PARAM_VM:
2148d9d29c74SJason Ekstrand 	case I915_CONTEXT_PARAM_ENGINES:
214910be98a7SChris Wilson 	default:
215010be98a7SChris Wilson 		ret = -EINVAL;
215110be98a7SChris Wilson 		break;
215210be98a7SChris Wilson 	}
215310be98a7SChris Wilson 
215410be98a7SChris Wilson 	return ret;
215510be98a7SChris Wilson }
215610be98a7SChris Wilson 
/* State threaded through i915_user_extensions() while creating a context. */
struct create_ext {
	struct i915_gem_proto_context *pc;
	struct drm_i915_file_private *fpriv;
};
216110be98a7SChris Wilson 
/* I915_CONTEXT_CREATE_EXT_SETPARAM: apply a setparam to the in-flight
 * proto-context before it is finalized.
 */
static int create_setparam(struct i915_user_extension __user *ext, void *data)
{
	struct drm_i915_gem_context_create_ext_setparam local;
	const struct create_ext *arg = data;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	/* The context does not exist yet, so a ctx_id makes no sense. */
	if (local.param.ctx_id)
		return -EINVAL;

	return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
}
217510be98a7SChris Wilson 
/* Stub for retired extension slots (e.g. CLONE): always rejects. */
static int invalid_ext(struct i915_user_extension __user *ext, void *data)
{
	return -EINVAL;
}
218010be98a7SChris Wilson 
218110be98a7SChris Wilson static const i915_user_extension_fn create_extensions[] = {
218210be98a7SChris Wilson 	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
21834a766ae4SJason Ekstrand 	[I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
218410be98a7SChris Wilson };
218510be98a7SChris Wilson 
client_is_banned(struct drm_i915_file_private * file_priv)218610be98a7SChris Wilson static bool client_is_banned(struct drm_i915_file_private *file_priv)
218710be98a7SChris Wilson {
218810be98a7SChris Wilson 	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
218910be98a7SChris Wilson }
219010be98a7SChris Wilson 
2191a4c1cdd3SJason Ekstrand static inline struct i915_gem_context *
__context_lookup(struct drm_i915_file_private * file_priv,u32 id)2192a4c1cdd3SJason Ekstrand __context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2193a4c1cdd3SJason Ekstrand {
2194a4c1cdd3SJason Ekstrand 	struct i915_gem_context *ctx;
2195a4c1cdd3SJason Ekstrand 
2196a4c1cdd3SJason Ekstrand 	rcu_read_lock();
2197a4c1cdd3SJason Ekstrand 	ctx = xa_load(&file_priv->context_xa, id);
2198a4c1cdd3SJason Ekstrand 	if (ctx && !kref_get_unless_zero(&ctx->ref))
2199a4c1cdd3SJason Ekstrand 		ctx = NULL;
2200a4c1cdd3SJason Ekstrand 	rcu_read_unlock();
2201a4c1cdd3SJason Ekstrand 
2202a4c1cdd3SJason Ekstrand 	return ctx;
2203a4c1cdd3SJason Ekstrand }
2204a4c1cdd3SJason Ekstrand 
2205a4c1cdd3SJason Ekstrand static struct i915_gem_context *
finalize_create_context_locked(struct drm_i915_file_private * file_priv,struct i915_gem_proto_context * pc,u32 id)2206a4c1cdd3SJason Ekstrand finalize_create_context_locked(struct drm_i915_file_private *file_priv,
2207a4c1cdd3SJason Ekstrand 			       struct i915_gem_proto_context *pc, u32 id)
2208a4c1cdd3SJason Ekstrand {
2209a4c1cdd3SJason Ekstrand 	struct i915_gem_context *ctx;
2210a4c1cdd3SJason Ekstrand 	void *old;
2211a4c1cdd3SJason Ekstrand 
2212a4c1cdd3SJason Ekstrand 	lockdep_assert_held(&file_priv->proto_context_lock);
2213a4c1cdd3SJason Ekstrand 
2214badb3027SAndi Shyti 	ctx = i915_gem_create_context(file_priv->i915, pc);
2215a4c1cdd3SJason Ekstrand 	if (IS_ERR(ctx))
2216a4c1cdd3SJason Ekstrand 		return ctx;
2217a4c1cdd3SJason Ekstrand 
2218bed4b455SRob Clark 	/*
2219bed4b455SRob Clark 	 * One for the xarray and one for the caller.  We need to grab
2220bed4b455SRob Clark 	 * the reference *prior* to making the ctx visble to userspace
2221bed4b455SRob Clark 	 * in gem_context_register(), as at any point after that
2222bed4b455SRob Clark 	 * userspace can try to race us with another thread destroying
2223bed4b455SRob Clark 	 * the context under our feet.
2224bed4b455SRob Clark 	 */
2225bed4b455SRob Clark 	i915_gem_context_get(ctx);
2226bed4b455SRob Clark 
2227a4c1cdd3SJason Ekstrand 	gem_context_register(ctx, file_priv, id);
2228a4c1cdd3SJason Ekstrand 
2229a4c1cdd3SJason Ekstrand 	old = xa_erase(&file_priv->proto_context_xa, id);
2230a4c1cdd3SJason Ekstrand 	GEM_BUG_ON(old != pc);
2231badb3027SAndi Shyti 	proto_context_close(file_priv->i915, pc);
2232a4c1cdd3SJason Ekstrand 
2233bed4b455SRob Clark 	return ctx;
2234a4c1cdd3SJason Ekstrand }
2235a4c1cdd3SJason Ekstrand 
2236a4c1cdd3SJason Ekstrand struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private * file_priv,u32 id)2237a4c1cdd3SJason Ekstrand i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2238a4c1cdd3SJason Ekstrand {
2239a4c1cdd3SJason Ekstrand 	struct i915_gem_proto_context *pc;
2240a4c1cdd3SJason Ekstrand 	struct i915_gem_context *ctx;
2241a4c1cdd3SJason Ekstrand 
2242a4c1cdd3SJason Ekstrand 	ctx = __context_lookup(file_priv, id);
2243a4c1cdd3SJason Ekstrand 	if (ctx)
2244a4c1cdd3SJason Ekstrand 		return ctx;
2245a4c1cdd3SJason Ekstrand 
2246a4c1cdd3SJason Ekstrand 	mutex_lock(&file_priv->proto_context_lock);
2247a4c1cdd3SJason Ekstrand 	/* Try one more time under the lock */
2248a4c1cdd3SJason Ekstrand 	ctx = __context_lookup(file_priv, id);
2249a4c1cdd3SJason Ekstrand 	if (!ctx) {
2250a4c1cdd3SJason Ekstrand 		pc = xa_load(&file_priv->proto_context_xa, id);
2251a4c1cdd3SJason Ekstrand 		if (!pc)
2252a4c1cdd3SJason Ekstrand 			ctx = ERR_PTR(-ENOENT);
2253a4c1cdd3SJason Ekstrand 		else
2254a4c1cdd3SJason Ekstrand 			ctx = finalize_create_context_locked(file_priv, pc, id);
2255a4c1cdd3SJason Ekstrand 	}
2256a4c1cdd3SJason Ekstrand 	mutex_unlock(&file_priv->proto_context_lock);
2257a4c1cdd3SJason Ekstrand 
2258a4c1cdd3SJason Ekstrand 	return ctx;
2259a4c1cdd3SJason Ekstrand }
2260a4c1cdd3SJason Ekstrand 
i915_gem_context_create_ioctl(struct drm_device * dev,void * data,struct drm_file * file)226110be98a7SChris Wilson int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
226210be98a7SChris Wilson 				  struct drm_file *file)
226310be98a7SChris Wilson {
226410be98a7SChris Wilson 	struct drm_i915_private *i915 = to_i915(dev);
226510be98a7SChris Wilson 	struct drm_i915_gem_context_create_ext *args = data;
226610be98a7SChris Wilson 	struct create_ext ext_data;
226710be98a7SChris Wilson 	int ret;
2268c100777cSTvrtko Ursulin 	u32 id;
226910be98a7SChris Wilson 
227010be98a7SChris Wilson 	if (!DRIVER_CAPS(i915)->has_logical_contexts)
227110be98a7SChris Wilson 		return -ENODEV;
227210be98a7SChris Wilson 
227310be98a7SChris Wilson 	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
227410be98a7SChris Wilson 		return -EINVAL;
227510be98a7SChris Wilson 
22761a9c4db4SMichał Winiarski 	ret = intel_gt_terminally_wedged(to_gt(i915));
227710be98a7SChris Wilson 	if (ret)
227810be98a7SChris Wilson 		return ret;
227910be98a7SChris Wilson 
228010be98a7SChris Wilson 	ext_data.fpriv = file->driver_priv;
228110be98a7SChris Wilson 	if (client_is_banned(ext_data.fpriv)) {
2282baa89ba3SWambui Karuga 		drm_dbg(&i915->drm,
2283baa89ba3SWambui Karuga 			"client %s[%d] banned from creating ctx\n",
2284ba16a48aSTvrtko Ursulin 			current->comm, task_pid_nr(current));
228510be98a7SChris Wilson 		return -EIO;
228610be98a7SChris Wilson 	}
228710be98a7SChris Wilson 
2288d4433c76SJason Ekstrand 	ext_data.pc = proto_context_create(i915, args->flags);
2289d4433c76SJason Ekstrand 	if (IS_ERR(ext_data.pc))
2290d4433c76SJason Ekstrand 		return PTR_ERR(ext_data.pc);
229110be98a7SChris Wilson 
229210be98a7SChris Wilson 	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
229310be98a7SChris Wilson 		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
229410be98a7SChris Wilson 					   create_extensions,
229510be98a7SChris Wilson 					   ARRAY_SIZE(create_extensions),
229610be98a7SChris Wilson 					   &ext_data);
2297a4c1cdd3SJason Ekstrand 		if (ret)
2298a4c1cdd3SJason Ekstrand 			goto err_pc;
229910be98a7SChris Wilson 	}
230010be98a7SChris Wilson 
2301ca06f936SJason Ekstrand 	if (GRAPHICS_VER(i915) > 12) {
2302ca06f936SJason Ekstrand 		struct i915_gem_context *ctx;
2303ca06f936SJason Ekstrand 
2304ca06f936SJason Ekstrand 		/* Get ourselves a context ID */
2305ca06f936SJason Ekstrand 		ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
2306ca06f936SJason Ekstrand 			       xa_limit_32b, GFP_KERNEL);
2307ca06f936SJason Ekstrand 		if (ret)
2308ca06f936SJason Ekstrand 			goto err_pc;
2309ca06f936SJason Ekstrand 
2310ca06f936SJason Ekstrand 		ctx = i915_gem_create_context(i915, ext_data.pc);
2311ca06f936SJason Ekstrand 		if (IS_ERR(ctx)) {
2312ca06f936SJason Ekstrand 			ret = PTR_ERR(ctx);
2313ca06f936SJason Ekstrand 			goto err_pc;
2314ca06f936SJason Ekstrand 		}
2315ca06f936SJason Ekstrand 
2316d3ac8d42SDaniele Ceraolo Spurio 		proto_context_close(i915, ext_data.pc);
2317ca06f936SJason Ekstrand 		gem_context_register(ctx, ext_data.fpriv, id);
2318ca06f936SJason Ekstrand 	} else {
2319a4c1cdd3SJason Ekstrand 		ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
232010be98a7SChris Wilson 		if (ret < 0)
2321a4c1cdd3SJason Ekstrand 			goto err_pc;
2322ca06f936SJason Ekstrand 	}
232310be98a7SChris Wilson 
2324c100777cSTvrtko Ursulin 	args->ctx_id = id;
232510be98a7SChris Wilson 
232610be98a7SChris Wilson 	return 0;
232710be98a7SChris Wilson 
2328a4c1cdd3SJason Ekstrand err_pc:
2329d3ac8d42SDaniele Ceraolo Spurio 	proto_context_close(i915, ext_data.pc);
233010be98a7SChris Wilson 	return ret;
233110be98a7SChris Wilson }
233210be98a7SChris Wilson 
i915_gem_context_destroy_ioctl(struct drm_device * dev,void * data,struct drm_file * file)233310be98a7SChris Wilson int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
233410be98a7SChris Wilson 				   struct drm_file *file)
233510be98a7SChris Wilson {
233610be98a7SChris Wilson 	struct drm_i915_gem_context_destroy *args = data;
233710be98a7SChris Wilson 	struct drm_i915_file_private *file_priv = file->driver_priv;
2338a4c1cdd3SJason Ekstrand 	struct i915_gem_proto_context *pc;
233910be98a7SChris Wilson 	struct i915_gem_context *ctx;
234010be98a7SChris Wilson 
234110be98a7SChris Wilson 	if (args->pad != 0)
234210be98a7SChris Wilson 		return -EINVAL;
234310be98a7SChris Wilson 
234410be98a7SChris Wilson 	if (!args->ctx_id)
234510be98a7SChris Wilson 		return -ENOENT;
234610be98a7SChris Wilson 
2347a4c1cdd3SJason Ekstrand 	/* We need to hold the proto-context lock here to prevent races
2348a4c1cdd3SJason Ekstrand 	 * with finalize_create_context_locked().
2349a4c1cdd3SJason Ekstrand 	 */
2350a4c1cdd3SJason Ekstrand 	mutex_lock(&file_priv->proto_context_lock);
2351c100777cSTvrtko Ursulin 	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2352a4c1cdd3SJason Ekstrand 	pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
2353a4c1cdd3SJason Ekstrand 	mutex_unlock(&file_priv->proto_context_lock);
235410be98a7SChris Wilson 
2355a4c1cdd3SJason Ekstrand 	if (!ctx && !pc)
2356a4c1cdd3SJason Ekstrand 		return -ENOENT;
2357a4c1cdd3SJason Ekstrand 	GEM_WARN_ON(ctx && pc);
2358a4c1cdd3SJason Ekstrand 
2359a4c1cdd3SJason Ekstrand 	if (pc)
2360badb3027SAndi Shyti 		proto_context_close(file_priv->i915, pc);
2361a4c1cdd3SJason Ekstrand 
2362a4c1cdd3SJason Ekstrand 	if (ctx)
236310be98a7SChris Wilson 		context_close(ctx);
2364a4c1cdd3SJason Ekstrand 
236510be98a7SChris Wilson 	return 0;
236610be98a7SChris Wilson }
236710be98a7SChris Wilson 
/*
 * get_sseu - report the current slice/subslice/EU (SSEU) configuration
 * of one of the context's engines back to userspace.
 * @ctx: context whose engine is being queried
 * @args: GETPARAM ioctl argument; args->value is a userspace pointer to
 *        a struct drm_i915_gem_context_param_sseu
 *
 * Returns 0 on success, -EINVAL on malformed input, -EFAULT on a bad
 * user pointer, or the error from looking up / locking the engine.
 */
static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	/*
	 * Size probe: userspace passes size == 0 to discover the struct
	 * size it must supply (reported at "out:" below). Any non-zero
	 * size must be at least big enough to hold the full struct.
	 */
	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	/* Reserved field must be zero for forward compatibility. */
	if (user_sseu.rsvd)
		return -EINVAL;

	/* Only the engine-index flag is defined; reject unknown flags. */
	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	/* Resolve the engine either by class/instance or by index. */
	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	/* Snapshot the SSEU state while holding the pinned lock. */
	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	/* Always report the struct size we expect/filled. */
	args->size = sizeof(user_sseu);

	return 0;
}
242210be98a7SChris Wilson 
/*
 * i915_gem_context_getparam_ioctl - DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM
 *
 * Looks up the context named by args->ctx_id and reports the requested
 * parameter in args->value (and, for SSEU, via the user pointer held in
 * args->value). Returns 0 or a negative errno.
 */
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	int ret = 0;

	/* Takes a reference on the context; dropped at the end. */
	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (args->param) {
	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		/* Report the total size of the VM used for execbuf. */
		vm = i915_gem_context_get_eb_vm(ctx);
		args->value = vm->total;
		i915_vm_put(vm);

		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
		ret = get_protected(ctx, args);
		break;

	/* Deprecated/removed params fall through to -EINVAL. */
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_ENGINES:
	case I915_CONTEXT_PARAM_RINGSIZE:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}
249410be98a7SChris Wilson 
/*
 * i915_gem_context_setparam_ioctl - DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM
 *
 * Sets a parameter either on a finalized context or, if the context has
 * not yet been finalized, on its proto-context. The proto-context lock
 * is held across the lookup so the context cannot be finalized
 * underneath us.
 */
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;
	int ret = 0;

	mutex_lock(&file_priv->proto_context_lock);
	ctx = __context_lookup(file_priv, args->ctx_id);
	if (!ctx) {
		/* Not finalized yet: try the proto-context instead. */
		pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
		if (pc) {
			/* Contexts should be finalized inside
			 * GEM_CONTEXT_CREATE starting with graphics
			 * version 13.
			 */
			WARN_ON(GRAPHICS_VER(file_priv->i915) > 12);
			ret = set_proto_ctx_param(file_priv, pc, args);
		} else {
			ret = -ENOENT;
		}
	}
	mutex_unlock(&file_priv->proto_context_lock);

	/* Finalized contexts are updated outside the proto-context lock. */
	if (ctx) {
		ret = ctx_setparam(file_priv, ctx, args);
		i915_gem_context_put(ctx);
	}

	return ret;
}
252810be98a7SChris Wilson 
/*
 * i915_gem_context_reset_stats_ioctl - DRM_IOCTL_I915_GET_RESET_STATS
 *
 * Reports GPU reset statistics for the context named by args->ctx_id:
 * the per-context guilty/pending hang counts and, for CAP_SYS_ADMIN
 * callers only, the global device reset count.
 */
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;

	/* No flags or padding bits are defined; they must be zero. */
	if (args->flags || args->pad)
		return -EINVAL;

	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	/* Global reset count could leak cross-client info; gate it. */
	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	i915_gem_context_put(ctx);
	return 0;
}
256110be98a7SChris Wilson 
256210be98a7SChris Wilson /* GEM context-engines iterator: for_each_gem_engine() */
256310be98a7SChris Wilson struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter * it)256410be98a7SChris Wilson i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
256510be98a7SChris Wilson {
256610be98a7SChris Wilson 	const struct i915_gem_engines *e = it->engines;
256710be98a7SChris Wilson 	struct intel_context *ctx;
256810be98a7SChris Wilson 
2569130a95e9SChris Wilson 	if (unlikely(!e))
2570130a95e9SChris Wilson 		return NULL;
2571130a95e9SChris Wilson 
257210be98a7SChris Wilson 	do {
257310be98a7SChris Wilson 		if (it->idx >= e->num_engines)
257410be98a7SChris Wilson 			return NULL;
257510be98a7SChris Wilson 
257610be98a7SChris Wilson 		ctx = e->engines[it->idx++];
257710be98a7SChris Wilson 	} while (!ctx);
257810be98a7SChris Wilson 
257910be98a7SChris Wilson 	return ctx;
258010be98a7SChris Wilson }
258110be98a7SChris Wilson 
258210be98a7SChris Wilson #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
258310be98a7SChris Wilson #include "selftests/mock_context.c"
258410be98a7SChris Wilson #include "selftests/i915_gem_context.c"
258510be98a7SChris Wilson #endif
258610be98a7SChris Wilson 
/* Module teardown: release the lut-handle slab cache. */
void i915_gem_context_module_exit(void)
{
	kmem_cache_destroy(slab_luts);
}
259110be98a7SChris Wilson 
i915_gem_context_module_init(void)2592a6270d1dSDaniel Vetter int __init i915_gem_context_module_init(void)
259310be98a7SChris Wilson {
2594a6270d1dSDaniel Vetter 	slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2595a6270d1dSDaniel Vetter 	if (!slab_luts)
259610be98a7SChris Wilson 		return -ENOMEM;
259710be98a7SChris Wilson 
259810be98a7SChris Wilson 	return 0;
259910be98a7SChris Wilson }
2600