110be98a7SChris Wilson /*
210be98a7SChris Wilson  * SPDX-License-Identifier: MIT
310be98a7SChris Wilson  *
410be98a7SChris Wilson  * Copyright © 2011-2012 Intel Corporation
510be98a7SChris Wilson  */
610be98a7SChris Wilson 
710be98a7SChris Wilson /*
810be98a7SChris Wilson  * This file implements HW context support. On gen5+ a HW context consists of an
910be98a7SChris Wilson  * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
1210be98a7SChris Wilson  * something like a context does exist for the media ring, the code only
1310be98a7SChris Wilson  * supports contexts for the render ring.
1410be98a7SChris Wilson  *
1510be98a7SChris Wilson  * In software, there is a distinction between contexts created by the user,
1610be98a7SChris Wilson  * and the default HW context. The default HW context is used by GPU clients
1710be98a7SChris Wilson  * that do not request setup of their own hardware context. The default
1810be98a7SChris Wilson  * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
2010be98a7SChris Wilson  * The default context only exists to give the GPU some offset to load as the
2110be98a7SChris Wilson  * current to invoke a save of the context we actually care about. In fact, the
2210be98a7SChris Wilson  * code could likely be constructed, albeit in a more complicated fashion, to
2310be98a7SChris Wilson  * never use the default context, though that limits the driver's ability to
2410be98a7SChris Wilson  * swap out, and/or destroy other contexts.
2510be98a7SChris Wilson  *
2610be98a7SChris Wilson  * All other contexts are created as a request by the GPU client. These contexts
2710be98a7SChris Wilson  * store GPU state, and thus allow GPU clients to not re-emit state (and
2810be98a7SChris Wilson  * potentially query certain state) at any time. The kernel driver makes
2910be98a7SChris Wilson  * certain that the appropriate commands are inserted.
3010be98a7SChris Wilson  *
3110be98a7SChris Wilson  * The context life cycle is semi-complicated in that context BOs may live
3210be98a7SChris Wilson  * longer than the context itself because of the way the hardware, and object
3310be98a7SChris Wilson  * tracking works. Below is a very crude representation of the state machine
3410be98a7SChris Wilson  * describing the context life.
3510be98a7SChris Wilson  *                                         refcount     pincount     active
3610be98a7SChris Wilson  * S0: initial state                          0            0           0
3710be98a7SChris Wilson  * S1: context created                        1            0           0
3810be98a7SChris Wilson  * S2: context is currently running           2            1           X
3910be98a7SChris Wilson  * S3: GPU referenced, but not current        2            0           1
4010be98a7SChris Wilson  * S4: context is current, but destroyed      1            1           0
4110be98a7SChris Wilson  * S5: like S3, but destroyed                 1            0           1
4210be98a7SChris Wilson  *
4310be98a7SChris Wilson  * The most common (but not all) transitions:
4410be98a7SChris Wilson  * S0->S1: client creates a context
4510be98a7SChris Wilson  * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
4710be98a7SChris Wilson  * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
4910be98a7SChris Wilson  * S2->S4: context destroy called with current context
5010be98a7SChris Wilson  * S3->S5->S0: destroy path
5110be98a7SChris Wilson  * S4->S5->S0: destroy path on current context
5210be98a7SChris Wilson  *
5310be98a7SChris Wilson  * There are two confusing terms used above:
5410be98a7SChris Wilson  *  The "current context" means the context which is currently running on the
5510be98a7SChris Wilson  *  GPU. The GPU has loaded its state already and has stored away the gtt
5610be98a7SChris Wilson  *  offset of the BO. The GPU is not actively referencing the data at this
5710be98a7SChris Wilson  *  offset, but it will on the next context switch. The only way to avoid this
5810be98a7SChris Wilson  *  is to do a GPU reset.
5910be98a7SChris Wilson  *
 *  An "active context" is one which was previously the "current context" and is
6110be98a7SChris Wilson  *  on the active list waiting for the next context switch to occur. Until this
6210be98a7SChris Wilson  *  happens, the object must remain at the same gtt offset. It is therefore
6310be98a7SChris Wilson  *  possible to destroy a context, but it is still active.
6410be98a7SChris Wilson  *
6510be98a7SChris Wilson  */
6610be98a7SChris Wilson 
6710be98a7SChris Wilson #include <linux/log2.h>
6810be98a7SChris Wilson #include <linux/nospec.h>
6910be98a7SChris Wilson 
7000dae4d3SJason Ekstrand #include <drm/drm_syncobj.h>
7100dae4d3SJason Ekstrand 
722c86e55dSMatthew Auld #include "gt/gen6_ppgtt.h"
739f3ccd40SChris Wilson #include "gt/intel_context.h"
7488be76cdSChris Wilson #include "gt/intel_context_param.h"
752e0986a5SChris Wilson #include "gt/intel_engine_heartbeat.h"
76750e76b4SChris Wilson #include "gt/intel_engine_user.h"
7745233ab2SChris Wilson #include "gt/intel_gpu_commands.h"
782871ea85SChris Wilson #include "gt/intel_ring.h"
7910be98a7SChris Wilson 
80d3ac8d42SDaniele Ceraolo Spurio #include "pxp/intel_pxp.h"
81d3ac8d42SDaniele Ceraolo Spurio 
8210be98a7SChris Wilson #include "i915_gem_context.h"
8310be98a7SChris Wilson #include "i915_trace.h"
8410be98a7SChris Wilson #include "i915_user_extensions.h"
8510be98a7SChris Wilson 
/*
 * Bitmask with one bit set per L3 slice on @dev.
 *
 * Fully parenthesized so the expansion binds correctly in any arithmetic
 * context (the previous form `(1 << n) - 1` mis-associated when the macro
 * was used inside a larger expression, e.g. multiplied or shifted).
 */
#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

/* Slab cache backing struct i915_lut_handle allocations. */
static struct kmem_cache *slab_luts;
8910be98a7SChris Wilson 
9010be98a7SChris Wilson struct i915_lut_handle *i915_lut_handle_alloc(void)
9110be98a7SChris Wilson {
92a6270d1dSDaniel Vetter 	return kmem_cache_alloc(slab_luts, GFP_KERNEL);
9310be98a7SChris Wilson }
9410be98a7SChris Wilson 
9510be98a7SChris Wilson void i915_lut_handle_free(struct i915_lut_handle *lut)
9610be98a7SChris Wilson {
97a6270d1dSDaniel Vetter 	return kmem_cache_free(slab_luts, lut);
9810be98a7SChris Wilson }
9910be98a7SChris Wilson 
/*
 * Drop every handle->vma lookup entry owned by @ctx.
 *
 * Walks the context's handles_vma radix tree under lut_mutex + RCU; for
 * each vma found, detaches the lut entry matching (ctx, handle) from the
 * object's lut_list, deletes the radix-tree slot, closes the vma and drops
 * the reference that lut entry held on the object.
 */
static void lut_close(struct i915_gem_context *ctx)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	mutex_lock(&ctx->lut_mutex);
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_lut_handle *lut;

		/* Skip objects whose last reference is already being dropped. */
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		/* Find (and unlink) the lut entry for this (ctx, handle) pair. */
		spin_lock(&obj->lut_lock);
		list_for_each_entry(lut, &obj->lut_list, obj_link) {
			if (lut->ctx != ctx)
				continue;

			if (lut->handle != iter.index)
				continue;

			list_del(&lut->obj_link);
			break;
		}
		spin_unlock(&obj->lut_lock);

		/* True iff the loop above found a match (did not run off the list). */
		if (&lut->obj_link != &obj->lut_list) {
			i915_lut_handle_free(lut);
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
			i915_vma_close(vma);
			i915_gem_object_put(obj); /* reference held by the lut entry */
		}

		i915_gem_object_put(obj); /* balances kref_get_unless_zero() above */
	}
	rcu_read_unlock();
	mutex_unlock(&ctx->lut_mutex);
}
14010be98a7SChris Wilson 
14110be98a7SChris Wilson static struct intel_context *
14210be98a7SChris Wilson lookup_user_engine(struct i915_gem_context *ctx,
14310be98a7SChris Wilson 		   unsigned long flags,
14410be98a7SChris Wilson 		   const struct i915_engine_class_instance *ci)
14510be98a7SChris Wilson #define LOOKUP_USER_INDEX BIT(0)
14610be98a7SChris Wilson {
14710be98a7SChris Wilson 	int idx;
14810be98a7SChris Wilson 
14910be98a7SChris Wilson 	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
15010be98a7SChris Wilson 		return ERR_PTR(-EINVAL);
15110be98a7SChris Wilson 
15210be98a7SChris Wilson 	if (!i915_gem_context_user_engines(ctx)) {
15310be98a7SChris Wilson 		struct intel_engine_cs *engine;
15410be98a7SChris Wilson 
15510be98a7SChris Wilson 		engine = intel_engine_lookup_user(ctx->i915,
15610be98a7SChris Wilson 						  ci->engine_class,
15710be98a7SChris Wilson 						  ci->engine_instance);
15810be98a7SChris Wilson 		if (!engine)
15910be98a7SChris Wilson 			return ERR_PTR(-EINVAL);
16010be98a7SChris Wilson 
161f1c4d157SChris Wilson 		idx = engine->legacy_idx;
16210be98a7SChris Wilson 	} else {
16310be98a7SChris Wilson 		idx = ci->engine_instance;
16410be98a7SChris Wilson 	}
16510be98a7SChris Wilson 
16610be98a7SChris Wilson 	return i915_gem_context_get_engine(ctx, idx);
16710be98a7SChris Wilson }
16810be98a7SChris Wilson 
169aaa5957cSJason Ekstrand static int validate_priority(struct drm_i915_private *i915,
170aaa5957cSJason Ekstrand 			     const struct drm_i915_gem_context_param *args)
171aaa5957cSJason Ekstrand {
172aaa5957cSJason Ekstrand 	s64 priority = args->value;
173aaa5957cSJason Ekstrand 
174aaa5957cSJason Ekstrand 	if (args->size)
175aaa5957cSJason Ekstrand 		return -EINVAL;
176aaa5957cSJason Ekstrand 
177aaa5957cSJason Ekstrand 	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
178aaa5957cSJason Ekstrand 		return -ENODEV;
179aaa5957cSJason Ekstrand 
180aaa5957cSJason Ekstrand 	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
181aaa5957cSJason Ekstrand 	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
182aaa5957cSJason Ekstrand 		return -EINVAL;
183aaa5957cSJason Ekstrand 
184aaa5957cSJason Ekstrand 	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
185aaa5957cSJason Ekstrand 	    !capable(CAP_SYS_NICE))
186aaa5957cSJason Ekstrand 		return -EPERM;
187aaa5957cSJason Ekstrand 
188aaa5957cSJason Ekstrand 	return 0;
189aaa5957cSJason Ekstrand }
190aaa5957cSJason Ekstrand 
191d3ac8d42SDaniele Ceraolo Spurio static void proto_context_close(struct drm_i915_private *i915,
192d3ac8d42SDaniele Ceraolo Spurio 				struct i915_gem_proto_context *pc)
193a34857dcSJason Ekstrand {
194d4433c76SJason Ekstrand 	int i;
195d4433c76SJason Ekstrand 
196d3ac8d42SDaniele Ceraolo Spurio 	if (pc->pxp_wakeref)
197d3ac8d42SDaniele Ceraolo Spurio 		intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
198a34857dcSJason Ekstrand 	if (pc->vm)
199a34857dcSJason Ekstrand 		i915_vm_put(pc->vm);
200d4433c76SJason Ekstrand 	if (pc->user_engines) {
201d4433c76SJason Ekstrand 		for (i = 0; i < pc->num_user_engines; i++)
202d4433c76SJason Ekstrand 			kfree(pc->user_engines[i].siblings);
203d4433c76SJason Ekstrand 		kfree(pc->user_engines);
204d4433c76SJason Ekstrand 	}
205a34857dcSJason Ekstrand 	kfree(pc);
206a34857dcSJason Ekstrand }
207a34857dcSJason Ekstrand 
208d4433c76SJason Ekstrand static int proto_context_set_persistence(struct drm_i915_private *i915,
209d4433c76SJason Ekstrand 					 struct i915_gem_proto_context *pc,
210d4433c76SJason Ekstrand 					 bool persist)
211d4433c76SJason Ekstrand {
212d4433c76SJason Ekstrand 	if (persist) {
213d4433c76SJason Ekstrand 		/*
214d4433c76SJason Ekstrand 		 * Only contexts that are short-lived [that will expire or be
215d4433c76SJason Ekstrand 		 * reset] are allowed to survive past termination. We require
216d4433c76SJason Ekstrand 		 * hangcheck to ensure that the persistent requests are healthy.
217d4433c76SJason Ekstrand 		 */
218d4433c76SJason Ekstrand 		if (!i915->params.enable_hangcheck)
219d4433c76SJason Ekstrand 			return -EINVAL;
220d4433c76SJason Ekstrand 
221d4433c76SJason Ekstrand 		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
222d4433c76SJason Ekstrand 	} else {
223d4433c76SJason Ekstrand 		/* To cancel a context we use "preempt-to-idle" */
224d4433c76SJason Ekstrand 		if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
225d4433c76SJason Ekstrand 			return -ENODEV;
226d4433c76SJason Ekstrand 
227d4433c76SJason Ekstrand 		/*
228d4433c76SJason Ekstrand 		 * If the cancel fails, we then need to reset, cleanly!
229d4433c76SJason Ekstrand 		 *
230d4433c76SJason Ekstrand 		 * If the per-engine reset fails, all hope is lost! We resort
231d4433c76SJason Ekstrand 		 * to a full GPU reset in that unlikely case, but realistically
232d4433c76SJason Ekstrand 		 * if the engine could not reset, the full reset does not fare
233d4433c76SJason Ekstrand 		 * much better. The damage has been done.
234d4433c76SJason Ekstrand 		 *
235d4433c76SJason Ekstrand 		 * However, if we cannot reset an engine by itself, we cannot
236d4433c76SJason Ekstrand 		 * cleanup a hanging persistent context without causing
237d4433c76SJason Ekstrand 		 * colateral damage, and we should not pretend we can by
238d4433c76SJason Ekstrand 		 * exposing the interface.
239d4433c76SJason Ekstrand 		 */
2401a9c4db4SMichał Winiarski 		if (!intel_has_reset_engine(to_gt(i915)))
241d4433c76SJason Ekstrand 			return -ENODEV;
242d4433c76SJason Ekstrand 
243d4433c76SJason Ekstrand 		pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
244d4433c76SJason Ekstrand 	}
245d4433c76SJason Ekstrand 
246d4433c76SJason Ekstrand 	return 0;
247d4433c76SJason Ekstrand }
248d4433c76SJason Ekstrand 
249d3ac8d42SDaniele Ceraolo Spurio static int proto_context_set_protected(struct drm_i915_private *i915,
250d3ac8d42SDaniele Ceraolo Spurio 				       struct i915_gem_proto_context *pc,
251d3ac8d42SDaniele Ceraolo Spurio 				       bool protected)
252d3ac8d42SDaniele Ceraolo Spurio {
253d3ac8d42SDaniele Ceraolo Spurio 	int ret = 0;
254d3ac8d42SDaniele Ceraolo Spurio 
255d3ac8d42SDaniele Ceraolo Spurio 	if (!protected) {
256d3ac8d42SDaniele Ceraolo Spurio 		pc->uses_protected_content = false;
2571a9c4db4SMichał Winiarski 	} else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
258d3ac8d42SDaniele Ceraolo Spurio 		ret = -ENODEV;
259d3ac8d42SDaniele Ceraolo Spurio 	} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
260d3ac8d42SDaniele Ceraolo Spurio 		   !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
261d3ac8d42SDaniele Ceraolo Spurio 		ret = -EPERM;
262d3ac8d42SDaniele Ceraolo Spurio 	} else {
263d3ac8d42SDaniele Ceraolo Spurio 		pc->uses_protected_content = true;
264d3ac8d42SDaniele Ceraolo Spurio 
265d3ac8d42SDaniele Ceraolo Spurio 		/*
266d3ac8d42SDaniele Ceraolo Spurio 		 * protected context usage requires the PXP session to be up,
267d3ac8d42SDaniele Ceraolo Spurio 		 * which in turn requires the device to be active.
268d3ac8d42SDaniele Ceraolo Spurio 		 */
269d3ac8d42SDaniele Ceraolo Spurio 		pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
27032271ecdSDaniele Ceraolo Spurio 
2711a9c4db4SMichał Winiarski 		if (!intel_pxp_is_active(&to_gt(i915)->pxp))
2721a9c4db4SMichał Winiarski 			ret = intel_pxp_start(&to_gt(i915)->pxp);
273d3ac8d42SDaniele Ceraolo Spurio 	}
274d3ac8d42SDaniele Ceraolo Spurio 
275d3ac8d42SDaniele Ceraolo Spurio 	return ret;
276d3ac8d42SDaniele Ceraolo Spurio }
277d3ac8d42SDaniele Ceraolo Spurio 
278a34857dcSJason Ekstrand static struct i915_gem_proto_context *
279a34857dcSJason Ekstrand proto_context_create(struct drm_i915_private *i915, unsigned int flags)
280a34857dcSJason Ekstrand {
281a34857dcSJason Ekstrand 	struct i915_gem_proto_context *pc, *err;
282a34857dcSJason Ekstrand 
283a34857dcSJason Ekstrand 	pc = kzalloc(sizeof(*pc), GFP_KERNEL);
284a34857dcSJason Ekstrand 	if (!pc)
285a34857dcSJason Ekstrand 		return ERR_PTR(-ENOMEM);
286a34857dcSJason Ekstrand 
287d4433c76SJason Ekstrand 	pc->num_user_engines = -1;
288d4433c76SJason Ekstrand 	pc->user_engines = NULL;
289a34857dcSJason Ekstrand 	pc->user_flags = BIT(UCONTEXT_BANNABLE) |
290a34857dcSJason Ekstrand 			 BIT(UCONTEXT_RECOVERABLE);
291a34857dcSJason Ekstrand 	if (i915->params.enable_hangcheck)
292a34857dcSJason Ekstrand 		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
293a34857dcSJason Ekstrand 	pc->sched.priority = I915_PRIORITY_NORMAL;
294a34857dcSJason Ekstrand 
295a34857dcSJason Ekstrand 	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
296a34857dcSJason Ekstrand 		if (!HAS_EXECLISTS(i915)) {
297a34857dcSJason Ekstrand 			err = ERR_PTR(-EINVAL);
298a34857dcSJason Ekstrand 			goto proto_close;
299a34857dcSJason Ekstrand 		}
300a34857dcSJason Ekstrand 		pc->single_timeline = true;
301a34857dcSJason Ekstrand 	}
302a34857dcSJason Ekstrand 
303a34857dcSJason Ekstrand 	return pc;
304a34857dcSJason Ekstrand 
305a34857dcSJason Ekstrand proto_close:
306d3ac8d42SDaniele Ceraolo Spurio 	proto_context_close(i915, pc);
307a34857dcSJason Ekstrand 	return err;
308a34857dcSJason Ekstrand }
309a34857dcSJason Ekstrand 
310a4c1cdd3SJason Ekstrand static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
311a4c1cdd3SJason Ekstrand 					 struct i915_gem_proto_context *pc,
312a4c1cdd3SJason Ekstrand 					 u32 *id)
313a4c1cdd3SJason Ekstrand {
314a4c1cdd3SJason Ekstrand 	int ret;
315a4c1cdd3SJason Ekstrand 	void *old;
316a4c1cdd3SJason Ekstrand 
317a4c1cdd3SJason Ekstrand 	lockdep_assert_held(&fpriv->proto_context_lock);
318a4c1cdd3SJason Ekstrand 
319a4c1cdd3SJason Ekstrand 	ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
320a4c1cdd3SJason Ekstrand 	if (ret)
321a4c1cdd3SJason Ekstrand 		return ret;
322a4c1cdd3SJason Ekstrand 
323a4c1cdd3SJason Ekstrand 	old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
324a4c1cdd3SJason Ekstrand 	if (xa_is_err(old)) {
325a4c1cdd3SJason Ekstrand 		xa_erase(&fpriv->context_xa, *id);
326a4c1cdd3SJason Ekstrand 		return xa_err(old);
327a4c1cdd3SJason Ekstrand 	}
328a4c1cdd3SJason Ekstrand 	WARN_ON(old);
329a4c1cdd3SJason Ekstrand 
330a4c1cdd3SJason Ekstrand 	return 0;
331a4c1cdd3SJason Ekstrand }
332a4c1cdd3SJason Ekstrand 
333a4c1cdd3SJason Ekstrand static int proto_context_register(struct drm_i915_file_private *fpriv,
334a4c1cdd3SJason Ekstrand 				  struct i915_gem_proto_context *pc,
335a4c1cdd3SJason Ekstrand 				  u32 *id)
336a4c1cdd3SJason Ekstrand {
337a4c1cdd3SJason Ekstrand 	int ret;
338a4c1cdd3SJason Ekstrand 
339a4c1cdd3SJason Ekstrand 	mutex_lock(&fpriv->proto_context_lock);
340a4c1cdd3SJason Ekstrand 	ret = proto_context_register_locked(fpriv, pc, id);
341a4c1cdd3SJason Ekstrand 	mutex_unlock(&fpriv->proto_context_lock);
342a4c1cdd3SJason Ekstrand 
343a4c1cdd3SJason Ekstrand 	return ret;
344a4c1cdd3SJason Ekstrand }
345a4c1cdd3SJason Ekstrand 
346d4433c76SJason Ekstrand static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
347d4433c76SJason Ekstrand 			    struct i915_gem_proto_context *pc,
348d4433c76SJason Ekstrand 			    const struct drm_i915_gem_context_param *args)
349d4433c76SJason Ekstrand {
350d4433c76SJason Ekstrand 	struct drm_i915_private *i915 = fpriv->dev_priv;
351d4433c76SJason Ekstrand 	struct i915_address_space *vm;
352d4433c76SJason Ekstrand 
353d4433c76SJason Ekstrand 	if (args->size)
354d4433c76SJason Ekstrand 		return -EINVAL;
355d4433c76SJason Ekstrand 
356d4433c76SJason Ekstrand 	if (!HAS_FULL_PPGTT(i915))
357d4433c76SJason Ekstrand 		return -ENODEV;
358d4433c76SJason Ekstrand 
359d4433c76SJason Ekstrand 	if (upper_32_bits(args->value))
360d4433c76SJason Ekstrand 		return -ENOENT;
361d4433c76SJason Ekstrand 
362d4433c76SJason Ekstrand 	vm = i915_gem_vm_lookup(fpriv, args->value);
363d4433c76SJason Ekstrand 	if (!vm)
364d4433c76SJason Ekstrand 		return -ENOENT;
365d4433c76SJason Ekstrand 
366d4433c76SJason Ekstrand 	if (pc->vm)
367d4433c76SJason Ekstrand 		i915_vm_put(pc->vm);
368d4433c76SJason Ekstrand 	pc->vm = vm;
369d4433c76SJason Ekstrand 
370d4433c76SJason Ekstrand 	return 0;
371d4433c76SJason Ekstrand }
372d4433c76SJason Ekstrand 
/* Shared state threaded through the CONTEXT_PARAM_ENGINES extension handlers. */
struct set_proto_ctx_engines {
	struct drm_i915_private *i915;
	unsigned num_engines;			/* number of entries in engines[] */
	struct i915_gem_proto_engine *engines;	/* slots being filled in by the extensions */
};
378d4433c76SJason Ekstrand 
/*
 * Handle the I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE extension.
 *
 * Validates the user-supplied extension and fills slot @idx of
 * set->engines with either a single physical engine (one sibling) or a
 * balanced virtual engine over the listed siblings. All fields come from
 * userspace and are copied/checked before use. Returns 0 or a negative
 * errno (-EEXIST if the slot is already occupied).
 */
static int
set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
			      void *data)
{
	struct i915_context_engines_load_balance __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	struct intel_engine_cs **siblings;
	u16 num_siblings, idx;
	unsigned int n;
	int err;

	if (!HAS_EXECLISTS(i915))
		return -ENODEV;

	if (get_user(idx, &ext->engine_index))
		return -EFAULT;

	if (idx >= set->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			idx, set->num_engines);
		return -EINVAL;
	}

	/* idx is user-controlled: clamp speculation before indexing. */
	idx = array_index_nospec(idx, set->num_engines);
	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", idx);
		return -EEXIST;
	}

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	/* Reserved fields must be zero. */
	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	err = check_user_mbz(&ext->mbz64);
	if (err)
		return err;

	/* No siblings: leave the slot untouched. */
	if (num_siblings == 0)
		return 0;

	siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
	if (!siblings)
		return -ENOMEM;

	for (n = 0; n < num_siblings; n++) {
		struct i915_engine_class_instance ci;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
			err = -EFAULT;
			goto err_siblings;
		}

		siblings[n] = intel_engine_lookup_user(i915,
						       ci.engine_class,
						       ci.engine_instance);
		if (!siblings[n]) {
			drm_dbg(&i915->drm,
				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			err = -EINVAL;
			goto err_siblings;
		}
	}

	if (num_siblings == 1) {
		/* A one-engine balance set degenerates to a physical engine. */
		set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
		set->engines[idx].engine = siblings[0];
		kfree(siblings);
	} else {
		/* The slot takes ownership of the siblings array. */
		set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
		set->engines[idx].num_siblings = num_siblings;
		set->engines[idx].siblings = siblings;
	}

	return 0;

err_siblings:
	kfree(siblings);

	return err;
}
466d4433c76SJason Ekstrand 
/*
 * Handle the I915_CONTEXT_ENGINES_EXT_BOND extension.
 *
 * Validates the user-supplied bonding description against slot @idx of
 * set->engines: the slot must hold a physical engine, the master and every
 * bonded engine must exist, and the feature is refused on gen12+ (except
 * TGL/RKL/ADL-S) and under GuC submission. Note that beyond validation no
 * bonding state is recorded here — the looked-up engines are discarded.
 */
static int
set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_bond __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	struct i915_engine_class_instance ci;
	struct intel_engine_cs *master;
	u16 idx, num_bonds;
	int err, n;

	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
	    !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
		drm_dbg(&i915->drm,
			"Bonding on gen12+ aside from TGL, RKL, and ADL_S not supported\n");
		return -ENODEV;
	}

	if (get_user(idx, &ext->virtual_index))
		return -EFAULT;

	if (idx >= set->num_engines) {
		drm_dbg(&i915->drm,
			"Invalid index for virtual engine: %d >= %d\n",
			idx, set->num_engines);
		return -EINVAL;
	}

	/* idx is user-controlled: clamp speculation before indexing. */
	idx = array_index_nospec(idx, set->num_engines);
	if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
		return -EINVAL;
	}

	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
		drm_dbg(&i915->drm,
			"Bonding with virtual engines not allowed\n");
		return -EINVAL;
	}

	/* Reserved fields must be zero. */
	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
		return -EFAULT;

	master = intel_engine_lookup_user(i915,
					  ci.engine_class,
					  ci.engine_instance);
	if (!master) {
		drm_dbg(&i915->drm,
			"Unrecognised master engine: { class:%u, instance:%u }\n",
			ci.engine_class, ci.engine_instance);
		return -EINVAL;
	}

	if (intel_engine_uses_guc(master)) {
		DRM_DEBUG("bonding extension not supported with GuC submission");
		return -ENODEV;
	}

	if (get_user(num_bonds, &ext->num_bonds))
		return -EFAULT;

	/* Each bond is only looked up to confirm it exists. */
	for (n = 0; n < num_bonds; n++) {
		struct intel_engine_cs *bond;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
			return -EFAULT;

		bond = intel_engine_lookup_user(i915,
						ci.engine_class,
						ci.engine_instance);
		if (!bond) {
			drm_dbg(&i915->drm,
				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
				n, ci.engine_class, ci.engine_instance);
			return -EINVAL;
		}
	}

	return 0;
}
558d4433c76SJason Ekstrand 
559e5e32171SMatthew Brost static int
560e5e32171SMatthew Brost set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
561e5e32171SMatthew Brost 				      void *data)
562e5e32171SMatthew Brost {
563e5e32171SMatthew Brost 	struct i915_context_engines_parallel_submit __user *ext =
564e5e32171SMatthew Brost 		container_of_user(base, typeof(*ext), base);
565e5e32171SMatthew Brost 	const struct set_proto_ctx_engines *set = data;
566e5e32171SMatthew Brost 	struct drm_i915_private *i915 = set->i915;
567e5e32171SMatthew Brost 	u64 flags;
568e5e32171SMatthew Brost 	int err = 0, n, i, j;
569e5e32171SMatthew Brost 	u16 slot, width, num_siblings;
570e5e32171SMatthew Brost 	struct intel_engine_cs **siblings = NULL;
571e5e32171SMatthew Brost 	intel_engine_mask_t prev_mask;
572e5e32171SMatthew Brost 
573e5e32171SMatthew Brost 	if (get_user(slot, &ext->engine_index))
574e5e32171SMatthew Brost 		return -EFAULT;
575e5e32171SMatthew Brost 
576e5e32171SMatthew Brost 	if (get_user(width, &ext->width))
577e5e32171SMatthew Brost 		return -EFAULT;
578e5e32171SMatthew Brost 
579e5e32171SMatthew Brost 	if (get_user(num_siblings, &ext->num_siblings))
580e5e32171SMatthew Brost 		return -EFAULT;
581e5e32171SMatthew Brost 
582*a88afcfaSMatthew Brost 	if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) &&
583*a88afcfaSMatthew Brost 	    num_siblings != 1) {
584*a88afcfaSMatthew Brost 		drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",
585*a88afcfaSMatthew Brost 			num_siblings);
586*a88afcfaSMatthew Brost 		return -EINVAL;
587*a88afcfaSMatthew Brost 	}
588*a88afcfaSMatthew Brost 
589e5e32171SMatthew Brost 	if (slot >= set->num_engines) {
590e5e32171SMatthew Brost 		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
591e5e32171SMatthew Brost 			slot, set->num_engines);
592e5e32171SMatthew Brost 		return -EINVAL;
593e5e32171SMatthew Brost 	}
594e5e32171SMatthew Brost 
595e5e32171SMatthew Brost 	if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
596e5e32171SMatthew Brost 		drm_dbg(&i915->drm,
597e5e32171SMatthew Brost 			"Invalid placement[%d], already occupied\n", slot);
598e5e32171SMatthew Brost 		return -EINVAL;
599e5e32171SMatthew Brost 	}
600e5e32171SMatthew Brost 
601e5e32171SMatthew Brost 	if (get_user(flags, &ext->flags))
602e5e32171SMatthew Brost 		return -EFAULT;
603e5e32171SMatthew Brost 
604e5e32171SMatthew Brost 	if (flags) {
605e5e32171SMatthew Brost 		drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
606e5e32171SMatthew Brost 		return -EINVAL;
607e5e32171SMatthew Brost 	}
608e5e32171SMatthew Brost 
609e5e32171SMatthew Brost 	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
610e5e32171SMatthew Brost 		err = check_user_mbz(&ext->mbz64[n]);
611e5e32171SMatthew Brost 		if (err)
612e5e32171SMatthew Brost 			return err;
613e5e32171SMatthew Brost 	}
614e5e32171SMatthew Brost 
615e5e32171SMatthew Brost 	if (width < 2) {
616e5e32171SMatthew Brost 		drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
617e5e32171SMatthew Brost 		return -EINVAL;
618e5e32171SMatthew Brost 	}
619e5e32171SMatthew Brost 
620e5e32171SMatthew Brost 	if (num_siblings < 1) {
621e5e32171SMatthew Brost 		drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
622e5e32171SMatthew Brost 			num_siblings);
623e5e32171SMatthew Brost 		return -EINVAL;
624e5e32171SMatthew Brost 	}
625e5e32171SMatthew Brost 
626e5e32171SMatthew Brost 	siblings = kmalloc_array(num_siblings * width,
627e5e32171SMatthew Brost 				 sizeof(*siblings),
628e5e32171SMatthew Brost 				 GFP_KERNEL);
629e5e32171SMatthew Brost 	if (!siblings)
630e5e32171SMatthew Brost 		return -ENOMEM;
631e5e32171SMatthew Brost 
632e5e32171SMatthew Brost 	/* Create contexts / engines */
633e5e32171SMatthew Brost 	for (i = 0; i < width; ++i) {
634e5e32171SMatthew Brost 		intel_engine_mask_t current_mask = 0;
635e5e32171SMatthew Brost 		struct i915_engine_class_instance prev_engine;
636e5e32171SMatthew Brost 
637e5e32171SMatthew Brost 		for (j = 0; j < num_siblings; ++j) {
638e5e32171SMatthew Brost 			struct i915_engine_class_instance ci;
639e5e32171SMatthew Brost 
640e5e32171SMatthew Brost 			n = i * num_siblings + j;
641e5e32171SMatthew Brost 			if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
642e5e32171SMatthew Brost 				err = -EFAULT;
643e5e32171SMatthew Brost 				goto out_err;
644e5e32171SMatthew Brost 			}
645e5e32171SMatthew Brost 
646e5e32171SMatthew Brost 			siblings[n] =
647e5e32171SMatthew Brost 				intel_engine_lookup_user(i915, ci.engine_class,
648e5e32171SMatthew Brost 							 ci.engine_instance);
649e5e32171SMatthew Brost 			if (!siblings[n]) {
650e5e32171SMatthew Brost 				drm_dbg(&i915->drm,
651e5e32171SMatthew Brost 					"Invalid sibling[%d]: { class:%d, inst:%d }\n",
652e5e32171SMatthew Brost 					n, ci.engine_class, ci.engine_instance);
653e5e32171SMatthew Brost 				err = -EINVAL;
654e5e32171SMatthew Brost 				goto out_err;
655e5e32171SMatthew Brost 			}
656e5e32171SMatthew Brost 
657e5e32171SMatthew Brost 			if (n) {
658e5e32171SMatthew Brost 				if (prev_engine.engine_class !=
659e5e32171SMatthew Brost 				    ci.engine_class) {
660e5e32171SMatthew Brost 					drm_dbg(&i915->drm,
661e5e32171SMatthew Brost 						"Mismatched class %d, %d\n",
662e5e32171SMatthew Brost 						prev_engine.engine_class,
663e5e32171SMatthew Brost 						ci.engine_class);
664e5e32171SMatthew Brost 					err = -EINVAL;
665e5e32171SMatthew Brost 					goto out_err;
666e5e32171SMatthew Brost 				}
667e5e32171SMatthew Brost 			}
668e5e32171SMatthew Brost 
669e5e32171SMatthew Brost 			prev_engine = ci;
670e5e32171SMatthew Brost 			current_mask |= siblings[n]->logical_mask;
671e5e32171SMatthew Brost 		}
672e5e32171SMatthew Brost 
673e5e32171SMatthew Brost 		if (i > 0) {
674e5e32171SMatthew Brost 			if (current_mask != prev_mask << 1) {
675e5e32171SMatthew Brost 				drm_dbg(&i915->drm,
676e5e32171SMatthew Brost 					"Non contiguous logical mask 0x%x, 0x%x\n",
677e5e32171SMatthew Brost 					prev_mask, current_mask);
678e5e32171SMatthew Brost 				err = -EINVAL;
679e5e32171SMatthew Brost 				goto out_err;
680e5e32171SMatthew Brost 			}
681e5e32171SMatthew Brost 		}
682e5e32171SMatthew Brost 		prev_mask = current_mask;
683e5e32171SMatthew Brost 	}
684e5e32171SMatthew Brost 
685e5e32171SMatthew Brost 	set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
686e5e32171SMatthew Brost 	set->engines[slot].num_siblings = num_siblings;
687e5e32171SMatthew Brost 	set->engines[slot].width = width;
688e5e32171SMatthew Brost 	set->engines[slot].siblings = siblings;
689e5e32171SMatthew Brost 
690e5e32171SMatthew Brost 	return 0;
691e5e32171SMatthew Brost 
692e5e32171SMatthew Brost out_err:
693e5e32171SMatthew Brost 	kfree(siblings);
694e5e32171SMatthew Brost 
695e5e32171SMatthew Brost 	return err;
696e5e32171SMatthew Brost }
697e5e32171SMatthew Brost 
/*
 * Dispatch table for the user extensions chained off
 * I915_CONTEXT_PARAM_ENGINES, indexed by extension id.
 */
static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
	[I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
	[I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
		set_proto_ctx_engines_parallel_submit,
};
704d4433c76SJason Ekstrand 
705d4433c76SJason Ekstrand static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
706d4433c76SJason Ekstrand 			         struct i915_gem_proto_context *pc,
707d4433c76SJason Ekstrand 			         const struct drm_i915_gem_context_param *args)
708d4433c76SJason Ekstrand {
709d4433c76SJason Ekstrand 	struct drm_i915_private *i915 = fpriv->dev_priv;
710d4433c76SJason Ekstrand 	struct set_proto_ctx_engines set = { .i915 = i915 };
711d4433c76SJason Ekstrand 	struct i915_context_param_engines __user *user =
712d4433c76SJason Ekstrand 		u64_to_user_ptr(args->value);
713d4433c76SJason Ekstrand 	unsigned int n;
714d4433c76SJason Ekstrand 	u64 extensions;
715d4433c76SJason Ekstrand 	int err;
716d4433c76SJason Ekstrand 
717d4433c76SJason Ekstrand 	if (pc->num_user_engines >= 0) {
718d4433c76SJason Ekstrand 		drm_dbg(&i915->drm, "Cannot set engines twice");
719d4433c76SJason Ekstrand 		return -EINVAL;
720d4433c76SJason Ekstrand 	}
721d4433c76SJason Ekstrand 
722d4433c76SJason Ekstrand 	if (args->size < sizeof(*user) ||
723d4433c76SJason Ekstrand 	    !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
724d4433c76SJason Ekstrand 		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
725d4433c76SJason Ekstrand 			args->size);
726d4433c76SJason Ekstrand 		return -EINVAL;
727d4433c76SJason Ekstrand 	}
728d4433c76SJason Ekstrand 
729d4433c76SJason Ekstrand 	set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
730d4433c76SJason Ekstrand 	/* RING_MASK has no shift so we can use it directly here */
731d4433c76SJason Ekstrand 	if (set.num_engines > I915_EXEC_RING_MASK + 1)
732d4433c76SJason Ekstrand 		return -EINVAL;
733d4433c76SJason Ekstrand 
734d4433c76SJason Ekstrand 	set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
735d4433c76SJason Ekstrand 	if (!set.engines)
736d4433c76SJason Ekstrand 		return -ENOMEM;
737d4433c76SJason Ekstrand 
738d4433c76SJason Ekstrand 	for (n = 0; n < set.num_engines; n++) {
739d4433c76SJason Ekstrand 		struct i915_engine_class_instance ci;
740d4433c76SJason Ekstrand 		struct intel_engine_cs *engine;
741d4433c76SJason Ekstrand 
742d4433c76SJason Ekstrand 		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
743d4433c76SJason Ekstrand 			kfree(set.engines);
744d4433c76SJason Ekstrand 			return -EFAULT;
745d4433c76SJason Ekstrand 		}
746d4433c76SJason Ekstrand 
747d4433c76SJason Ekstrand 		memset(&set.engines[n], 0, sizeof(set.engines[n]));
748d4433c76SJason Ekstrand 
749d4433c76SJason Ekstrand 		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
750d4433c76SJason Ekstrand 		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
751d4433c76SJason Ekstrand 			continue;
752d4433c76SJason Ekstrand 
753d4433c76SJason Ekstrand 		engine = intel_engine_lookup_user(i915,
754d4433c76SJason Ekstrand 						  ci.engine_class,
755d4433c76SJason Ekstrand 						  ci.engine_instance);
756d4433c76SJason Ekstrand 		if (!engine) {
757d4433c76SJason Ekstrand 			drm_dbg(&i915->drm,
758d4433c76SJason Ekstrand 				"Invalid engine[%d]: { class:%d, instance:%d }\n",
759d4433c76SJason Ekstrand 				n, ci.engine_class, ci.engine_instance);
760d4433c76SJason Ekstrand 			kfree(set.engines);
761d4433c76SJason Ekstrand 			return -ENOENT;
762d4433c76SJason Ekstrand 		}
763d4433c76SJason Ekstrand 
764d4433c76SJason Ekstrand 		set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
765d4433c76SJason Ekstrand 		set.engines[n].engine = engine;
766d4433c76SJason Ekstrand 	}
767d4433c76SJason Ekstrand 
768d4433c76SJason Ekstrand 	err = -EFAULT;
769d4433c76SJason Ekstrand 	if (!get_user(extensions, &user->extensions))
770d4433c76SJason Ekstrand 		err = i915_user_extensions(u64_to_user_ptr(extensions),
771d4433c76SJason Ekstrand 					   set_proto_ctx_engines_extensions,
772d4433c76SJason Ekstrand 					   ARRAY_SIZE(set_proto_ctx_engines_extensions),
773d4433c76SJason Ekstrand 					   &set);
774d4433c76SJason Ekstrand 	if (err) {
775d4433c76SJason Ekstrand 		kfree(set.engines);
776d4433c76SJason Ekstrand 		return err;
777d4433c76SJason Ekstrand 	}
778d4433c76SJason Ekstrand 
779d4433c76SJason Ekstrand 	pc->num_user_engines = set.num_engines;
780d4433c76SJason Ekstrand 	pc->user_engines = set.engines;
781d4433c76SJason Ekstrand 
782d4433c76SJason Ekstrand 	return 0;
783d4433c76SJason Ekstrand }
784d4433c76SJason Ekstrand 
785d4433c76SJason Ekstrand static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
786d4433c76SJason Ekstrand 			      struct i915_gem_proto_context *pc,
787d4433c76SJason Ekstrand 			      struct drm_i915_gem_context_param *args)
788d4433c76SJason Ekstrand {
789d4433c76SJason Ekstrand 	struct drm_i915_private *i915 = fpriv->dev_priv;
790d4433c76SJason Ekstrand 	struct drm_i915_gem_context_param_sseu user_sseu;
791d4433c76SJason Ekstrand 	struct intel_sseu *sseu;
792d4433c76SJason Ekstrand 	int ret;
793d4433c76SJason Ekstrand 
794d4433c76SJason Ekstrand 	if (args->size < sizeof(user_sseu))
795d4433c76SJason Ekstrand 		return -EINVAL;
796d4433c76SJason Ekstrand 
797d4433c76SJason Ekstrand 	if (GRAPHICS_VER(i915) != 11)
798d4433c76SJason Ekstrand 		return -ENODEV;
799d4433c76SJason Ekstrand 
800d4433c76SJason Ekstrand 	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
801d4433c76SJason Ekstrand 			   sizeof(user_sseu)))
802d4433c76SJason Ekstrand 		return -EFAULT;
803d4433c76SJason Ekstrand 
804d4433c76SJason Ekstrand 	if (user_sseu.rsvd)
805d4433c76SJason Ekstrand 		return -EINVAL;
806d4433c76SJason Ekstrand 
807d4433c76SJason Ekstrand 	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
808d4433c76SJason Ekstrand 		return -EINVAL;
809d4433c76SJason Ekstrand 
810d4433c76SJason Ekstrand 	if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
811d4433c76SJason Ekstrand 		return -EINVAL;
812d4433c76SJason Ekstrand 
813d4433c76SJason Ekstrand 	if (pc->num_user_engines >= 0) {
814d4433c76SJason Ekstrand 		int idx = user_sseu.engine.engine_instance;
815d4433c76SJason Ekstrand 		struct i915_gem_proto_engine *pe;
816d4433c76SJason Ekstrand 
817d4433c76SJason Ekstrand 		if (idx >= pc->num_user_engines)
818d4433c76SJason Ekstrand 			return -EINVAL;
819d4433c76SJason Ekstrand 
820d4433c76SJason Ekstrand 		pe = &pc->user_engines[idx];
821d4433c76SJason Ekstrand 
822d4433c76SJason Ekstrand 		/* Only render engine supports RPCS configuration. */
823d4433c76SJason Ekstrand 		if (pe->engine->class != RENDER_CLASS)
824d4433c76SJason Ekstrand 			return -EINVAL;
825d4433c76SJason Ekstrand 
826d4433c76SJason Ekstrand 		sseu = &pe->sseu;
827d4433c76SJason Ekstrand 	} else {
828d4433c76SJason Ekstrand 		/* Only render engine supports RPCS configuration. */
829d4433c76SJason Ekstrand 		if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
830d4433c76SJason Ekstrand 			return -EINVAL;
831d4433c76SJason Ekstrand 
832d4433c76SJason Ekstrand 		/* There is only one render engine */
833d4433c76SJason Ekstrand 		if (user_sseu.engine.engine_instance != 0)
834d4433c76SJason Ekstrand 			return -EINVAL;
835d4433c76SJason Ekstrand 
836d4433c76SJason Ekstrand 		sseu = &pc->legacy_rcs_sseu;
837d4433c76SJason Ekstrand 	}
838d4433c76SJason Ekstrand 
8391a9c4db4SMichał Winiarski 	ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
840d4433c76SJason Ekstrand 	if (ret)
841d4433c76SJason Ekstrand 		return ret;
842d4433c76SJason Ekstrand 
843d4433c76SJason Ekstrand 	args->size = sizeof(user_sseu);
844d4433c76SJason Ekstrand 
845d4433c76SJason Ekstrand 	return 0;
846d4433c76SJason Ekstrand }
847d4433c76SJason Ekstrand 
848d4433c76SJason Ekstrand static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
849d4433c76SJason Ekstrand 			       struct i915_gem_proto_context *pc,
850d4433c76SJason Ekstrand 			       struct drm_i915_gem_context_param *args)
851d4433c76SJason Ekstrand {
852d4433c76SJason Ekstrand 	int ret = 0;
853d4433c76SJason Ekstrand 
854d4433c76SJason Ekstrand 	switch (args->param) {
855d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
856d4433c76SJason Ekstrand 		if (args->size)
857d4433c76SJason Ekstrand 			ret = -EINVAL;
858d4433c76SJason Ekstrand 		else if (args->value)
859d4433c76SJason Ekstrand 			pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
860d4433c76SJason Ekstrand 		else
861d4433c76SJason Ekstrand 			pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
862d4433c76SJason Ekstrand 		break;
863d4433c76SJason Ekstrand 
864d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_BANNABLE:
865d4433c76SJason Ekstrand 		if (args->size)
866d4433c76SJason Ekstrand 			ret = -EINVAL;
867d4433c76SJason Ekstrand 		else if (!capable(CAP_SYS_ADMIN) && !args->value)
868d4433c76SJason Ekstrand 			ret = -EPERM;
869d4433c76SJason Ekstrand 		else if (args->value)
870d4433c76SJason Ekstrand 			pc->user_flags |= BIT(UCONTEXT_BANNABLE);
871d3ac8d42SDaniele Ceraolo Spurio 		else if (pc->uses_protected_content)
872d3ac8d42SDaniele Ceraolo Spurio 			ret = -EPERM;
873d4433c76SJason Ekstrand 		else
874d4433c76SJason Ekstrand 			pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
875d4433c76SJason Ekstrand 		break;
876d4433c76SJason Ekstrand 
877d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_RECOVERABLE:
878d4433c76SJason Ekstrand 		if (args->size)
879d4433c76SJason Ekstrand 			ret = -EINVAL;
880d3ac8d42SDaniele Ceraolo Spurio 		else if (!args->value)
881d4433c76SJason Ekstrand 			pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
882d3ac8d42SDaniele Ceraolo Spurio 		else if (pc->uses_protected_content)
883d3ac8d42SDaniele Ceraolo Spurio 			ret = -EPERM;
884d3ac8d42SDaniele Ceraolo Spurio 		else
885d3ac8d42SDaniele Ceraolo Spurio 			pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
886d4433c76SJason Ekstrand 		break;
887d4433c76SJason Ekstrand 
888d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_PRIORITY:
889d4433c76SJason Ekstrand 		ret = validate_priority(fpriv->dev_priv, args);
890d4433c76SJason Ekstrand 		if (!ret)
891d4433c76SJason Ekstrand 			pc->sched.priority = args->value;
892d4433c76SJason Ekstrand 		break;
893d4433c76SJason Ekstrand 
894d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_SSEU:
895d4433c76SJason Ekstrand 		ret = set_proto_ctx_sseu(fpriv, pc, args);
896d4433c76SJason Ekstrand 		break;
897d4433c76SJason Ekstrand 
898d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_VM:
899d4433c76SJason Ekstrand 		ret = set_proto_ctx_vm(fpriv, pc, args);
900d4433c76SJason Ekstrand 		break;
901d4433c76SJason Ekstrand 
902d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_ENGINES:
903d4433c76SJason Ekstrand 		ret = set_proto_ctx_engines(fpriv, pc, args);
904d4433c76SJason Ekstrand 		break;
905d4433c76SJason Ekstrand 
906d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_PERSISTENCE:
907d4433c76SJason Ekstrand 		if (args->size)
908d4433c76SJason Ekstrand 			ret = -EINVAL;
909d4433c76SJason Ekstrand 		ret = proto_context_set_persistence(fpriv->dev_priv, pc,
910d4433c76SJason Ekstrand 						    args->value);
911d4433c76SJason Ekstrand 		break;
912d4433c76SJason Ekstrand 
913d3ac8d42SDaniele Ceraolo Spurio 	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
914d3ac8d42SDaniele Ceraolo Spurio 		ret = proto_context_set_protected(fpriv->dev_priv, pc,
915d3ac8d42SDaniele Ceraolo Spurio 						  args->value);
916d3ac8d42SDaniele Ceraolo Spurio 		break;
917d3ac8d42SDaniele Ceraolo Spurio 
918d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
919d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_BAN_PERIOD:
920d4433c76SJason Ekstrand 	case I915_CONTEXT_PARAM_RINGSIZE:
921d4433c76SJason Ekstrand 	default:
922d4433c76SJason Ekstrand 		ret = -EINVAL;
923d4433c76SJason Ekstrand 		break;
924d4433c76SJason Ekstrand 	}
925d4433c76SJason Ekstrand 
926d4433c76SJason Ekstrand 	return ret;
927d4433c76SJason Ekstrand }
928d4433c76SJason Ekstrand 
/*
 * Bind a freshly created intel_context to its owning GEM context: publish
 * the back-pointer, size the ring, adopt the context's address space,
 * scheduling hints, request watchdog and (optionally) an SSEU config.
 * Must be called before the context is first pinned.
 */
static int intel_context_set_gem(struct intel_context *ce,
				 struct i915_gem_context *ctx,
				 struct intel_sseu sseu)
{
	int ret = 0;

	/* May only be bound once; publish the pointer for RCU readers. */
	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
	RCU_INIT_POINTER(ce->gem_context, ctx);

	/* Ring size takes effect at pin time, so it must not be pinned yet. */
	GEM_BUG_ON(intel_context_is_pinned(ce));
	ce->ring_size = SZ_16K;

	/* Swap the engine's default VM for the GEM context's VM. */
	i915_vm_put(ce->vm);
	ce->vm = i915_gem_context_get_eb_vm(ctx);

	/*
	 * Semaphore-based waits are only allowed for normal-or-higher
	 * priority contexts on engines with both timeslicing and semaphores.
	 */
	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
	    intel_engine_has_timeslices(ce->engine) &&
	    intel_engine_has_semaphores(ce->engine))
		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);

	/* Arm the per-request watchdog if a timeout is configured. */
	if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
	    ctx->i915->params.request_timeout_ms) {
		unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;

		intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
	}

	/* A valid SSEU has no zero fields */
	if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
		ret = intel_context_reconfigure_sseu(ce, sseu);

	return ret;
}
962e6ba7648SChris Wilson 
963e5e32171SMatthew Brost static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
964e5e32171SMatthew Brost {
965e5e32171SMatthew Brost 	while (count--) {
966e5e32171SMatthew Brost 		struct intel_context *ce = e->engines[count], *child;
967e5e32171SMatthew Brost 
968e5e32171SMatthew Brost 		if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
969e5e32171SMatthew Brost 			continue;
970e5e32171SMatthew Brost 
971e5e32171SMatthew Brost 		for_each_child(ce, child)
972e5e32171SMatthew Brost 			intel_context_unpin(child);
973e5e32171SMatthew Brost 		intel_context_unpin(ce);
974e5e32171SMatthew Brost 	}
975e5e32171SMatthew Brost }
976e5e32171SMatthew Brost 
/* Drop the perma-pins held by every engine in the array. */
static void unpin_engines(struct i915_gem_engines *e)
{
	__unpin_engines(e, e->num_engines);
}
981e5e32171SMatthew Brost 
98210be98a7SChris Wilson static void __free_engines(struct i915_gem_engines *e, unsigned int count)
98310be98a7SChris Wilson {
98410be98a7SChris Wilson 	while (count--) {
98510be98a7SChris Wilson 		if (!e->engines[count])
98610be98a7SChris Wilson 			continue;
98710be98a7SChris Wilson 
98810be98a7SChris Wilson 		intel_context_put(e->engines[count]);
98910be98a7SChris Wilson 	}
99010be98a7SChris Wilson 	kfree(e);
99110be98a7SChris Wilson }
99210be98a7SChris Wilson 
/* Release every engine reference in @e and free the array. */
static void free_engines(struct i915_gem_engines *e)
{
	__free_engines(e, e->num_engines);
}
99710be98a7SChris Wilson 
/*
 * RCU callback: tear down an engines array after the grace period, once
 * no lockless readers can still be walking it.
 */
static void free_engines_rcu(struct rcu_head *rcu)
{
	struct i915_gem_engines *engines =
		container_of(rcu, struct i915_gem_engines, rcu);

	i915_sw_fence_fini(&engines->fence);
	free_engines(engines);
}
100610be98a7SChris Wilson 
/*
 * i915_sw_fence callback for an engines array.
 *
 * FENCE_COMPLETE: unlink the array from its context's stale list (under
 * the stale lock, with interrupts saved) and drop the context reference.
 * FENCE_FREE: defer the final free through an RCU grace period.
 */
static int
engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_gem_engines *engines =
		container_of(fence, typeof(*engines), fence);

	switch (state) {
	case FENCE_COMPLETE:
		if (!list_empty(&engines->link)) {
			struct i915_gem_context *ctx = engines->ctx;
			unsigned long flags;

			spin_lock_irqsave(&ctx->stale.lock, flags);
			list_del(&engines->link);
			spin_unlock_irqrestore(&ctx->stale.lock, flags);
		}
		i915_gem_context_put(engines->ctx);
		break;

	case FENCE_FREE:
		init_rcu_head(&engines->rcu);
		call_rcu(&engines->rcu, free_engines_rcu);
		break;
	}

	return NOTIFY_DONE;
}
103470c96e39SChris Wilson 
103570c96e39SChris Wilson static struct i915_gem_engines *alloc_engines(unsigned int count)
103670c96e39SChris Wilson {
103770c96e39SChris Wilson 	struct i915_gem_engines *e;
103870c96e39SChris Wilson 
103970c96e39SChris Wilson 	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
104070c96e39SChris Wilson 	if (!e)
104170c96e39SChris Wilson 		return NULL;
104270c96e39SChris Wilson 
104370c96e39SChris Wilson 	i915_sw_fence_init(&e->fence, engines_notify);
104470c96e39SChris Wilson 	return e;
104570c96e39SChris Wilson }
104670c96e39SChris Wilson 
/*
 * Build the default engines array for a context that did not supply its
 * own engine map: one context per engine with a legacy (ABI) index, with
 * @rcs_sseu applied to the render engine only.
 * Returns the array or an ERR_PTR; partial results are freed on failure.
 */
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
						struct intel_sseu rcs_sseu)
{
	const struct intel_gt *gt = to_gt(ctx->i915);
	struct intel_engine_cs *engine;
	struct i915_gem_engines *e, *err;
	enum intel_engine_id id;

	e = alloc_engines(I915_NUM_ENGINES);
	if (!e)
		return ERR_PTR(-ENOMEM);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		struct intel_sseu sseu = {};
		int ret;

		/* Engines without a legacy ABI slot are not exposed here. */
		if (engine->legacy_idx == INVALID_ENGINE)
			continue;

		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
		GEM_BUG_ON(e->engines[engine->legacy_idx]);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = ERR_CAST(ce);
			goto free_engines;
		}

		e->engines[engine->legacy_idx] = ce;
		/* num_engines tracks the highest used slot, not a count. */
		e->num_engines = max(e->num_engines, engine->legacy_idx + 1);

		/* Only the render engine receives the legacy SSEU config. */
		if (engine->class == RENDER_CLASS)
			sseu = rcs_sseu;

		ret = intel_context_set_gem(ce, ctx, sseu);
		if (ret) {
			err = ERR_PTR(ret);
			goto free_engines;
		}

	}

	return e;

free_engines:
	free_engines(e);
	return err;
}
109610be98a7SChris Wilson 
/*
 * Pin a parent context and all of its children for the lifetime of the
 * context ("perma-pin"), as used for parallel submission. On failure all
 * pins taken so far are dropped and the error is returned.
 */
static int perma_pin_contexts(struct intel_context *ce)
{
	struct intel_context *child;
	int i = 0, j = 0, ret;

	GEM_BUG_ON(!intel_context_is_parent(ce));

	ret = intel_context_pin(ce);
	if (unlikely(ret))
		return ret;

	for_each_child(ce, child) {
		ret = intel_context_pin(child);
		if (unlikely(ret))
			goto unwind;
		++i;	/* count children successfully pinned */
	}

	/* Mark the parent so unpin_engines() knows to drop these pins. */
	set_bit(CONTEXT_PERMA_PIN, &ce->flags);

	return 0;

unwind:
	intel_context_unpin(ce);
	/* Unpin only the first i children — the ones actually pinned. */
	for_each_child(ce, child) {
		if (j++ < i)
			intel_context_unpin(child);
		else
			break;
	}

	return ret;
}
1130e5e32171SMatthew Brost 
1131d4433c76SJason Ekstrand static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
1132d4433c76SJason Ekstrand 					     unsigned int num_engines,
1133d4433c76SJason Ekstrand 					     struct i915_gem_proto_engine *pe)
1134d4433c76SJason Ekstrand {
1135d4433c76SJason Ekstrand 	struct i915_gem_engines *e, *err;
1136d4433c76SJason Ekstrand 	unsigned int n;
1137d4433c76SJason Ekstrand 
1138d4433c76SJason Ekstrand 	e = alloc_engines(num_engines);
113984edf537SMatthew Brost 	if (!e)
114084edf537SMatthew Brost 		return ERR_PTR(-ENOMEM);
114184edf537SMatthew Brost 	e->num_engines = num_engines;
114284edf537SMatthew Brost 
1143d4433c76SJason Ekstrand 	for (n = 0; n < num_engines; n++) {
1144e5e32171SMatthew Brost 		struct intel_context *ce, *child;
1145d4433c76SJason Ekstrand 		int ret;
1146d4433c76SJason Ekstrand 
1147d4433c76SJason Ekstrand 		switch (pe[n].type) {
1148d4433c76SJason Ekstrand 		case I915_GEM_ENGINE_TYPE_PHYSICAL:
1149d4433c76SJason Ekstrand 			ce = intel_context_create(pe[n].engine);
1150d4433c76SJason Ekstrand 			break;
1151d4433c76SJason Ekstrand 
1152d4433c76SJason Ekstrand 		case I915_GEM_ENGINE_TYPE_BALANCED:
115355612025SMatthew Brost 			ce = intel_engine_create_virtual(pe[n].siblings,
1154e5e32171SMatthew Brost 							 pe[n].num_siblings, 0);
1155e5e32171SMatthew Brost 			break;
1156e5e32171SMatthew Brost 
1157e5e32171SMatthew Brost 		case I915_GEM_ENGINE_TYPE_PARALLEL:
1158e5e32171SMatthew Brost 			ce = intel_engine_create_parallel(pe[n].siblings,
1159e5e32171SMatthew Brost 							  pe[n].num_siblings,
1160e5e32171SMatthew Brost 							  pe[n].width);
1161d4433c76SJason Ekstrand 			break;
1162d4433c76SJason Ekstrand 
1163d4433c76SJason Ekstrand 		case I915_GEM_ENGINE_TYPE_INVALID:
1164d4433c76SJason Ekstrand 		default:
1165d4433c76SJason Ekstrand 			GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
1166d4433c76SJason Ekstrand 			continue;
1167d4433c76SJason Ekstrand 		}
1168d4433c76SJason Ekstrand 
1169d4433c76SJason Ekstrand 		if (IS_ERR(ce)) {
1170d4433c76SJason Ekstrand 			err = ERR_CAST(ce);
1171d4433c76SJason Ekstrand 			goto free_engines;
1172d4433c76SJason Ekstrand 		}
1173d4433c76SJason Ekstrand 
1174d4433c76SJason Ekstrand 		e->engines[n] = ce;
1175d4433c76SJason Ekstrand 
1176d4433c76SJason Ekstrand 		ret = intel_context_set_gem(ce, ctx, pe->sseu);
1177d4433c76SJason Ekstrand 		if (ret) {
1178d4433c76SJason Ekstrand 			err = ERR_PTR(ret);
1179d4433c76SJason Ekstrand 			goto free_engines;
1180d4433c76SJason Ekstrand 		}
1181e5e32171SMatthew Brost 		for_each_child(ce, child) {
1182e5e32171SMatthew Brost 			ret = intel_context_set_gem(child, ctx, pe->sseu);
1183e5e32171SMatthew Brost 			if (ret) {
1184e5e32171SMatthew Brost 				err = ERR_PTR(ret);
1185e5e32171SMatthew Brost 				goto free_engines;
1186e5e32171SMatthew Brost 			}
1187e5e32171SMatthew Brost 		}
1188e5e32171SMatthew Brost 
1189e5e32171SMatthew Brost 		/*
1190e5e32171SMatthew Brost 		 * XXX: Must be done after calling intel_context_set_gem as that
1191e5e32171SMatthew Brost 		 * function changes the ring size. The ring is allocated when
1192e5e32171SMatthew Brost 		 * the context is pinned. If the ring size is changed after
1193e5e32171SMatthew Brost 		 * allocation we have a mismatch of the ring size and will cause
1194e5e32171SMatthew Brost 		 * the context to hang. Presumably with a bit of reordering we
1195e5e32171SMatthew Brost 		 * could move the perma-pin step to the backend function
1196e5e32171SMatthew Brost 		 * intel_engine_create_parallel.
1197e5e32171SMatthew Brost 		 */
1198e5e32171SMatthew Brost 		if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
1199e5e32171SMatthew Brost 			ret = perma_pin_contexts(ce);
1200e5e32171SMatthew Brost 			if (ret) {
1201e5e32171SMatthew Brost 				err = ERR_PTR(ret);
1202e5e32171SMatthew Brost 				goto free_engines;
1203e5e32171SMatthew Brost 			}
1204e5e32171SMatthew Brost 		}
1205d4433c76SJason Ekstrand 	}
1206d4433c76SJason Ekstrand 
1207d4433c76SJason Ekstrand 	return e;
1208d4433c76SJason Ekstrand 
1209d4433c76SJason Ekstrand free_engines:
1210d4433c76SJason Ekstrand 	free_engines(e);
1211d4433c76SJason Ekstrand 	return err;
1212d4433c76SJason Ekstrand }
1213d4433c76SJason Ekstrand 
/*
 * Deferred teardown of a GEM context, run from the driver workqueue after
 * the final reference is dropped: release the syncobj, VM and PXP wakeref,
 * destroy the locks, and free the context after an RCU grace period.
 */
static void i915_gem_context_release_work(struct work_struct *work)
{
	struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
						    release_work);
	struct i915_address_space *vm;

	trace_i915_context_free(ctx);
	/* The context must have been closed before its last ref was put. */
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	if (ctx->syncobj)
		drm_syncobj_put(ctx->syncobj);

	vm = ctx->vm;
	if (vm)
		i915_vm_put(vm);

	if (ctx->pxp_wakeref)
		intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);

	mutex_destroy(&ctx->engines_mutex);
	mutex_destroy(&ctx->lut_mutex);

	put_pid(ctx->pid);
	mutex_destroy(&ctx->mutex);

	/* RCU readers may still hold a pointer; free after grace period. */
	kfree_rcu(ctx, rcu);
}
124110be98a7SChris Wilson 
/*
 * kref release callback for a GEM context. The actual teardown is
 * deferred to a worker (i915_gem_context_release_work()), presumably so
 * the final reference can be dropped from contexts where the teardown's
 * own locking would be unsafe — TODO(review): confirm the motivation.
 */
void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);

	queue_work(ctx->i915->wq, &ctx->release_work);
}
124875eefd82SDaniel Vetter 
/*
 * Return the context's engine map without an RCU read lock. Only legal
 * while the map cannot be replaced concurrently; the unconditional
 * "true" silences the rcu_dereference_protected() lockdep check.
 */
static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines, true);
}
12542e0986a5SChris Wilson 
/*
 * Force a reset of @engine to evict @ctx's remaining work, reporting the
 * context's name as the reason to the GT error handler.
 */
static void __reset_context(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	intel_gt_handle_error(engine->gt, engine->mask, 0,
			      "context closure in %s", ctx->name);
}
12612e0986a5SChris Wilson 
static bool __cancel_engine(struct intel_engine_cs *engine)
{
	/*
	 * Send a "high priority pulse" down the engine to cause the
	 * current request to be momentarily preempted. (If it fails to
	 * be preempted, it will be reset). As we have marked our context
	 * as banned, any incomplete request, including any running, will
	 * be skipped following the preemption.
	 *
	 * If there is no hangchecking (one of the reasons why we try to
	 * cancel the context) and no forced preemption, there may be no
	 * means by which we reset the GPU and evict the persistent hog.
	 * Ergo if we are unable to inject a preemptive pulse that can
	 * kill the banned context, we fallback to doing a local reset
	 * instead.
	 */
	/* Zero from intel_engine_pulse() means the pulse was submitted */
	return intel_engine_pulse(engine) == 0;
}
12802e0986a5SChris Wilson 
/*
 * Find the engine that @ce is currently executing on, or NULL if the
 * context is idle or has not yet been submitted. Prefers the backend's
 * inflight tracking; otherwise walks the context's timeline backwards
 * looking for the last incomplete request and asks the backend which
 * engine it is active on.
 */
static struct intel_engine_cs *active_engine(struct intel_context *ce)
{
	struct intel_engine_cs *engine = NULL;
	struct i915_request *rq;

	if (intel_context_has_inflight(ce))
		return intel_context_inflight(ce);

	/* No timeline means nothing was ever submitted */
	if (!ce->timeline)
		return NULL;

	/*
	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
	 * to the request to prevent it being transferred to a new timeline
	 * (and onto a new timeline->requests list).
	 */
	rcu_read_lock();
	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
		bool found;

		/* timeline is already completed upto this point? */
		if (!i915_request_get_rcu(rq))
			break;

		/* Check with the backend if the request is inflight */
		found = true;
		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
			found = i915_request_active_engine(rq, &engine);

		i915_request_put(rq);
		if (found)
			break;
	}
	rcu_read_unlock();

	return engine;
}
13184a317415SChris Wilson 
/*
 * Cancel all incomplete requests on every engine of @engines, optionally
 * banning each context first. A graceful preemption pulse is attempted
 * before falling back to a full engine reset.
 */
static void kill_engines(struct i915_gem_engines *engines, bool ban)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	/*
	 * Map the user's engine back to the actual engines; one virtual
	 * engine will be mapped to multiple engines, and using ctx->engine[]
	 * the same engine may be have multiple instances in the user's map.
	 * However, we only care about pending requests, so only include
	 * engines on which there are incomplete requests.
	 */
	for_each_gem_engine(ce, engines, it) {
		struct intel_engine_cs *engine;

		/*
		 * NOTE(review): a true return from intel_context_ban()
		 * presumably means the context was already banned, so
		 * there is nothing further to do — confirm.
		 */
		if (ban && intel_context_ban(ce, NULL))
			continue;

		/*
		 * Check the current active state of this context; if we
		 * are currently executing on the GPU we need to evict
		 * ourselves. On the other hand, if we haven't yet been
		 * submitted to the GPU or if everything is complete,
		 * we have nothing to do.
		 */
		engine = active_engine(ce);

		/* First attempt to gracefully cancel the context */
		if (engine && !__cancel_engine(engine) && ban)
			/*
			 * If we are unable to send a preemptive pulse to bump
			 * the context from the GPU, we have to resort to a full
			 * reset. We hope the collateral damage is worth it.
			 */
			__reset_context(engines->ctx, engine);
	}
}
13562e0986a5SChris Wilson 
/*
 * Kill off the outstanding requests on every stale engines list of a
 * closed context. The context is banned first unless it is persistent
 * and hangcheck is enabled (in which case it may outlive its fd).
 */
static void kill_context(struct i915_gem_context *ctx)
{
	bool ban = (!i915_gem_context_is_persistent(ctx) ||
		    !ctx->i915->params.enable_hangcheck);
	struct i915_gem_engines *pos, *next;

	spin_lock_irq(&ctx->stale.lock);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
		/*
		 * Take an extra await on the fence to keep this entry
		 * alive while we drop the lock; if the fence is already
		 * complete, just unlink and move on.
		 */
		if (!i915_sw_fence_await(&pos->fence)) {
			list_del_init(&pos->link);
			continue;
		}

		spin_unlock_irq(&ctx->stale.lock);

		kill_engines(pos, ban);

		/* Reacquire and resynchronise the walk after dropping the lock */
		spin_lock_irq(&ctx->stale.lock);
		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
		list_safe_reset_next(pos, next, link);
		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */

		i915_sw_fence_complete(&pos->fence);
	}
	spin_unlock_irq(&ctx->stale.lock);
}
138442fb60deSChris Wilson 
/*
 * Arrange for @engines to be released once all of its contexts are idle
 * (scheduled out and retired). If a context is still active, the engines'
 * fence waits on it; the engines list is parked on ctx->stale.engines so
 * a later kill_context() can cancel outstanding work. If we race with the
 * context already being closed, or fail to track idleness, the requests
 * are killed immediately instead.
 */
static void engines_idle_release(struct i915_gem_context *ctx,
				 struct i915_gem_engines *engines)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	INIT_LIST_HEAD(&engines->link);

	/* The engines list keeps its context alive until released */
	engines->ctx = i915_gem_context_get(ctx);

	for_each_gem_engine(ce, engines, it) {
		int err;

		/* serialises with execbuf */
		set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
		/* Idle contexts need no wait; skip them */
		if (!intel_context_pin_if_active(ce))
			continue;

		/* Wait until context is finally scheduled out and retired */
		err = i915_sw_fence_await_active(&engines->fence,
						 &ce->active,
						 I915_ACTIVE_AWAIT_BARRIER);
		intel_context_unpin(ce);
		if (err)
			goto kill;
	}

	spin_lock_irq(&ctx->stale.lock);
	if (!i915_gem_context_is_closed(ctx))
		list_add_tail(&engines->link, &ctx->stale.engines);
	spin_unlock_irq(&ctx->stale.lock);

kill:
	if (list_empty(&engines->link)) /* raced, already closed */
		kill_engines(engines, true);

	i915_sw_fence_commit(&engines->fence);
}
142342fb60deSChris Wilson 
1424267c0126SChris Wilson static void set_closed_name(struct i915_gem_context *ctx)
1425267c0126SChris Wilson {
1426267c0126SChris Wilson 	char *s;
1427267c0126SChris Wilson 
1428267c0126SChris Wilson 	/* Replace '[]' with '<>' to indicate closed in debug prints */
1429267c0126SChris Wilson 
1430267c0126SChris Wilson 	s = strrchr(ctx->name, '[');
1431267c0126SChris Wilson 	if (!s)
1432267c0126SChris Wilson 		return;
1433267c0126SChris Wilson 
1434267c0126SChris Wilson 	*s = '<';
1435267c0126SChris Wilson 
1436267c0126SChris Wilson 	s = strchr(s + 1, ']');
1437267c0126SChris Wilson 	if (s)
1438267c0126SChris Wilson 		*s = '>';
1439267c0126SChris Wilson }
1440267c0126SChris Wilson 
/*
 * Close a context on behalf of its file: detach and release the engines,
 * mark the context closed, close the VM, clear the handle LUT, unlink
 * from the device-wide list and finally kill any outstanding requests
 * before dropping the file's reference.
 */
static void context_close(struct i915_gem_context *ctx)
{
	struct i915_address_space *vm;

	/* Flush any concurrent set_engines() */
	mutex_lock(&ctx->engines_mutex);
	unpin_engines(__context_engines_static(ctx));
	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
	i915_gem_context_set_closed(ctx);
	mutex_unlock(&ctx->engines_mutex);

	mutex_lock(&ctx->mutex);

	set_closed_name(ctx);

	vm = ctx->vm;
	if (vm) {
		/* i915_vm_close drops the final reference, which is a bit too
		 * early and could result in surprises with concurrent
		 * operations racing with this ctx close. Keep a full reference
		 * until the end.
		 */
		i915_vm_get(vm);
		i915_vm_close(vm);
	}

	/* Poison the back-pointer; the context no longer belongs to a file */
	ctx->file_priv = ERR_PTR(-EBADF);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);

	spin_lock(&ctx->i915->gem.contexts.lock);
	list_del(&ctx->link);
	spin_unlock(&ctx->i915->gem.contexts.lock);

	mutex_unlock(&ctx->mutex);

	/*
	 * If the user has disabled hangchecking, we can not be sure that
	 * the batches will ever complete after the context is closed,
	 * keeping the context and all resources pinned forever. So in this
	 * case we opt to forcibly kill off all remaining requests on
	 * context close.
	 */
	kill_context(ctx);

	i915_gem_context_put(ctx);
}
149310be98a7SChris Wilson 
/*
 * Set or clear the persistence flag on @ctx (whether its requests may
 * outlive the context), validating that the hardware capabilities and
 * module parameters allow the transition. Returns 0 on success or a
 * negative error code.
 */
static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
{
	/* No-op if the flag already has the requested value */
	if (i915_gem_context_is_persistent(ctx) == state)
		return 0;

	if (state) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are healthy.
		 */
		if (!ctx->i915->params.enable_hangcheck)
			return -EINVAL;

		i915_gem_context_set_persistence(ctx);
	} else {
		/* To cancel a context we use "preempt-to-idle" */
		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
			return -ENODEV;

		/*
		 * If the cancel fails, we then need to reset, cleanly!
		 *
		 * If the per-engine reset fails, all hope is lost! We resort
		 * to a full GPU reset in that unlikely case, but realistically
		 * if the engine could not reset, the full reset does not fare
		 * much better. The damage has been done.
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * cleanup a hanging persistent context without causing
		 * colateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(to_gt(ctx->i915)))
			return -ENODEV;

		i915_gem_context_clear_persistence(ctx);
	}

	return 0;
}
1535a0e04715SChris Wilson 
/*
 * Construct a new GEM context from a proto-context description. The
 * proto-context is only read; any state it holds that the context needs
 * (e.g. the VM) is separately referenced. Returns the new context or an
 * ERR_PTR on failure, with all partially-constructed state unwound.
 */
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *i915,
			const struct i915_gem_proto_context *pc)
{
	struct i915_gem_context *ctx;
	struct i915_address_space *vm = NULL;
	struct i915_gem_engines *e;
	int err;
	int i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	ctx->i915 = i915;
	ctx->sched = pc->sched;
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->link);
	INIT_WORK(&ctx->release_work, i915_gem_context_release_work);

	spin_lock_init(&ctx->stale.lock);
	INIT_LIST_HEAD(&ctx->stale.engines);

	/* Use the VM the user supplied, or create a fresh ppgtt if we can */
	if (pc->vm) {
		vm = i915_vm_get(pc->vm);
	} else if (HAS_FULL_PPGTT(i915)) {
		struct i915_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(to_gt(i915), 0);
		if (IS_ERR(ppgtt)) {
			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
				PTR_ERR(ppgtt));
			err = PTR_ERR(ppgtt);
			goto err_ctx;
		}
		vm = &ppgtt->vm;
	}
	if (vm) {
		ctx->vm = i915_vm_open(vm);

		/* i915_vm_open() takes a reference */
		i915_vm_put(vm);
	}

	mutex_init(&ctx->engines_mutex);
	/* Negative num_user_engines means the legacy default engine map */
	if (pc->num_user_engines >= 0) {
		i915_gem_context_set_user_engines(ctx);
		e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
	} else {
		i915_gem_context_clear_user_engines(ctx);
		e = default_engines(ctx, pc->legacy_rcs_sseu);
	}
	if (IS_ERR(e)) {
		err = PTR_ERR(e);
		goto err_vm;
	}
	RCU_INIT_POINTER(ctx->engines, e);

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
	mutex_init(&ctx->lut_mutex);

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(i915);

	ctx->user_flags = pc->user_flags;

	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;

	if (pc->single_timeline) {
		/*
		 * Syncobj used to serialise requests onto a single timeline;
		 * created pre-signaled so the first request has nothing to
		 * wait on.
		 */
		err = drm_syncobj_create(&ctx->syncobj,
					 DRM_SYNCOBJ_CREATE_SIGNALED,
					 NULL);
		if (err)
			goto err_engines;
	}

	if (pc->uses_protected_content) {
		/* Hold a runtime-pm wakeref for the context's lifetime (PXP) */
		ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
		ctx->uses_protected_content = true;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_engines:
	free_engines(e);
err_vm:
	if (ctx->vm)
		i915_vm_close(ctx->vm);
err_ctx:
	kfree(ctx);
	return ERR_PTR(err);
}
163410be98a7SChris Wilson 
/* Initialise the device-global context list and its protecting lock */
static void init_contexts(struct i915_gem_contexts *gc)
{
	spin_lock_init(&gc->lock);
	INIT_LIST_HEAD(&gc->list);
}
164010be98a7SChris Wilson 
/* One-time driver initialisation of GEM context tracking for @i915 */
void i915_gem_init__contexts(struct drm_i915_private *i915)
{
	init_contexts(&i915->gem.contexts);
}
164510be98a7SChris Wilson 
/*
 * Publish a fully constructed context to userspace: name it after the
 * opening task, store it under @id in the file's context_xa (the slot
 * must be empty — WARN otherwise) and link it on the device-wide list.
 */
static void gem_context_register(struct i915_gem_context *ctx,
				 struct drm_i915_file_private *fpriv,
				 u32 id)
{
	struct drm_i915_private *i915 = ctx->i915;
	void *old;

	ctx->file_priv = fpriv;

	/* Debug name: "comm[pid]" of the task registering the context */
	ctx->pid = get_task_pid(current, PIDTYPE_PID);
	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
		 current->comm, pid_nr(ctx->pid));

	/* And finally expose ourselves to userspace via the idr */
	old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
	WARN_ON(old);

	spin_lock(&i915->gem.contexts.lock);
	list_add_tail(&ctx->link, &i915->gem.contexts.list);
	spin_unlock(&i915->gem.contexts.lock);
}
166710be98a7SChris Wilson 
/*
 * Per-file open: initialise the file's proto-context, context and VM
 * lookup tables, then create and register the default context under the
 * reserved id 0. On failure everything is torn down again.
 */
int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;
	int err;

	mutex_init(&file_priv->proto_context_lock);
	xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC);

	/* 0 reserved for the default context */
	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1);

	/* 0 reserved for invalid/unassigned ppgtt */
	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);

	/* The default context is built from an empty proto-context */
	pc = proto_context_create(i915, 0);
	if (IS_ERR(pc)) {
		err = PTR_ERR(pc);
		goto err;
	}

	ctx = i915_gem_create_context(i915, pc);
	proto_context_close(i915, pc);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err;
	}

	gem_context_register(ctx, file_priv, 0);

	return 0;

err:
	xa_destroy(&file_priv->vm_xa);
	xa_destroy(&file_priv->context_xa);
	xa_destroy(&file_priv->proto_context_xa);
	mutex_destroy(&file_priv->proto_context_lock);
	return err;
}
170910be98a7SChris Wilson 
/*
 * Per-file close: tear down everything this file created — pending
 * proto-contexts first, then live contexts, then the VM handles.
 */
void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_proto_context *pc;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	unsigned long idx;

	xa_for_each(&file_priv->proto_context_xa, idx, pc)
		proto_context_close(file_priv->dev_priv, pc);
	xa_destroy(&file_priv->proto_context_xa);
	mutex_destroy(&file_priv->proto_context_lock);

	xa_for_each(&file_priv->context_xa, idx, ctx)
		context_close(ctx);
	xa_destroy(&file_priv->context_xa);

	/* Release the VM references held by the file's vm_id handles */
	xa_for_each(&file_priv->vm_xa, idx, vm)
		i915_vm_put(vm);
	xa_destroy(&file_priv->vm_xa);
}
173110be98a7SChris Wilson 
/*
 * DRM_I915_GEM_VM_CREATE ioctl: create a new ppgtt address space and
 * return a nonzero vm_id handle for it in args->vm_id. Requires full
 * ppgtt support; no flags are defined.
 */
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_vm_control *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_ppgtt *ppgtt;
	u32 id;
	int err;

	if (!HAS_FULL_PPGTT(i915))
		return -ENODEV;

	if (args->flags)
		return -EINVAL;

	ppgtt = i915_ppgtt_create(to_gt(i915), 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (args->extensions) {
		/* Empty extension table: any supplied extension is rejected */
		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   NULL, 0,
					   ppgtt);
		if (err)
			goto err_put;
	}

	/* On success the xarray entry owns the ppgtt creation reference */
	err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
		       xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_put;

	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
	args->vm_id = id;
	return 0;

err_put:
	i915_vm_put(&ppgtt->vm);
	return err;
}
177310be98a7SChris Wilson 
177410be98a7SChris Wilson int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
177510be98a7SChris Wilson 			      struct drm_file *file)
177610be98a7SChris Wilson {
177710be98a7SChris Wilson 	struct drm_i915_file_private *file_priv = file->driver_priv;
177810be98a7SChris Wilson 	struct drm_i915_gem_vm_control *args = data;
1779e568ac38SChris Wilson 	struct i915_address_space *vm;
178010be98a7SChris Wilson 
178110be98a7SChris Wilson 	if (args->flags)
178210be98a7SChris Wilson 		return -EINVAL;
178310be98a7SChris Wilson 
178410be98a7SChris Wilson 	if (args->extensions)
178510be98a7SChris Wilson 		return -EINVAL;
178610be98a7SChris Wilson 
17875dbd2b7bSChris Wilson 	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1788e568ac38SChris Wilson 	if (!vm)
178910be98a7SChris Wilson 		return -ENOENT;
179010be98a7SChris Wilson 
1791e568ac38SChris Wilson 	i915_vm_put(vm);
179210be98a7SChris Wilson 	return 0;
179310be98a7SChris Wilson }
179410be98a7SChris Wilson 
/*
 * Export the context's ppgtt to userspace as a fresh vm_id handle in the
 * file's vm_xa, returning the id in args->value. Requires the context to
 * have its own full ppgtt.
 */
static int get_ppgtt(struct drm_i915_file_private *file_priv,
		     struct i915_gem_context *ctx,
		     struct drm_i915_gem_context_param *args)
{
	struct i915_address_space *vm;
	int err;
	u32 id;

	if (!i915_gem_context_has_full_ppgtt(ctx))
		return -ENODEV;

	vm = ctx->vm;
	GEM_BUG_ON(!vm);

	/*
	 * NOTE(review): the vm is published in the xarray before the extra
	 * reference below is taken — presumably serialised against lookups
	 * dropping the handle; confirm.
	 */
	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
	if (err)
		return err;

	/* Keep the vm alive for as long as the handle exists */
	i915_vm_open(vm);

	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
	args->value = id;
	args->size = 0;

	return err;
}
182110be98a7SChris Wilson 
/**
 * i915_gem_user_to_context_sseu - validate and convert a user SSEU request
 * @gt: the GT whose hardware SSEU capabilities bound the request
 * @user: slice/subslice/EU configuration supplied by userspace
 * @context: internal SSEU representation filled in on success
 *
 * Validates @user against the device topology in @gt->info.sseu and, for
 * graphics version 11, against additional part-specific restrictions
 * (the uAPI there is limited to the VME use case), then copies the
 * accepted values into @context.
 *
 * Returns: 0 on success, -EINVAL if the requested configuration is
 * rejected.
 */
int
i915_gem_user_to_context_sseu(struct intel_gt *gt,
			      const struct drm_i915_gem_context_param_sseu *user,
			      struct intel_sseu *context)
{
	const struct sseu_dev_info *device = &gt->info.sseu;
	struct drm_i915_private *i915 = gt->i915;

	/* No zeros in any field. */
	if (!user->slice_mask || !user->subslice_mask ||
	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
		return -EINVAL;

	/* Max > min. */
	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
		return -EINVAL;

	/*
	 * Some future proofing on the types since the uAPI is wider than the
	 * current internal implementation.
	 */
	if (overflows_type(user->slice_mask, context->slice_mask) ||
	    overflows_type(user->subslice_mask, context->subslice_mask) ||
	    overflows_type(user->min_eus_per_subslice,
			   context->min_eus_per_subslice) ||
	    overflows_type(user->max_eus_per_subslice,
			   context->max_eus_per_subslice))
		return -EINVAL;

	/* Check validity against hardware. */
	if (user->slice_mask & ~device->slice_mask)
		return -EINVAL;

	if (user->subslice_mask & ~device->subslice_mask[0])
		return -EINVAL;

	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
		return -EINVAL;

	context->slice_mask = user->slice_mask;
	context->subslice_mask = user->subslice_mask;
	context->min_eus_per_subslice = user->min_eus_per_subslice;
	context->max_eus_per_subslice = user->max_eus_per_subslice;

	/* Part specific restrictions. */
	if (GRAPHICS_VER(i915) == 11) {
		unsigned int hw_s = hweight8(device->slice_mask);
		unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
		unsigned int req_s = hweight8(context->slice_mask);
		unsigned int req_ss = hweight8(context->subslice_mask);

		/*
		 * Only full subslice enablement is possible if more than one
		 * slice is turned on.
		 */
		if (req_s > 1 && req_ss != hw_ss_per_s)
			return -EINVAL;

		/*
		 * If more than four (SScount bitfield limit) subslices are
		 * requested then the number has to be even.
		 */
		if (req_ss > 4 && (req_ss & 1))
			return -EINVAL;

		/*
		 * If only one slice is enabled and subslice count is below the
		 * device full enablement, it must be at most half of the all
		 * available subslices.
		 */
		if (req_s == 1 && req_ss < hw_ss_per_s &&
		    req_ss > (hw_ss_per_s / 2))
			return -EINVAL;

		/* ABI restriction - VME use case only. */

		/* All slices or one slice only. */
		if (req_s != 1 && req_s != hw_s)
			return -EINVAL;

		/*
		 * Half subslices or full enablement only when one slice is
		 * enabled.
		 */
		if (req_s == 1 &&
		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
			return -EINVAL;

		/* No EU configuration changes. */
		if ((user->min_eus_per_subslice !=
		     device->max_eus_per_subslice) ||
		    (user->max_eus_per_subslice !=
		     device->max_eus_per_subslice))
			return -EINVAL;
	}

	return 0;
}
192010be98a7SChris Wilson 
/*
 * Apply a userspace-requested SSEU (slice/subslice/EU) configuration to
 * one engine of a context. Only graphics version 11 supports this, and
 * only for the render class (RPCS reconfiguration).
 */
static int set_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	struct intel_sseu sseu;
	unsigned long lookup;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (GRAPHICS_VER(i915) != 11)
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	/* Reserved field must be zero for future extension. */
	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/* Only render engine supports RPCS configuration. */
	if (ce->engine->class != RENDER_CLASS) {
		ret = -ENODEV;
		goto out_ce;
	}

	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
	if (ret)
		goto out_ce;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_ce;

	/* Report how much of the user argument was consumed. */
	args->size = sizeof(user_sseu);

out_ce:
	intel_context_put(ce);
	return ret;
}
197510be98a7SChris Wilson 
197610be98a7SChris Wilson static int
1977a0e04715SChris Wilson set_persistence(struct i915_gem_context *ctx,
1978a0e04715SChris Wilson 		const struct drm_i915_gem_context_param *args)
1979a0e04715SChris Wilson {
1980a0e04715SChris Wilson 	if (args->size)
1981a0e04715SChris Wilson 		return -EINVAL;
1982a0e04715SChris Wilson 
1983a0e04715SChris Wilson 	return __context_set_persistence(ctx, args->value);
1984a0e04715SChris Wilson }
1985a0e04715SChris Wilson 
/*
 * Update the scheduling priority of a context and propagate the change
 * to all of its engines: on engines with timeslicing, the use-semaphores
 * hint is recomputed so only normal-or-better priority contexts use
 * semaphores.
 */
static int set_priority(struct i915_gem_context *ctx,
			const struct drm_i915_gem_context_param *args)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	int err;

	err = validate_priority(ctx->i915, args);
	if (err)
		return err;

	ctx->sched.priority = args->value;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_has_timeslices(ce->engine))
			continue;

		/* Semaphore boosting only for >= NORMAL priority. */
		if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
		    intel_engine_has_semaphores(ce->engine))
			intel_context_set_use_semaphores(ce);
		else
			intel_context_clear_use_semaphores(ce);
	}
	i915_gem_context_unlock_engines(ctx);

	return 0;
}
20130f100b70SChris Wilson 
2014d3ac8d42SDaniele Ceraolo Spurio static int get_protected(struct i915_gem_context *ctx,
2015d3ac8d42SDaniele Ceraolo Spurio 			 struct drm_i915_gem_context_param *args)
2016d3ac8d42SDaniele Ceraolo Spurio {
2017d3ac8d42SDaniele Ceraolo Spurio 	args->size = 0;
2018d3ac8d42SDaniele Ceraolo Spurio 	args->value = i915_gem_context_uses_protected_content(ctx);
2019d3ac8d42SDaniele Ceraolo Spurio 
2020d3ac8d42SDaniele Ceraolo Spurio 	return 0;
2021d3ac8d42SDaniele Ceraolo Spurio }
2022d3ac8d42SDaniele Ceraolo Spurio 
/*
 * Dispatch a SETPARAM request against an already finalized context.
 * Parameters that may only be configured at creation time (VM, ENGINES,
 * PROTECTED_CONTENT) or that have been removed from the uAPI fall
 * through to -EINVAL here.
 */
static int ctx_setparam(struct drm_i915_file_private *fpriv,
			struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			/* Only root may opt out of banning. */
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else if (i915_gem_context_uses_protected_content(ctx))
			ret = -EPERM; /* can't clear this for protected contexts */
		else
			i915_gem_context_clear_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!args->value)
			i915_gem_context_clear_recoverable(ctx);
		else if (i915_gem_context_uses_protected_content(ctx))
			ret = -EPERM; /* can't set this for protected contexts */
		else
			i915_gem_context_set_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		ret = set_priority(ctx, args);
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		ret = set_persistence(ctx, args);
		break;

	/* Creation-time-only or retired parameters. */
	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_RINGSIZE:
	case I915_CONTEXT_PARAM_VM:
	case I915_CONTEXT_PARAM_ENGINES:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
208810be98a7SChris Wilson 
/*
 * State threaded through the GEM_CONTEXT_CREATE extension handlers
 * (see create_setparam()).
 */
struct create_ext {
	struct i915_gem_proto_context *pc; /* proto-context being configured */
	struct drm_i915_file_private *fpriv; /* file creating the context */
};
209310be98a7SChris Wilson 
209410be98a7SChris Wilson static int create_setparam(struct i915_user_extension __user *ext, void *data)
209510be98a7SChris Wilson {
209610be98a7SChris Wilson 	struct drm_i915_gem_context_create_ext_setparam local;
209710be98a7SChris Wilson 	const struct create_ext *arg = data;
209810be98a7SChris Wilson 
209910be98a7SChris Wilson 	if (copy_from_user(&local, ext, sizeof(local)))
210010be98a7SChris Wilson 		return -EFAULT;
210110be98a7SChris Wilson 
210210be98a7SChris Wilson 	if (local.param.ctx_id)
210310be98a7SChris Wilson 		return -EINVAL;
210410be98a7SChris Wilson 
2105d4433c76SJason Ekstrand 	return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
210610be98a7SChris Wilson }
210710be98a7SChris Wilson 
/* Handler for extension ids that are no longer part of the uAPI. */
static int invalid_ext(struct i915_user_extension __user *ext, void *data)
{
	return -EINVAL;
}
211210be98a7SChris Wilson 
/* GEM_CONTEXT_CREATE extension dispatch table, indexed by extension id. */
static const i915_user_extension_fn create_extensions[] = {
	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
	[I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
};
211710be98a7SChris Wilson 
211810be98a7SChris Wilson static bool client_is_banned(struct drm_i915_file_private *file_priv)
211910be98a7SChris Wilson {
212010be98a7SChris Wilson 	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
212110be98a7SChris Wilson }
212210be98a7SChris Wilson 
/*
 * Look up a finalized context by id, returning a new reference or NULL.
 * kref_get_unless_zero() guards against racing with the final unref of
 * a context still visible in the xarray under RCU.
 */
static inline struct i915_gem_context *
__context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = xa_load(&file_priv->context_xa, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();

	return ctx;
}
2136a4c1cdd3SJason Ekstrand 
/*
 * Turn a proto-context into a real context under @id: create the
 * context, publish it in context_xa via gem_context_register(), and
 * retire the proto-context entry. Caller must hold proto_context_lock.
 *
 * Returns the new context with an extra reference for the caller, or
 * an ERR_PTR.
 */
static struct i915_gem_context *
finalize_create_context_locked(struct drm_i915_file_private *file_priv,
			       struct i915_gem_proto_context *pc, u32 id)
{
	struct i915_gem_context *ctx;
	void *old;

	lockdep_assert_held(&file_priv->proto_context_lock);

	ctx = i915_gem_create_context(file_priv->dev_priv, pc);
	if (IS_ERR(ctx))
		return ctx;

	gem_context_register(ctx, file_priv, id);

	/* The slot must still have held this proto-context. */
	old = xa_erase(&file_priv->proto_context_xa, id);
	GEM_BUG_ON(old != pc);
	proto_context_close(file_priv->dev_priv, pc);

	/* One for the xarray and one for the caller */
	return i915_gem_context_get(ctx);
}
2159a4c1cdd3SJason Ekstrand 
/*
 * Look up a context by id, lazily finalizing a pending proto-context if
 * that is all that exists for @id. The fast path is a lockless lookup;
 * on miss we retry under proto_context_lock (double-checked) before
 * consulting proto_context_xa.
 *
 * Returns a referenced context or ERR_PTR(-ENOENT).
 */
struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;

	ctx = __context_lookup(file_priv, id);
	if (ctx)
		return ctx;

	mutex_lock(&file_priv->proto_context_lock);
	/* Try one more time under the lock */
	ctx = __context_lookup(file_priv, id);
	if (!ctx) {
		pc = xa_load(&file_priv->proto_context_xa, id);
		if (!pc)
			ctx = ERR_PTR(-ENOENT);
		else
			ctx = finalize_create_context_locked(file_priv, pc, id);
	}
	mutex_unlock(&file_priv->proto_context_lock);

	return ctx;
}
2184a4c1cdd3SJason Ekstrand 
/*
 * GEM_CONTEXT_CREATE ioctl: build a proto-context, run any creation
 * extensions against it, and either finalize it immediately (graphics
 * version > 12) or register the proto-context for lazy finalization on
 * first use.
 */
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(to_gt(i915));
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		drm_dbg(&i915->drm,
			"client %s[%d] banned from creating ctx\n",
			current->comm, task_pid_nr(current));
		return -EIO;
	}

	ext_data.pc = proto_context_create(i915, args->flags);
	if (IS_ERR(ext_data.pc))
		return PTR_ERR(ext_data.pc);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_pc;
	}

	if (GRAPHICS_VER(i915) > 12) {
		struct i915_gem_context *ctx;

		/* Get ourselves a context ID */
		ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
			       xa_limit_32b, GFP_KERNEL);
		if (ret)
			goto err_pc;

		ctx = i915_gem_create_context(i915, ext_data.pc);
		if (IS_ERR(ctx)) {
			ret = PTR_ERR(ctx);
			goto err_pc;
		}

		proto_context_close(i915, ext_data.pc);
		gem_context_register(ctx, ext_data.fpriv, id);
	} else {
		/* Defer finalization until first lookup or setparam. */
		ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
		if (ret < 0)
			goto err_pc;
	}

	args->ctx_id = id;
	drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);

	return 0;

err_pc:
	proto_context_close(i915, ext_data.pc);
	return ret;
}
225710be98a7SChris Wilson 
/*
 * GEM_CONTEXT_DESTROY ioctl: remove a context by id. The id may still
 * refer to an unfinalized proto-context, so both xarrays are erased;
 * exactly one of the two should hold the entry.
 */
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	/* id 0 is reserved and never handed out. */
	if (!args->ctx_id)
		return -ENOENT;

	/* We need to hold the proto-context lock here to prevent races
	 * with finalize_create_context_locked().
	 */
	mutex_lock(&file_priv->proto_context_lock);
	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
	pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
	mutex_unlock(&file_priv->proto_context_lock);

	if (!ctx && !pc)
		return -ENOENT;
	GEM_WARN_ON(ctx && pc);

	if (pc)
		proto_context_close(file_priv->dev_priv, pc);

	if (ctx)
		context_close(ctx);

	return 0;
}
229210be98a7SChris Wilson 
/*
 * Report the current SSEU configuration of one engine of a context.
 * With args->size == 0 only the required struct size is reported back;
 * otherwise the configuration is copied out to args->value.
 */
static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}
234710be98a7SChris Wilson 
/*
 * GEM_CONTEXT_GETPARAM ioctl: read a single parameter of a context.
 * Retired parameters (NO_ZEROMAP, BAN_PERIOD, ENGINES, RINGSIZE) report
 * -EINVAL.
 */
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (args->param) {
	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		vm = i915_gem_context_get_eb_vm(ctx);
		args->value = vm->total;
		i915_vm_put(vm);

		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
		ret = get_protected(ctx, args);
		break;

	/* Retired parameters. */
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_ENGINES:
	case I915_CONTEXT_PARAM_RINGSIZE:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}
241910be98a7SChris Wilson 
/*
 * GEM_CONTEXT_SETPARAM ioctl: set a single parameter on either a
 * finalized context or, pre-gen13, a still-pending proto-context. The
 * proto_context_lock serialises against concurrent finalization.
 */
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;
	int ret = 0;

	mutex_lock(&file_priv->proto_context_lock);
	ctx = __context_lookup(file_priv, args->ctx_id);
	if (!ctx) {
		pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
		if (pc) {
			/* Contexts should be finalized inside
			 * GEM_CONTEXT_CREATE starting with graphics
			 * version 13.
			 */
			WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
			ret = set_proto_ctx_param(file_priv, pc, args);
		} else {
			ret = -ENOENT;
		}
	}
	mutex_unlock(&file_priv->proto_context_lock);

	if (ctx) {
		ret = ctx_setparam(file_priv, ctx, args);
		i915_gem_context_put(ctx);
	}

	return ret;
}
245310be98a7SChris Wilson 
/*
 * GET_RESET_STATS ioctl: report hang statistics for a context. The
 * global reset count is privileged information (CAP_SYS_ADMIN only).
 */
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;

	if (args->flags || args->pad)
		return -EINVAL;

	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	i915_gem_context_put(ctx);
	return 0;
}
248610be98a7SChris Wilson 
248710be98a7SChris Wilson /* GEM context-engines iterator: for_each_gem_engine() */
248810be98a7SChris Wilson struct intel_context *
248910be98a7SChris Wilson i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
249010be98a7SChris Wilson {
249110be98a7SChris Wilson 	const struct i915_gem_engines *e = it->engines;
249210be98a7SChris Wilson 	struct intel_context *ctx;
249310be98a7SChris Wilson 
2494130a95e9SChris Wilson 	if (unlikely(!e))
2495130a95e9SChris Wilson 		return NULL;
2496130a95e9SChris Wilson 
249710be98a7SChris Wilson 	do {
249810be98a7SChris Wilson 		if (it->idx >= e->num_engines)
249910be98a7SChris Wilson 			return NULL;
250010be98a7SChris Wilson 
250110be98a7SChris Wilson 		ctx = e->engines[it->idx++];
250210be98a7SChris Wilson 	} while (!ctx);
250310be98a7SChris Wilson 
250410be98a7SChris Wilson 	return ctx;
250510be98a7SChris Wilson }
250610be98a7SChris Wilson 
250710be98a7SChris Wilson #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
250810be98a7SChris Wilson #include "selftests/mock_context.c"
250910be98a7SChris Wilson #include "selftests/i915_gem_context.c"
251010be98a7SChris Wilson #endif
251110be98a7SChris Wilson 
/*
 * Module-exit teardown for the GEM context code: destroy the slab cache
 * for i915_lut_handle objects created by i915_gem_context_module_init().
 * kmem_cache_destroy() tolerates a NULL cache, so this is safe even if
 * init failed part-way.
 */
void i915_gem_context_module_exit(void)
{
	kmem_cache_destroy(slab_luts);
}
251610be98a7SChris Wilson 
2517a6270d1dSDaniel Vetter int __init i915_gem_context_module_init(void)
251810be98a7SChris Wilson {
2519a6270d1dSDaniel Vetter 	slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2520a6270d1dSDaniel Vetter 	if (!slab_luts)
252110be98a7SChris Wilson 		return -ENOMEM;
252210be98a7SChris Wilson 
252310be98a7SChris Wilson 	return 0;
252410be98a7SChris Wilson }
2525