1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2008,2010 Intel Corporation
5  */
6 
7 #include <linux/intel-iommu.h>
8 #include <linux/dma-resv.h>
9 #include <linux/sync_file.h>
10 #include <linux/uaccess.h>
11 
12 #include <drm/drm_syncobj.h>
13 
14 #include "display/intel_frontbuffer.h"
15 
16 #include "gem/i915_gem_ioctls.h"
17 #include "gt/intel_context.h"
18 #include "gt/intel_gt.h"
19 #include "gt/intel_gt_buffer_pool.h"
20 #include "gt/intel_gt_pm.h"
21 #include "gt/intel_ring.h"
22 
23 #include "i915_drv.h"
24 #include "i915_gem_clflush.h"
25 #include "i915_gem_context.h"
26 #include "i915_gem_ioctls.h"
27 #include "i915_sw_fence_work.h"
28 #include "i915_trace.h"
29 
30 struct eb_vma {
31 	struct i915_vma *vma;
32 	unsigned int flags;
33 
34 	/** This vma's place in the execbuf reservation list */
35 	struct drm_i915_gem_exec_object2 *exec;
36 	struct list_head bind_link;
37 	struct list_head reloc_link;
38 
39 	struct hlist_node node;
40 	u32 handle;
41 };
42 
43 struct eb_vma_array {
44 	struct kref kref;
45 	struct eb_vma vma[];
46 };
47 
48 #define __EXEC_OBJECT_HAS_PIN		BIT(31)
49 #define __EXEC_OBJECT_HAS_FENCE		BIT(30)
50 #define __EXEC_OBJECT_NEEDS_MAP		BIT(29)
51 #define __EXEC_OBJECT_NEEDS_BIAS	BIT(28)
52 #define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 28) /* all of the above */
53 
54 #define __EXEC_HAS_RELOC	BIT(31)
55 #define __EXEC_INTERNAL_FLAGS	(~0u << 31)
56 #define UPDATE			PIN_OFFSET_FIXED
57 
58 #define BATCH_OFFSET_BIAS (256*1024)
59 
60 #define __I915_EXEC_ILLEGAL_FLAGS \
61 	(__I915_EXEC_UNKNOWN_FLAGS | \
62 	 I915_EXEC_CONSTANTS_MASK  | \
63 	 I915_EXEC_RESOURCE_STREAMER)
64 
65 /* Catch emission of unexpected errors for CI! */
66 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
67 #undef EINVAL
68 #define EINVAL ({ \
69 	DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
70 	22; \
71 })
72 #endif
73 
74 /**
75  * DOC: User command execution
76  *
77  * Userspace submits commands to be executed on the GPU as an instruction
78  * stream within a GEM object we call a batchbuffer. These instructions may
79  * refer to other GEM objects containing auxiliary state such as kernels,
80  * samplers, render targets and even secondary batchbuffers. Userspace does
81  * not know where in the GPU memory these objects reside and so before the
82  * batchbuffer is passed to the GPU for execution, those addresses in the
83  * batchbuffer and auxiliary objects are updated. This is known as relocation,
84  * or patching. To try and avoid having to relocate each object on the next
85  * execution, userspace is told the location of those objects in this pass,
86  * but this remains just a hint as the kernel may choose a new location for
87  * any object in the future.
88  *
89  * At the level of talking to the hardware, submitting a batchbuffer for the
90  * GPU to execute amounts to adding content to a buffer from which the HW
91  * command streamer is reading.
92  *
93  * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
94  *    Execlists, this command is not placed on the same buffer as the
95  *    remaining items.
96  *
97  * 2. Add a command to invalidate caches to the buffer.
98  *
99  * 3. Add a batchbuffer start command to the buffer; the start command is
100  *    essentially a token together with the GPU address of the batchbuffer
101  *    to be executed.
102  *
103  * 4. Add a pipeline flush to the buffer.
104  *
105  * 5. Add a memory write command to the buffer to record when the GPU
106  *    is done executing the batchbuffer. The memory write writes the
107  *    global sequence number of the request, ``i915_request::global_seqno``;
108  *    the i915 driver uses the current value at that location to determine
109  *    if the GPU has completed the batchbuffer.
110  *
111  * 6. Add a user interrupt command to the buffer. This command instructs
112  *    the GPU to issue an interrupt when the command, pipeline flush and
113  *    memory write are completed.
114  *
115  * 7. Inform the hardware of the additional commands added to the buffer
116  *    (by updating the tail pointer).
117  *
118  * Processing an execbuf ioctl is conceptually split up into a few phases.
119  *
120  * 1. Validation - Ensure all the pointers, handles and flags are valid.
121  * 2. Reservation - Assign GPU address space for every object
122  * 3. Relocation - Update any addresses to point to the final locations
123  * 4. Serialisation - Order the request with respect to its dependencies
124  * 5. Construction - Construct a request to execute the batchbuffer
125  * 6. Submission (at some point in the future execution)
126  *
127  * Reserving resources for the execbuf is the most complicated phase. We
128  * neither want to have to migrate the object in the address space, nor do
129  * we want to have to update any relocations pointing to this object. Ideally,
130  * we want to leave the object where it is and for all the existing relocations
131  * to match. If the object is given a new address, or if userspace thinks the
132  * object is elsewhere, we have to parse all the relocation entries and update
133  * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
134  * all the target addresses in all of its objects match the value in the
135  * relocation entries and that they all match the presumed offsets given by the
136  * list of execbuffer objects. Using this knowledge, we know that if we haven't
137  * moved any buffers, all the relocation entries are valid and we can skip
138  * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
139  * hang.) The requirements for using I915_EXEC_NO_RELOC are:
140  *
141  *      The addresses written in the objects must match the corresponding
142  *      reloc.presumed_offset which in turn must match the corresponding
143  *      execobject.offset.
144  *
145  *      Any render targets written to in the batch must be flagged with
146  *      EXEC_OBJECT_WRITE.
147  *
148  *      To avoid stalling, execobject.offset should match the current
149  *      address of that object within the active context.
150  *
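 * As an illustration only (the handles, offsets, sizes and the relocs array
 * below are placeholders, and error handling is omitted), a submission
 * relying on I915_EXEC_NO_RELOC might look like this, where obj[0] is a
 * render target written by the batch, obj[1] is the batch itself (last in
 * the list by default), and relocs points to an array of
 * struct drm_i915_gem_relocation_entry whose presumed_offset fields match
 * the offsets below::
 *
 *	struct drm_i915_gem_exec_object2 obj[2] = {};
 *	struct drm_i915_gem_execbuffer2 execbuf = {};
 *
 *	obj[0].handle = target_handle;
 *	obj[0].offset = presumed_target_offset;
 *	obj[0].flags = EXEC_OBJECT_WRITE;
 *	obj[1].handle = batch_handle;
 *	obj[1].offset = presumed_batch_offset;
 *	obj[1].relocation_count = num_relocs;
 *	obj[1].relocs_ptr = (uintptr_t)relocs;
 *
 *	execbuf.buffers_ptr = (uintptr_t)obj;
 *	execbuf.buffer_count = 2;
 *	execbuf.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC;
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *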
151  * The reservation is done in multiple phases. First we try to keep any
152  * object already bound in its current location - as long as it meets the
153  * constraints imposed by the new execbuffer. Any object left unbound after the
154  * first pass is then fitted into any available idle space. If an object does
155  * not fit, all objects are removed from the reservation and the process rerun
156  * after sorting the objects into a priority order (more difficult to fit
157  * objects are tried first). Failing that, the entire VM is cleared and we try
158  * to fit the execbuf one last time before concluding that it simply will not
159  * fit.
160  *
161  * A small complication to all of this is that we allow userspace not only to
162  * specify an alignment and a size for the object in the address space, but
163  * we also allow userspace to specify the exact offset. These objects are
164  * simpler to place (the location is known a priori); all we have to do is make
165  * sure the space is available.
166  *
167  * Once all the objects are in place, patching up the buried pointers to point
168  * to the final locations is a fairly simple job of walking over the relocation
169  * entry arrays, looking up the right address and rewriting the value into
170  * the object. Simple! ... The relocation entries are stored in user memory
171  * and so to access them we have to copy them into a local buffer. That copy
172  * has to avoid taking any pagefaults as they may lead back to a GEM object
173  * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
174  * the relocation into multiple passes. First we try to do everything within an
175  * atomic context (avoid the pagefaults) which requires that we never wait. If
176  * we detect that we may wait, or if we need to fault, then we have to fall back
177  * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
178  * bells yet?) Dropping the mutex means that we lose all the state we have
179  * built up so far for the execbuf and we must reset any global data. However,
180  * we do leave the objects pinned in their final locations - which is a
181  * potential issue for concurrent execbufs. Once we have left the mutex, we can
182  * allocate and copy all the relocation entries into a large array at our
183  * leisure, reacquire the mutex, reclaim all the objects and other state and
184  * then proceed to update any incorrect addresses with the objects.
185  *
186  * As we process the relocation entries, we maintain a record of whether the
187  * object is being written to. Using NO_RELOC, we expect userspace to provide
188  * this information instead. We also check whether we can skip the relocation
189  * by comparing the expected value inside the relocation entry with the target's
190  * final address. If they differ, we have to map the current object and rewrite
191  * the 4 or 8 byte pointer within.
192  *
193  * Serialising an execbuf is quite simple according to the rules of the GEM
194  * ABI. Execution within each context is ordered by the order of submission.
195  * Writes to any GEM object are in order of submission and are exclusive. Reads
196  * from a GEM object are unordered with respect to other reads, but ordered by
197  * writes. A write submitted after a read cannot occur before the read, and
198  * similarly any read submitted after a write cannot occur before the write.
199  * Writes are ordered between engines such that only one write occurs at any
200  * time (completing any reads beforehand) - using semaphores where available
201  * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
202  * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
203  * reads before starting, and any read (either using set-domain or pread) must
204  * flush all GPU writes before starting. (Note we only employ a barrier before;
205  * we currently rely on userspace not concurrently starting a new execution
206  * whilst reading or writing to an object. This may be an advantage or not
207  * depending on how much you trust userspace not to shoot themselves in the
208  * foot.) Serialisation may just result in the request being inserted into
209  * a DAG awaiting its turn, but the simplest approach is to wait on the CPU until
210  * all dependencies are resolved.
211  *
212  * After all of that, it is just a matter of closing the request and handing it to
213  * the hardware (well, leaving it in a queue to be executed). However, we also
214  * offer the ability for batchbuffers to be run with elevated privileges so
215  * that they can access otherwise hidden registers. (Used to adjust L3 cache etc.)
216  * Before any batch is given extra privileges we first must check that it
217  * contains no nefarious instructions: we check that each instruction is from
218  * our whitelist and all registers are also from an allowed list. We first
219  * copy the user's batchbuffer to a shadow (so that the user doesn't have
220  * access to it, either by the CPU or GPU as we scan it) and then parse each
221  * instruction. If everything is ok, we set a flag telling the hardware to run
222  * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
223  */
224 
225 struct i915_execbuffer {
226 	struct drm_i915_private *i915; /** i915 backpointer */
227 	struct drm_file *file; /** per-file lookup tables and limits */
228 	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
229 	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
230 	struct eb_vma *vma;
231 
232 	struct intel_engine_cs *engine; /** engine to queue the request to */
233 	struct intel_context *context; /* logical state for the request */
234 	struct i915_gem_context *gem_context; /** caller's context */
235 
236 	struct i915_request *request; /** our request to build */
237 	struct eb_vma *batch; /** identity of the batch obj/vma */
238 	struct i915_vma *trampoline; /** trampoline used for chaining */
239 
240 	/** actual size of execobj[] as we may extend it for the cmdparser */
241 	unsigned int buffer_count;
242 
243 	/** list of vma not yet bound during reservation phase */
244 	struct list_head unbound;
245 
246 	/** list of vma that have execobj.relocation_count */
247 	struct list_head relocs;
248 
249 	/**
250 	 * Track the most recently used object for relocations, as we
251 	 * frequently have to perform multiple relocations within the same
252 	 * obj/page
253 	 */
254 	struct reloc_cache {
255 		struct drm_mm_node node; /** temporary GTT binding */
256 		unsigned int gen; /** Cached value of INTEL_GEN */
257 		bool use_64bit_reloc : 1;
258 		bool has_llc : 1;
259 		bool has_fence : 1;
260 		bool needs_unfenced : 1;
261 
262 		struct i915_vma *target;
263 		struct i915_request *rq;
264 		struct i915_vma *rq_vma;
265 		u32 *rq_cmd;
266 		unsigned int rq_size;
267 	} reloc_cache;
268 
269 	u64 invalid_flags; /** Set of execobj.flags that are invalid */
270 	u32 context_flags; /** Set of execobj.flags to insert from the ctx */
271 
272 	u32 batch_start_offset; /** Location within object of batch */
273 	u32 batch_len; /** Length of batch within object */
274 	u32 batch_flags; /** Flags composed for emit_bb_start() */
275 
276 	/**
277 	 * Indicate either the size of the hashtable used to resolve
278 	 * relocation handles, or if negative that we are using a direct
279 	 * index into the execobj[].
280 	 */
281 	int lut_size;
282 	struct hlist_head *buckets; /** ht for relocation handles */
283 	struct eb_vma_array *array;
284 };
285 
286 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
287 {
288 	return intel_engine_requires_cmd_parser(eb->engine) ||
289 		(intel_engine_using_cmd_parser(eb->engine) &&
290 		 eb->args->batch_len);
291 }
292 
293 static struct eb_vma_array *eb_vma_array_create(unsigned int count)
294 {
295 	struct eb_vma_array *arr;
296 
297 	arr = kvmalloc(struct_size(arr, vma, count), GFP_KERNEL | __GFP_NOWARN);
298 	if (!arr)
299 		return NULL;
300 
301 	kref_init(&arr->kref);
302 	arr->vma[0].vma = NULL;
303 
304 	return arr;
305 }
306 
307 static inline void eb_unreserve_vma(struct eb_vma *ev)
308 {
309 	struct i915_vma *vma = ev->vma;
310 
311 	if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
312 		__i915_vma_unpin_fence(vma);
313 
314 	if (ev->flags & __EXEC_OBJECT_HAS_PIN)
315 		__i915_vma_unpin(vma);
316 
317 	ev->flags &= ~(__EXEC_OBJECT_HAS_PIN |
318 		       __EXEC_OBJECT_HAS_FENCE);
319 }
320 
321 static void eb_vma_array_destroy(struct kref *kref)
322 {
323 	struct eb_vma_array *arr = container_of(kref, typeof(*arr), kref);
324 	struct eb_vma *ev = arr->vma;
325 
326 	while (ev->vma) {
327 		eb_unreserve_vma(ev);
328 		i915_vma_put(ev->vma);
329 		ev++;
330 	}
331 
332 	kvfree(arr);
333 }
334 
335 static void eb_vma_array_put(struct eb_vma_array *arr)
336 {
337 	kref_put(&arr->kref, eb_vma_array_destroy);
338 }
339 
340 static int eb_create(struct i915_execbuffer *eb)
341 {
342 	/* Allocate an extra slot for use by the command parser + sentinel */
343 	eb->array = eb_vma_array_create(eb->buffer_count + 2);
344 	if (!eb->array)
345 		return -ENOMEM;
346 
347 	eb->vma = eb->array->vma;
348 
349 	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
350 		unsigned int size = 1 + ilog2(eb->buffer_count);
351 
352 		/*
353 		 * Without a 1:1 association between relocation handles and
354 		 * the execobject[] index, we instead create a hashtable.
355 		 * We size it dynamically based on available memory, starting
356 		 * first with a 1:1 associative hash and scaling back until
357 		 * the allocation succeeds.
358 		 *
359 		 * Later on we use a positive lut_size to indicate we are
360 		 * using this hashtable, and a negative value to indicate a
361 		 * direct lookup.
362 		 */
363 		do {
364 			gfp_t flags;
365 
366 			/* While we can still reduce the allocation size, don't
367 			 * raise a warning and allow the allocation to fail.
368 			 * On the last pass though, we want to try as hard
369 			 * as possible to perform the allocation and warn
370 			 * if it fails.
371 			 */
372 			flags = GFP_KERNEL;
373 			if (size > 1)
374 				flags |= __GFP_NORETRY | __GFP_NOWARN;
375 
376 			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
377 					      flags);
378 			if (eb->buckets)
379 				break;
380 		} while (--size);
381 
382 		if (unlikely(!size)) {
383 			eb_vma_array_put(eb->array);
384 			return -ENOMEM;
385 		}
386 
387 		eb->lut_size = size;
388 	} else {
389 		eb->lut_size = -eb->buffer_count;
390 	}
391 
392 	return 0;
393 }
394 
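/*
 * Check whether the vma's current binding violates any of the placement
 * constraints requested by its execobject entry: pad_to_size, alignment,
 * a fixed (pinned) offset, the batch-offset bias, the 32b address limit
 * and mappability. Returns true if the vma must be rebound.
 */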
395 static bool
396 eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
397 		 const struct i915_vma *vma,
398 		 unsigned int flags)
399 {
400 	if (vma->node.size < entry->pad_to_size)
401 		return true;
402 
403 	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
404 		return true;
405 
406 	if (flags & EXEC_OBJECT_PINNED &&
407 	    vma->node.start != entry->offset)
408 		return true;
409 
410 	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
411 	    vma->node.start < BATCH_OFFSET_BIAS)
412 		return true;
413 
414 	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
415 	    (vma->node.start + vma->node.size - 1) >> 32)
416 		return true;
417 
418 	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
419 	    !i915_vma_is_map_and_fenceable(vma))
420 		return true;
421 
422 	return false;
423 }
424 
425 static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry,
426 			unsigned int exec_flags)
427 {
428 	u64 pin_flags = 0;
429 
430 	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
431 		pin_flags |= PIN_GLOBAL;
432 
433 	/*
434 	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
435 	 * limit address to the first 4GBs for unflagged objects.
436 	 */
437 	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
438 		pin_flags |= PIN_ZONE_4G;
439 
440 	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
441 		pin_flags |= PIN_MAPPABLE;
442 
443 	if (exec_flags & EXEC_OBJECT_PINNED)
444 		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
445 	else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
446 		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
447 
448 	return pin_flags;
449 }
450 
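/*
 * Try to pin the vma where it already is (or where userspace presumed it to
 * be), falling back to any free hole, without evicting anything else
 * (PIN_NOEVICT). Returns true only if the pin succeeded and the resulting
 * placement satisfies the execobject constraints; otherwise the caller must
 * take the slow reservation path.
 */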
451 static inline bool
452 eb_pin_vma(struct i915_execbuffer *eb,
453 	   const struct drm_i915_gem_exec_object2 *entry,
454 	   struct eb_vma *ev)
455 {
456 	struct i915_vma *vma = ev->vma;
457 	u64 pin_flags;
458 
459 	if (vma->node.size)
460 		pin_flags = vma->node.start;
461 	else
462 		pin_flags = entry->offset & PIN_OFFSET_MASK;
463 
464 	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
465 	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
466 		pin_flags |= PIN_GLOBAL;
467 
468 	/* Attempt to reuse the current location if available */
469 	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) {
470 		if (entry->flags & EXEC_OBJECT_PINNED)
471 			return false;
472 
473 		/* Failing that pick any _free_ space if suitable */
474 		if (unlikely(i915_vma_pin(vma,
475 					  entry->pad_to_size,
476 					  entry->alignment,
477 					  eb_pin_flags(entry, ev->flags) |
478 					  PIN_USER | PIN_NOEVICT)))
479 			return false;
480 	}
481 
482 	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
483 		if (unlikely(i915_vma_pin_fence(vma))) {
484 			i915_vma_unpin(vma);
485 			return false;
486 		}
487 
488 		if (vma->fence)
489 			ev->flags |= __EXEC_OBJECT_HAS_FENCE;
490 	}
491 
492 	ev->flags |= __EXEC_OBJECT_HAS_PIN;
493 	return !eb_vma_misplaced(entry, vma, ev->flags);
494 }
495 
496 static int
497 eb_validate_vma(struct i915_execbuffer *eb,
498 		struct drm_i915_gem_exec_object2 *entry,
499 		struct i915_vma *vma)
500 {
501 	if (unlikely(entry->flags & eb->invalid_flags))
502 		return -EINVAL;
503 
504 	if (unlikely(entry->alignment &&
505 		     !is_power_of_2_u64(entry->alignment)))
506 		return -EINVAL;
507 
508 	/*
509 	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
510 	 * any non-page-aligned or non-canonical addresses.
511 	 */
512 	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
513 		     entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
514 		return -EINVAL;
515 
516 	/* pad_to_size was once a reserved field, so sanitize it */
517 	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
518 		if (unlikely(offset_in_page(entry->pad_to_size)))
519 			return -EINVAL;
520 	} else {
521 		entry->pad_to_size = 0;
522 	}
523 	/*
524 	 * From drm_mm perspective address space is continuous,
525 	 * so from this point we're always using non-canonical
526 	 * form internally.
527 	 */
528 	entry->offset = gen8_noncanonical_addr(entry->offset);
529 
530 	if (!eb->reloc_cache.has_fence) {
531 		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
532 	} else {
533 		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
534 		     eb->reloc_cache.needs_unfenced) &&
535 		    i915_gem_object_is_tiled(vma->obj))
536 			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
537 	}
538 
539 	if (!(entry->flags & EXEC_OBJECT_PINNED))
540 		entry->flags |= eb->context_flags;
541 
542 	return 0;
543 }
544 
545 static void
546 eb_add_vma(struct i915_execbuffer *eb,
547 	   unsigned int i, unsigned batch_idx,
548 	   struct i915_vma *vma)
549 {
550 	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
551 	struct eb_vma *ev = &eb->vma[i];
552 
553 	GEM_BUG_ON(i915_vma_is_closed(vma));
554 
555 	ev->vma = vma;
556 	ev->exec = entry;
557 	ev->flags = entry->flags;
558 
559 	if (eb->lut_size > 0) {
560 		ev->handle = entry->handle;
561 		hlist_add_head(&ev->node,
562 			       &eb->buckets[hash_32(entry->handle,
563 						    eb->lut_size)]);
564 	}
565 
566 	if (entry->relocation_count)
567 		list_add_tail(&ev->reloc_link, &eb->relocs);
568 
569 	/*
570 	 * SNA is doing fancy tricks with compressing batch buffers, which leads
571 	 * to negative relocation deltas. Usually that works out ok since the
572 	 * relocate address is still positive, except when the batch is placed
573 	 * very low in the GTT. Ensure this doesn't happen.
574 	 *
575 	 * Note that actual hangs have only been observed on gen7, but for
576 	 * paranoia do it everywhere.
577 	 */
578 	if (i == batch_idx) {
579 		if (entry->relocation_count &&
580 		    !(ev->flags & EXEC_OBJECT_PINNED))
581 			ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
582 		if (eb->reloc_cache.has_fence)
583 			ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
584 
585 		eb->batch = ev;
586 	}
587 
588 	if (eb_pin_vma(eb, entry, ev)) {
589 		if (entry->offset != vma->node.start) {
590 			entry->offset = vma->node.start | UPDATE;
591 			eb->args->flags |= __EXEC_HAS_RELOC;
592 		}
593 	} else {
594 		eb_unreserve_vma(ev);
595 		list_add_tail(&ev->bind_link, &eb->unbound);
596 	}
597 }
598 
599 static int eb_reserve_vma(const struct i915_execbuffer *eb,
600 			  struct eb_vma *ev,
601 			  u64 pin_flags)
602 {
603 	struct drm_i915_gem_exec_object2 *entry = ev->exec;
604 	struct i915_vma *vma = ev->vma;
605 	int err;
606 
607 	if (drm_mm_node_allocated(&vma->node) &&
608 	    eb_vma_misplaced(entry, vma, ev->flags)) {
609 		err = i915_vma_unbind(vma);
610 		if (err)
611 			return err;
612 	}
613 
614 	err = i915_vma_pin(vma,
615 			   entry->pad_to_size, entry->alignment,
616 			   eb_pin_flags(entry, ev->flags) | pin_flags);
617 	if (err)
618 		return err;
619 
620 	if (entry->offset != vma->node.start) {
621 		entry->offset = vma->node.start | UPDATE;
622 		eb->args->flags |= __EXEC_HAS_RELOC;
623 	}
624 
625 	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
626 		err = i915_vma_pin_fence(vma);
627 		if (unlikely(err)) {
628 			i915_vma_unpin(vma);
629 			return err;
630 		}
631 
632 		if (vma->fence)
633 			ev->flags |= __EXEC_OBJECT_HAS_FENCE;
634 	}
635 
636 	ev->flags |= __EXEC_OBJECT_HAS_PIN;
637 	GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
638 
639 	return 0;
640 }
641 
642 static int eb_reserve(struct i915_execbuffer *eb)
643 {
644 	const unsigned int count = eb->buffer_count;
645 	unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
646 	struct list_head last;
647 	struct eb_vma *ev;
648 	unsigned int i, pass;
649 	int err = 0;
650 
651 	/*
652 	 * Attempt to pin all of the buffers into the GTT.
653 	 * This is done in 3 phases:
654 	 *
655 	 * 1a. Unbind all objects that do not match the GTT constraints for
656 	 *     the execbuffer (fenceable, mappable, alignment etc).
657 	 * 1b. Increment pin count for already bound objects.
658 	 * 2.  Bind new objects.
659 	 * 3.  Decrement pin count.
660 	 *
661 	 * This avoids unnecessary unbinding of later objects in order to make
662 	 * room for the earlier objects *unless* we need to defragment.
663 	 */
664 
665 	if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex))
666 		return -EINTR;
667 
668 	pass = 0;
669 	do {
670 		list_for_each_entry(ev, &eb->unbound, bind_link) {
671 			err = eb_reserve_vma(eb, ev, pin_flags);
672 			if (err)
673 				break;
674 		}
675 		if (!(err == -ENOSPC || err == -EAGAIN))
676 			break;
677 
678 		/* Resort *all* the objects into priority order */
679 		INIT_LIST_HEAD(&eb->unbound);
680 		INIT_LIST_HEAD(&last);
681 		for (i = 0; i < count; i++) {
682 			unsigned int flags;
683 
684 			ev = &eb->vma[i];
685 			flags = ev->flags;
686 			if (flags & EXEC_OBJECT_PINNED &&
687 			    flags & __EXEC_OBJECT_HAS_PIN)
688 				continue;
689 
690 			eb_unreserve_vma(ev);
691 
692 			if (flags & EXEC_OBJECT_PINNED)
693 				/* Pinned objects must keep their slot */
694 				list_add(&ev->bind_link, &eb->unbound);
695 			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
696 				/* Mappable objects require the lowest 256MiB (aperture) */
697 				list_add_tail(&ev->bind_link, &eb->unbound);
698 			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
699 				/* Prioritise 4GiB region for restricted bo */
700 				list_add(&ev->bind_link, &last);
701 			else
702 				list_add_tail(&ev->bind_link, &last);
703 		}
704 		list_splice_tail(&last, &eb->unbound);
705 
706 		if (err == -EAGAIN) {
707 			mutex_unlock(&eb->i915->drm.struct_mutex);
708 			flush_workqueue(eb->i915->mm.userptr_wq);
709 			mutex_lock(&eb->i915->drm.struct_mutex);
710 			continue;
711 		}
712 
713 		switch (pass++) {
714 		case 0:
715 			break;
716 
717 		case 1:
718 			/* Too fragmented, unbind everything and retry */
719 			mutex_lock(&eb->context->vm->mutex);
720 			err = i915_gem_evict_vm(eb->context->vm);
721 			mutex_unlock(&eb->context->vm->mutex);
722 			if (err)
723 				goto unlock;
724 			break;
725 
726 		default:
727 			err = -ENOSPC;
728 			goto unlock;
729 		}
730 
731 		pin_flags = PIN_USER;
732 	} while (1);
733 
734 unlock:
735 	mutex_unlock(&eb->i915->drm.struct_mutex);
736 	return err;
737 }
738 
739 static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
740 {
741 	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
742 		return 0;
743 	else
744 		return eb->buffer_count - 1;
745 }
746 
747 static int eb_select_context(struct i915_execbuffer *eb)
748 {
749 	struct i915_gem_context *ctx;
750 
751 	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
752 	if (unlikely(!ctx))
753 		return -ENOENT;
754 
755 	eb->gem_context = ctx;
756 	if (rcu_access_pointer(ctx->vm))
757 		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
758 
759 	eb->context_flags = 0;
760 	if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
761 		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
762 
763 	return 0;
764 }
765 
766 static int __eb_add_lut(struct i915_execbuffer *eb,
767 			u32 handle, struct i915_vma *vma)
768 {
769 	struct i915_gem_context *ctx = eb->gem_context;
770 	struct i915_lut_handle *lut;
771 	int err;
772 
773 	lut = i915_lut_handle_alloc();
774 	if (unlikely(!lut))
775 		return -ENOMEM;
776 
777 	i915_vma_get(vma);
778 	if (!atomic_fetch_inc(&vma->open_count))
779 		i915_vma_reopen(vma);
780 	lut->handle = handle;
781 	lut->ctx = ctx;
782 
783 	/* Check that the context hasn't been closed in the meantime */
784 	err = -EINTR;
785 	if (!mutex_lock_interruptible(&ctx->lut_mutex)) {
786 		struct i915_address_space *vm = rcu_access_pointer(ctx->vm);
787 
788 		if (unlikely(vm && vma->vm != vm))
789 			err = -EAGAIN; /* user racing with ctx set-vm */
790 		else if (likely(!i915_gem_context_is_closed(ctx)))
791 			err = radix_tree_insert(&ctx->handles_vma, handle, vma);
792 		else
793 			err = -ENOENT;
794 		if (err == 0) { /* And nor has this handle */
795 			struct drm_i915_gem_object *obj = vma->obj;
796 
797 			spin_lock(&obj->lut_lock);
798 			if (idr_find(&eb->file->object_idr, handle) == obj) {
799 				list_add(&lut->obj_link, &obj->lut_list);
800 			} else {
801 				radix_tree_delete(&ctx->handles_vma, handle);
802 				err = -ENOENT;
803 			}
804 			spin_unlock(&obj->lut_lock);
805 		}
806 		mutex_unlock(&ctx->lut_mutex);
807 	}
808 	if (unlikely(err))
809 		goto err;
810 
811 	return 0;
812 
813 err:
814 	i915_vma_close(vma);
815 	i915_vma_put(vma);
816 	i915_lut_handle_free(lut);
817 	return err;
818 }
819 
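/*
 * Resolve a userspace GEM handle to a vma in this context's address space,
 * first via the per-context handles_vma cache and, on a miss, via a full
 * object lookup followed by insertion into the LUT. Retries if it races
 * with another thread installing the same handle (-EEXIST).
 */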
820 static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
821 {
822 	struct i915_address_space *vm = eb->context->vm;
823 
824 	do {
825 		struct drm_i915_gem_object *obj;
826 		struct i915_vma *vma;
827 		int err;
828 
829 		rcu_read_lock();
830 		vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
831 		if (likely(vma && vma->vm == vm))
832 			vma = i915_vma_tryget(vma);
833 		rcu_read_unlock();
834 		if (likely(vma))
835 			return vma;
836 
837 		obj = i915_gem_object_lookup(eb->file, handle);
838 		if (unlikely(!obj))
839 			return ERR_PTR(-ENOENT);
840 
841 		vma = i915_vma_instance(obj, vm, NULL);
842 		if (IS_ERR(vma)) {
843 			i915_gem_object_put(obj);
844 			return vma;
845 		}
846 
847 		err = __eb_add_lut(eb, handle, vma);
848 		if (likely(!err))
849 			return vma;
850 
851 		i915_gem_object_put(obj);
852 		if (err != -EEXIST)
853 			return ERR_PTR(err);
854 	} while (1);
855 }
856 
857 static int eb_lookup_vmas(struct i915_execbuffer *eb)
858 {
859 	unsigned int batch = eb_batch_index(eb);
860 	unsigned int i;
861 	int err = 0;
862 
863 	INIT_LIST_HEAD(&eb->relocs);
864 	INIT_LIST_HEAD(&eb->unbound);
865 
866 	for (i = 0; i < eb->buffer_count; i++) {
867 		struct i915_vma *vma;
868 
869 		vma = eb_lookup_vma(eb, eb->exec[i].handle);
870 		if (IS_ERR(vma)) {
871 			err = PTR_ERR(vma);
872 			break;
873 		}
874 
875 		err = eb_validate_vma(eb, &eb->exec[i], vma);
876 		if (unlikely(err)) {
877 			i915_vma_put(vma);
878 			break;
879 		}
880 
881 		eb_add_vma(eb, i, batch, vma);
882 	}
883 
884 	eb->vma[i].vma = NULL;
885 	return err;
886 }
887 
888 static struct eb_vma *
889 eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
890 {
891 	if (eb->lut_size < 0) {
892 		if (handle >= -eb->lut_size)
893 			return NULL;
894 		return &eb->vma[handle];
895 	} else {
896 		struct hlist_head *head;
897 		struct eb_vma *ev;
898 
899 		head = &eb->buckets[hash_32(handle, eb->lut_size)];
900 		hlist_for_each_entry(ev, head, node) {
901 			if (ev->handle == handle)
902 				return ev;
903 		}
904 		return NULL;
905 	}
906 }
907 
908 static void eb_destroy(const struct i915_execbuffer *eb)
909 {
910 	GEM_BUG_ON(eb->reloc_cache.rq);
911 
912 	if (eb->array)
913 		eb_vma_array_put(eb->array);
914 
915 	if (eb->lut_size > 0)
916 		kfree(eb->buckets);
917 }
918 
919 static inline u64
920 relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
921 		  const struct i915_vma *target)
922 {
923 	return gen8_canonical_addr((int)reloc->delta + target->node.start);
924 }
925 
926 static void reloc_cache_init(struct reloc_cache *cache,
927 			     struct drm_i915_private *i915)
928 {
929 	/* Must be a variable in the struct to allow GCC to unroll. */
930 	cache->gen = INTEL_GEN(i915);
931 	cache->has_llc = HAS_LLC(i915);
932 	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
933 	cache->has_fence = cache->gen < 4;
934 	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
935 	cache->node.flags = 0;
936 	cache->rq = NULL;
937 	cache->target = NULL;
938 }
939 
940 #define RELOC_TAIL 4
941 
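/*
 * The current relocation batch page is (almost) full: grab a fresh page
 * from the buffer pool, chain into it with an MI_BATCH_BUFFER_START emitted
 * at the tail of the old page, and continue writing relocations there.
 */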
942 static int reloc_gpu_chain(struct reloc_cache *cache)
943 {
944 	struct intel_gt_buffer_pool_node *pool;
945 	struct i915_request *rq = cache->rq;
946 	struct i915_vma *batch;
947 	u32 *cmd;
948 	int err;
949 
950 	pool = intel_gt_get_buffer_pool(rq->engine->gt, PAGE_SIZE);
951 	if (IS_ERR(pool))
952 		return PTR_ERR(pool);
953 
954 	batch = i915_vma_instance(pool->obj, rq->context->vm, NULL);
955 	if (IS_ERR(batch)) {
956 		err = PTR_ERR(batch);
957 		goto out_pool;
958 	}
959 
960 	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
961 	if (err)
962 		goto out_pool;
963 
964 	GEM_BUG_ON(cache->rq_size + RELOC_TAIL > PAGE_SIZE  / sizeof(u32));
965 	cmd = cache->rq_cmd + cache->rq_size;
966 	*cmd++ = MI_ARB_CHECK;
967 	if (cache->gen >= 8)
968 		*cmd++ = MI_BATCH_BUFFER_START_GEN8;
969 	else if (cache->gen >= 6)
970 		*cmd++ = MI_BATCH_BUFFER_START;
971 	else
972 		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
973 	*cmd++ = lower_32_bits(batch->node.start);
974 	*cmd++ = upper_32_bits(batch->node.start); /* Always 0 for gen<8 */
975 	i915_gem_object_flush_map(cache->rq_vma->obj);
976 	i915_gem_object_unpin_map(cache->rq_vma->obj);
977 	cache->rq_vma = NULL;
978 
979 	err = intel_gt_buffer_pool_mark_active(pool, rq);
980 	if (err == 0) {
981 		i915_vma_lock(batch);
982 		err = i915_request_await_object(rq, batch->obj, false);
983 		if (err == 0)
984 			err = i915_vma_move_to_active(batch, rq, 0);
985 		i915_vma_unlock(batch);
986 	}
987 	i915_vma_unpin(batch);
988 	if (err)
989 		goto out_pool;
990 
991 	cmd = i915_gem_object_pin_map(batch->obj,
992 				      cache->has_llc ?
993 				      I915_MAP_FORCE_WB :
994 				      I915_MAP_FORCE_WC);
995 	if (IS_ERR(cmd)) {
996 		err = PTR_ERR(cmd);
997 		goto out_pool;
998 	}
999 
1000 	/* Return with batch mapping (cmd) still pinned */
1001 	cache->rq_cmd = cmd;
1002 	cache->rq_size = 0;
1003 	cache->rq_vma = batch;
1004 
1005 out_pool:
1006 	intel_gt_buffer_pool_put(pool);
1007 	return err;
1008 }
1009 
1010 static unsigned int reloc_bb_flags(const struct reloc_cache *cache)
1011 {
1012 	return cache->gen > 5 ? 0 : I915_DISPATCH_SECURE;
1013 }
1014 
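/*
 * Close the relocation batch with MI_BATCH_BUFFER_END, flush the CPU
 * mapping and submit the request so that all accumulated relocation
 * writes are executed by the GPU.
 */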
1015 static int reloc_gpu_flush(struct reloc_cache *cache)
1016 {
1017 	struct i915_request *rq;
1018 	int err;
1019 
1020 	rq = fetch_and_zero(&cache->rq);
1021 	if (!rq)
1022 		return 0;
1023 
1024 	if (cache->rq_vma) {
1025 		struct drm_i915_gem_object *obj = cache->rq_vma->obj;
1026 
1027 		GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
1028 		cache->rq_cmd[cache->rq_size++] = MI_BATCH_BUFFER_END;
1029 
1030 		__i915_gem_object_flush_map(obj,
1031 					    0, sizeof(u32) * cache->rq_size);
1032 		i915_gem_object_unpin_map(obj);
1033 	}
1034 
1035 	err = 0;
1036 	if (rq->engine->emit_init_breadcrumb)
1037 		err = rq->engine->emit_init_breadcrumb(rq);
1038 	if (!err)
1039 		err = rq->engine->emit_bb_start(rq,
1040 						rq->batch->node.start,
1041 						PAGE_SIZE,
1042 						reloc_bb_flags(cache));
1043 	if (err)
1044 		i915_request_set_error_once(rq, err);
1045 
1046 	intel_gt_chipset_flush(rq->engine->gt);
1047 	i915_request_add(rq);
1048 
1049 	return err;
1050 }
1051 
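/*
 * Flush any stale CPU cachelines for the object about to be patched and
 * track it as written by the relocation request, so the GPU-side pointer
 * updates are ordered with respect to other users of the object.
 */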
1052 static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
1053 {
1054 	struct drm_i915_gem_object *obj = vma->obj;
1055 	int err;
1056 
1057 	i915_vma_lock(vma);
1058 
1059 	if (obj->cache_dirty & ~obj->cache_coherent)
1060 		i915_gem_clflush_object(obj, 0);
1061 	obj->write_domain = 0;
1062 
1063 	err = i915_request_await_object(rq, vma->obj, true);
1064 	if (err == 0)
1065 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
1066 
1067 	i915_vma_unlock(vma);
1068 
1069 	return err;
1070 }
1071 
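/*
 * Allocate a page from the buffer pool, map it for writing relocation
 * commands, and create the request that will execute them (using a fresh
 * context if the chosen engine differs from the execbuf's own).
 */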
1072 static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
1073 			     struct intel_engine_cs *engine,
1074 			     unsigned int len)
1075 {
1076 	struct reloc_cache *cache = &eb->reloc_cache;
1077 	struct intel_gt_buffer_pool_node *pool;
1078 	struct i915_request *rq;
1079 	struct i915_vma *batch;
1080 	u32 *cmd;
1081 	int err;
1082 
1083 	pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE);
1084 	if (IS_ERR(pool))
1085 		return PTR_ERR(pool);
1086 
1087 	cmd = i915_gem_object_pin_map(pool->obj,
1088 				      cache->has_llc ?
1089 				      I915_MAP_FORCE_WB :
1090 				      I915_MAP_FORCE_WC);
1091 	if (IS_ERR(cmd)) {
1092 		err = PTR_ERR(cmd);
1093 		goto out_pool;
1094 	}
1095 
1096 	batch = i915_vma_instance(pool->obj, eb->context->vm, NULL);
1097 	if (IS_ERR(batch)) {
1098 		err = PTR_ERR(batch);
1099 		goto err_unmap;
1100 	}
1101 
1102 	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
1103 	if (err)
1104 		goto err_unmap;
1105 
1106 	if (engine == eb->context->engine) {
1107 		rq = i915_request_create(eb->context);
1108 	} else {
1109 		struct intel_context *ce;
1110 
1111 		ce = intel_context_create(engine);
1112 		if (IS_ERR(ce)) {
1113 			err = PTR_ERR(ce);
1114 			goto err_unpin;
1115 		}
1116 
1117 		i915_vm_put(ce->vm);
1118 		ce->vm = i915_vm_get(eb->context->vm);
1119 
1120 		rq = intel_context_create_request(ce);
1121 		intel_context_put(ce);
1122 	}
1123 	if (IS_ERR(rq)) {
1124 		err = PTR_ERR(rq);
1125 		goto err_unpin;
1126 	}
1127 
1128 	err = intel_gt_buffer_pool_mark_active(pool, rq);
1129 	if (err)
1130 		goto err_request;
1131 
1132 	i915_vma_lock(batch);
1133 	err = i915_request_await_object(rq, batch->obj, false);
1134 	if (err == 0)
1135 		err = i915_vma_move_to_active(batch, rq, 0);
1136 	i915_vma_unlock(batch);
1137 	if (err)
1138 		goto skip_request;
1139 
1140 	rq->batch = batch;
1141 	i915_vma_unpin(batch);
1142 
1143 	cache->rq = rq;
1144 	cache->rq_cmd = cmd;
1145 	cache->rq_size = 0;
1146 	cache->rq_vma = batch;
1147 
1148 	/* Return with batch mapping (cmd) still pinned */
1149 	goto out_pool;
1150 
1151 skip_request:
1152 	i915_request_set_error_once(rq, err);
1153 err_request:
1154 	i915_request_add(rq);
1155 err_unpin:
1156 	i915_vma_unpin(batch);
1157 err_unmap:
1158 	i915_gem_object_unpin_map(pool->obj);
1159 out_pool:
1160 	intel_gt_buffer_pool_put(pool);
1161 	return err;
1162 }
1163 
1164 static bool reloc_can_use_engine(const struct intel_engine_cs *engine)
1165 {
1166 	return engine->class != VIDEO_DECODE_CLASS || !IS_GEN(engine->i915, 6);
1167 }
1168 
1169 static u32 *reloc_gpu(struct i915_execbuffer *eb,
1170 		      struct i915_vma *vma,
1171 		      unsigned int len)
1172 {
1173 	struct reloc_cache *cache = &eb->reloc_cache;
1174 	u32 *cmd;
1175 	int err;
1176 
1177 	if (unlikely(!cache->rq)) {
1178 		struct intel_engine_cs *engine = eb->engine;
1179 
1180 		if (!reloc_can_use_engine(engine)) {
1181 			engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0];
1182 			if (!engine)
1183 				return ERR_PTR(-ENODEV);
1184 		}
1185 
1186 		err = __reloc_gpu_alloc(eb, engine, len);
1187 		if (unlikely(err))
1188 			return ERR_PTR(err);
1189 	}
1190 
1191 	if (vma != cache->target) {
1192 		err = reloc_move_to_gpu(cache->rq, vma);
1193 		if (unlikely(err)) {
1194 			i915_request_set_error_once(cache->rq, err);
1195 			return ERR_PTR(err);
1196 		}
1197 
1198 		cache->target = vma;
1199 	}
1200 
1201 	if (unlikely(cache->rq_size + len >
1202 		     PAGE_SIZE / sizeof(u32) - RELOC_TAIL)) {
1203 		err = reloc_gpu_chain(cache);
1204 		if (unlikely(err)) {
1205 			i915_request_set_error_once(cache->rq, err);
1206 			return ERR_PTR(err);
1207 		}
1208 	}
1209 
1210 	GEM_BUG_ON(cache->rq_size + len >= PAGE_SIZE  / sizeof(u32));
1211 	cmd = cache->rq_cmd + cache->rq_size;
1212 	cache->rq_size += len;
1213 
1214 	return cmd;
1215 }
1216 
1217 static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
1218 {
1219 	struct page *page;
1220 	unsigned long addr;
1221 
1222 	GEM_BUG_ON(vma->pages != vma->obj->mm.pages);
1223 
1224 	page = i915_gem_object_get_page(vma->obj, offset >> PAGE_SHIFT);
1225 	addr = PFN_PHYS(page_to_pfn(page));
1226 	GEM_BUG_ON(overflows_type(addr, u32)); /* expected dma32 */
1227 
1228 	return addr + offset_in_page(offset);
1229 }
1230 
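/*
 * Emit the generation-appropriate MI_STORE_DWORD_IMM sequence into the
 * relocation batch to write the 32b (or split 64b) target address into
 * the object at the given offset.
 */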
1231 static int __reloc_entry_gpu(struct i915_execbuffer *eb,
1232 			     struct i915_vma *vma,
1233 			     u64 offset,
1234 			     u64 target_addr)
1235 {
1236 	const unsigned int gen = eb->reloc_cache.gen;
1237 	unsigned int len;
1238 	u32 *batch;
1239 	u64 addr;
1240 
1241 	if (gen >= 8)
1242 		len = offset & 7 ? 8 : 5;
1243 	else if (gen >= 4)
1244 		len = 4;
1245 	else
1246 		len = 3;
1247 
1248 	batch = reloc_gpu(eb, vma, len);
1249 	if (IS_ERR(batch))
1250 		return PTR_ERR(batch);
1251 
1252 	addr = gen8_canonical_addr(vma->node.start + offset);
1253 	if (gen >= 8) {
1254 		if (offset & 7) {
1255 			*batch++ = MI_STORE_DWORD_IMM_GEN4;
1256 			*batch++ = lower_32_bits(addr);
1257 			*batch++ = upper_32_bits(addr);
1258 			*batch++ = lower_32_bits(target_addr);
1259 
1260 			addr = gen8_canonical_addr(addr + 4);
1261 
1262 			*batch++ = MI_STORE_DWORD_IMM_GEN4;
1263 			*batch++ = lower_32_bits(addr);
1264 			*batch++ = upper_32_bits(addr);
1265 			*batch++ = upper_32_bits(target_addr);
1266 		} else {
1267 			*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
1268 			*batch++ = lower_32_bits(addr);
1269 			*batch++ = upper_32_bits(addr);
1270 			*batch++ = lower_32_bits(target_addr);
1271 			*batch++ = upper_32_bits(target_addr);
1272 		}
1273 	} else if (gen >= 6) {
1274 		*batch++ = MI_STORE_DWORD_IMM_GEN4;
1275 		*batch++ = 0;
1276 		*batch++ = addr;
1277 		*batch++ = target_addr;
1278 	} else if (IS_I965G(eb->i915)) {
1279 		*batch++ = MI_STORE_DWORD_IMM_GEN4;
1280 		*batch++ = 0;
1281 		*batch++ = vma_phys_addr(vma, offset);
1282 		*batch++ = target_addr;
1283 	} else if (gen >= 4) {
1284 		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1285 		*batch++ = 0;
1286 		*batch++ = addr;
1287 		*batch++ = target_addr;
1288 	} else if (gen >= 3 &&
1289 		   !(IS_I915G(eb->i915) || IS_I915GM(eb->i915))) {
1290 		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1291 		*batch++ = addr;
1292 		*batch++ = target_addr;
1293 	} else {
1294 		*batch++ = MI_STORE_DWORD_IMM;
1295 		*batch++ = vma_phys_addr(vma, offset);
1296 		*batch++ = target_addr;
1297 	}
1298 
1299 	return 0;
1300 }
1301 
1302 static u64
1303 relocate_entry(struct i915_execbuffer *eb,
1304 	       struct i915_vma *vma,
1305 	       const struct drm_i915_gem_relocation_entry *reloc,
1306 	       const struct i915_vma *target)
1307 {
1308 	u64 target_addr = relocation_target(reloc, target);
1309 	int err;
1310 
1311 	err = __reloc_entry_gpu(eb, vma, reloc->offset, target_addr);
1312 	if (err)
1313 		return err;
1314 
1315 	return target->node.start | UPDATE;
1316 }
1317 
1318 static u64
1319 eb_relocate_entry(struct i915_execbuffer *eb,
1320 		  struct eb_vma *ev,
1321 		  const struct drm_i915_gem_relocation_entry *reloc)
1322 {
1323 	struct drm_i915_private *i915 = eb->i915;
1324 	struct eb_vma *target;
1325 	int err;
1326 
1327 	/* we already hold a reference to all valid objects */
1328 	target = eb_get_vma(eb, reloc->target_handle);
1329 	if (unlikely(!target))
1330 		return -ENOENT;
1331 
1332 	/* Validate that the target is in a valid r/w GPU domain */
1333 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
1334 		drm_dbg(&i915->drm, "reloc with multiple write domains: "
1335 			  "target %d offset %d "
1336 			  "read %08x write %08x",
1337 			  reloc->target_handle,
1338 			  (int) reloc->offset,
1339 			  reloc->read_domains,
1340 			  reloc->write_domain);
1341 		return -EINVAL;
1342 	}
1343 	if (unlikely((reloc->write_domain | reloc->read_domains)
1344 		     & ~I915_GEM_GPU_DOMAINS)) {
1345 		drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
1346 			  "target %d offset %d "
1347 			  "read %08x write %08x",
1348 			  reloc->target_handle,
1349 			  (int) reloc->offset,
1350 			  reloc->read_domains,
1351 			  reloc->write_domain);
1352 		return -EINVAL;
1353 	}
1354 
1355 	if (reloc->write_domain) {
1356 		target->flags |= EXEC_OBJECT_WRITE;
1357 
1358 		/*
1359 		 * Sandybridge PPGTT errata: We need a global gtt mapping
1360 		 * for MI and pipe_control writes because the gpu doesn't
1361 		 * properly redirect them through the ppgtt for non_secure
1362 		 * batchbuffers.
1363 		 */
1364 		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
1365 		    IS_GEN(eb->i915, 6)) {
1366 			err = i915_vma_bind(target->vma,
1367 					    target->vma->obj->cache_level,
1368 					    PIN_GLOBAL, NULL);
1369 			if (err)
1370 				return err;
1371 		}
1372 	}
1373 
1374 	/*
1375 	 * If the relocation already has the right value in it, no
1376 	 * more work needs to be done.
1377 	 */
1378 	if (gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
1379 		return 0;
1380 
1381 	/* Check that the relocation address is valid... */
1382 	if (unlikely(reloc->offset >
1383 		     ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
1384 		drm_dbg(&i915->drm, "Relocation beyond object bounds: "
1385 			  "target %d offset %d size %d.\n",
1386 			  reloc->target_handle,
1387 			  (int)reloc->offset,
1388 			  (int)ev->vma->size);
1389 		return -EINVAL;
1390 	}
1391 	if (unlikely(reloc->offset & 3)) {
1392 		drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
1393 			  "target %d offset %d.\n",
1394 			  reloc->target_handle,
1395 			  (int)reloc->offset);
1396 		return -EINVAL;
1397 	}
1398 
1399 	/*
1400 	 * If we write into the object, we need to force the synchronisation
1401 	 * barrier, either with an asynchronous clflush or if we executed the
1402 	 * patching using the GPU (though that should be serialised by the
1403 	 * timeline). To be completely sure, and since we are required to
1404 	 * do relocations we are already stalling, disable the user's opt
1405 	 * out of our synchronisation.
1406 	 */
1407 	ev->flags &= ~EXEC_OBJECT_ASYNC;
1408 
1409 	/* and update the user's relocation entry */
1410 	return relocate_entry(eb, ev->vma, reloc, target->vma);
1411 }
1412 
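/*
 * Process all relocation entries for one execobject: copy them from
 * userspace in stack-sized chunks, apply each entry, and write the new
 * presumed_offset back to userspace when an object has moved.
 */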
1413 static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
1414 {
1415 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
1416 	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
1417 	const struct drm_i915_gem_exec_object2 *entry = ev->exec;
1418 	struct drm_i915_gem_relocation_entry __user *urelocs =
1419 		u64_to_user_ptr(entry->relocs_ptr);
1420 	unsigned long remain = entry->relocation_count;
1421 
1422 	if (unlikely(remain > N_RELOC(ULONG_MAX)))
1423 		return -EINVAL;
1424 
1425 	/*
1426 	 * We must check that the entire relocation array is safe
1427 	 * to read. However, if the array is not writable the user loses
1428 	 * the updated relocation values.
1429 	 */
1430 	if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs))))
1431 		return -EFAULT;
1432 
1433 	do {
1434 		struct drm_i915_gem_relocation_entry *r = stack;
1435 		unsigned int count =
1436 			min_t(unsigned long, remain, ARRAY_SIZE(stack));
1437 		unsigned int copied;
1438 
1439 		/*
1440 		 * This is the fast path and we cannot handle a pagefault
1441 		 * whilst holding the struct mutex lest the user pass in the
1442 		 * relocations contained within a mmaped bo. In such a case
1443 		 * the page fault handler would call i915_gem_fault() and
1444 		 * we would try to acquire the struct mutex again. Obviously
1445 		 * this is bad and so lockdep complains vehemently.
1446 		 */
1447 		copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
1448 		if (unlikely(copied))
1449 			return -EFAULT;
1450 
1451 		remain -= count;
1452 		do {
1453 			u64 offset = eb_relocate_entry(eb, ev, r);
1454 
1455 			if (likely(offset == 0)) {
1456 			} else if ((s64)offset < 0) {
1457 				return (int)offset;
1458 			} else {
1459 				/*
1460 				 * Note that reporting an error now
1461 				 * leaves everything in an inconsistent
1462 				 * state as we have *already* changed
1463 				 * the relocation value inside the
1464 				 * object. As we have not changed the
1465 				 * reloc.presumed_offset, and will not
1466 				 * change the execobject.offset, on a
1467 				 * future call we may not rewrite the value
1468 				 * inside the object, leaving it
1469 				 * dangling and causing a GPU hang. Unless
1470 				 * userspace dynamically rebuilds the
1471 				 * relocations on each execbuf rather than
1472 				 * presume a static tree.
1473 				 *
1474 				 * We did previously check if the relocations
1475 				 * were writable (access_ok), an error now
1476 				 * would be a strange race with mprotect,
1477 				 * having already demonstrated that we
1478 				 * can read from this userspace address.
1479 				 */
1480 				offset = gen8_canonical_addr(offset & ~UPDATE);
1481 				__put_user(offset,
1482 					   &urelocs[r - stack].presumed_offset);
1483 			}
1484 		} while (r++, --count);
1485 		urelocs += ARRAY_SIZE(stack);
1486 	} while (remain);
1487 
1488 	return 0;
1489 }
1490 
1491 static int eb_relocate(struct i915_execbuffer *eb)
1492 {
1493 	int err;
1494 
1495 	err = eb_lookup_vmas(eb);
1496 	if (err)
1497 		return err;
1498 
1499 	if (!list_empty(&eb->unbound)) {
1500 		err = eb_reserve(eb);
1501 		if (err)
1502 			return err;
1503 	}
1504 
1505 	/* The objects are in their final locations, apply the relocations. */
1506 	if (eb->args->flags & __EXEC_HAS_RELOC) {
1507 		struct eb_vma *ev;
1508 		int flush;
1509 
1510 		list_for_each_entry(ev, &eb->relocs, reloc_link) {
1511 			err = eb_relocate_vma(eb, ev);
1512 			if (err)
1513 				break;
1514 		}
1515 
1516 		flush = reloc_gpu_flush(&eb->reloc_cache);
1517 		if (!err)
1518 			err = flush;
1519 	}
1520 
1521 	return err;
1522 }
1523 
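/*
 * Lock every object's reservation (backing off and retrying on ww_mutex
 * deadlock), flush CPU caches where required, and attach the request's
 * fences so each vma is tracked as busy (and write-ordered) for the
 * lifetime of this submission.
 */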
1524 static int eb_move_to_gpu(struct i915_execbuffer *eb)
1525 {
1526 	const unsigned int count = eb->buffer_count;
1527 	struct ww_acquire_ctx acquire;
1528 	unsigned int i;
1529 	int err = 0;
1530 
1531 	ww_acquire_init(&acquire, &reservation_ww_class);
1532 
1533 	for (i = 0; i < count; i++) {
1534 		struct eb_vma *ev = &eb->vma[i];
1535 		struct i915_vma *vma = ev->vma;
1536 
1537 		err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
1538 		if (err == -EDEADLK) {
1539 			GEM_BUG_ON(i == 0);
1540 			do {
1541 				int j = i - 1;
1542 
1543 				ww_mutex_unlock(&eb->vma[j].vma->resv->lock);
1544 
1545 				swap(eb->vma[i],  eb->vma[j]);
1546 			} while (--i);
1547 
1548 			err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
1549 							       &acquire);
1550 		}
1551 		if (err)
1552 			break;
1553 	}
1554 	ww_acquire_done(&acquire);
1555 
1556 	while (i--) {
1557 		struct eb_vma *ev = &eb->vma[i];
1558 		struct i915_vma *vma = ev->vma;
1559 		unsigned int flags = ev->flags;
1560 		struct drm_i915_gem_object *obj = vma->obj;
1561 
1562 		assert_vma_held(vma);
1563 
1564 		if (flags & EXEC_OBJECT_CAPTURE) {
1565 			struct i915_capture_list *capture;
1566 
1567 			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
1568 			if (capture) {
1569 				capture->next = eb->request->capture_list;
1570 				capture->vma = vma;
1571 				eb->request->capture_list = capture;
1572 			}
1573 		}
1574 
1575 		/*
1576 		 * If the GPU is not _reading_ through the CPU cache, we need
1577 		 * to make sure that any writes (both previous GPU writes from
1578 		 * before a change in snooping levels and normal CPU writes)
1579 		 * caught in that cache are flushed to main memory.
1580 		 *
1581 		 * We want to say
1582 		 *   obj->cache_dirty &&
1583 		 *   !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
1584 		 * but gcc's optimiser doesn't handle that as well and emits
1585 		 * two jumps instead of one. Maybe one day...
1586 		 */
1587 		if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
1588 			if (i915_gem_clflush_object(obj, 0))
1589 				flags &= ~EXEC_OBJECT_ASYNC;
1590 		}
1591 
1592 		if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
1593 			err = i915_request_await_object
1594 				(eb->request, obj, flags & EXEC_OBJECT_WRITE);
1595 		}
1596 
1597 		if (err == 0)
1598 			err = i915_vma_move_to_active(vma, eb->request, flags);
1599 
1600 		i915_vma_unlock(vma);
1601 		eb_unreserve_vma(ev);
1602 	}
1603 	ww_acquire_fini(&acquire);
1604 
1605 	eb_vma_array_put(fetch_and_zero(&eb->array));
1606 
1607 	if (unlikely(err))
1608 		goto err_skip;
1609 
1610 	/* Unconditionally flush any chipset caches (for streaming writes). */
1611 	intel_gt_chipset_flush(eb->engine->gt);
1612 	return 0;
1613 
1614 err_skip:
1615 	i915_request_set_error_once(eb->request, err);
1616 	return err;
1617 }
1618 
1619 static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
1620 {
1621 	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
1622 		return -EINVAL;
1623 
1624 	/* Kernel clipping was a DRI1 misfeature */
1625 	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
1626 		if (exec->num_cliprects || exec->cliprects_ptr)
1627 			return -EINVAL;
1628 	}
1629 
1630 	if (exec->DR4 == 0xffffffff) {
1631 		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1632 		exec->DR4 = 0;
1633 	}
1634 	if (exec->DR1 || exec->DR4)
1635 		return -EINVAL;
1636 
1637 	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
1638 		return -EINVAL;
1639 
1640 	return 0;
1641 }
1642 
1643 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
1644 {
1645 	u32 *cs;
1646 	int i;
1647 
1648 	if (!IS_GEN(rq->engine->i915, 7) || rq->engine->id != RCS0) {
1649 		drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
1650 		return -EINVAL;
1651 	}
1652 
1653 	cs = intel_ring_begin(rq, 4 * 2 + 2);
1654 	if (IS_ERR(cs))
1655 		return PTR_ERR(cs);
1656 
1657 	*cs++ = MI_LOAD_REGISTER_IMM(4);
1658 	for (i = 0; i < 4; i++) {
1659 		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
1660 		*cs++ = 0;
1661 	}
1662 	*cs++ = MI_NOOP;
1663 	intel_ring_advance(rq, cs);
1664 
1665 	return 0;
1666 }
1667 
1668 static struct i915_vma *
1669 shadow_batch_pin(struct drm_i915_gem_object *obj,
1670 		 struct i915_address_space *vm,
1671 		 unsigned int flags)
1672 {
1673 	struct i915_vma *vma;
1674 	int err;
1675 
1676 	vma = i915_vma_instance(obj, vm, NULL);
1677 	if (IS_ERR(vma))
1678 		return vma;
1679 
1680 	err = i915_vma_pin(vma, 0, 0, flags);
1681 	if (err)
1682 		return ERR_PTR(err);
1683 
1684 	return vma;
1685 }
1686 
1687 struct eb_parse_work {
1688 	struct dma_fence_work base;
1689 	struct intel_engine_cs *engine;
1690 	struct i915_vma *batch;
1691 	struct i915_vma *shadow;
1692 	struct i915_vma *trampoline;
1693 	unsigned int batch_offset;
1694 	unsigned int batch_length;
1695 };
1696 
1697 static int __eb_parse(struct dma_fence_work *work)
1698 {
1699 	struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
1700 
1701 	return intel_engine_cmd_parser(pw->engine,
1702 				       pw->batch,
1703 				       pw->batch_offset,
1704 				       pw->batch_length,
1705 				       pw->shadow,
1706 				       pw->trampoline);
1707 }
1708 
1709 static void __eb_parse_release(struct dma_fence_work *work)
1710 {
1711 	struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
1712 
1713 	if (pw->trampoline)
1714 		i915_active_release(&pw->trampoline->active);
1715 	i915_active_release(&pw->shadow->active);
1716 	i915_active_release(&pw->batch->active);
1717 }
1718 
1719 static const struct dma_fence_work_ops eb_parse_ops = {
1720 	.name = "eb_parse",
1721 	.work = __eb_parse,
1722 	.release = __eb_parse_release,
1723 };
1724 
1725 static inline int
1726 __parser_mark_active(struct i915_vma *vma,
1727 		     struct intel_timeline *tl,
1728 		     struct dma_fence *fence)
1729 {
1730 	struct intel_gt_buffer_pool_node *node = vma->private;
1731 
1732 	return i915_active_ref(&node->active, tl, fence);
1733 }
1734 
1735 static int
1736 parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl)
1737 {
1738 	int err;
1739 
1740 	mutex_lock(&tl->mutex);
1741 
1742 	err = __parser_mark_active(pw->shadow, tl, &pw->base.dma);
1743 	if (err)
1744 		goto unlock;
1745 
1746 	if (pw->trampoline) {
1747 		err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma);
1748 		if (err)
1749 			goto unlock;
1750 	}
1751 
1752 unlock:
1753 	mutex_unlock(&tl->mutex);
1754 	return err;
1755 }
1756 
1757 static int eb_parse_pipeline(struct i915_execbuffer *eb,
1758 			     struct i915_vma *shadow,
1759 			     struct i915_vma *trampoline)
1760 {
1761 	struct eb_parse_work *pw;
1762 	int err;
1763 
1764 	pw = kzalloc(sizeof(*pw), GFP_KERNEL);
1765 	if (!pw)
1766 		return -ENOMEM;
1767 
1768 	err = i915_active_acquire(&eb->batch->vma->active);
1769 	if (err)
1770 		goto err_free;
1771 
1772 	err = i915_active_acquire(&shadow->active);
1773 	if (err)
1774 		goto err_batch;
1775 
1776 	if (trampoline) {
1777 		err = i915_active_acquire(&trampoline->active);
1778 		if (err)
1779 			goto err_shadow;
1780 	}
1781 
1782 	dma_fence_work_init(&pw->base, &eb_parse_ops);
1783 
1784 	pw->engine = eb->engine;
1785 	pw->batch = eb->batch->vma;
1786 	pw->batch_offset = eb->batch_start_offset;
1787 	pw->batch_length = eb->batch_len;
1788 	pw->shadow = shadow;
1789 	pw->trampoline = trampoline;
1790 
1791 	/* Mark active refs early for this worker, in case we get interrupted */
1792 	err = parser_mark_active(pw, eb->context->timeline);
1793 	if (err)
1794 		goto err_commit;
1795 
1796 	err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
1797 	if (err)
1798 		goto err_commit;
1799 
1800 	err = dma_resv_reserve_shared(pw->batch->resv, 1);
1801 	if (err)
1802 		goto err_commit_unlock;
1803 
1804 	/* Wait for all writes (and relocs) into the batch to complete */
1805 	err = i915_sw_fence_await_reservation(&pw->base.chain,
1806 					      pw->batch->resv, NULL, false,
1807 					      0, I915_FENCE_GFP);
1808 	if (err < 0)
1809 		goto err_commit_unlock;
1810 
1811 	/* Keep the batch alive and unwritten as we parse */
1812 	dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
1813 
1814 	dma_resv_unlock(pw->batch->resv);
1815 
1816 	/* Force execution to wait for completion of the parser */
1817 	dma_resv_lock(shadow->resv, NULL);
1818 	dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
1819 	dma_resv_unlock(shadow->resv);
1820 
1821 	dma_fence_work_commit_imm(&pw->base);
1822 	return 0;
1823 
1824 err_commit_unlock:
1825 	dma_resv_unlock(pw->batch->resv);
1826 err_commit:
1827 	i915_sw_fence_set_error_once(&pw->base.chain, err);
1828 	dma_fence_work_commit_imm(&pw->base);
1829 	return err;
1830 
1831 err_shadow:
1832 	i915_active_release(&shadow->active);
1833 err_batch:
1834 	i915_active_release(&eb->batch->vma->active);
1835 err_free:
1836 	kfree(pw);
1837 	return err;
1838 }
1839 
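/*
 * When the command parser is required, copy the user batch into a shadow
 * buffer (with extra space for a trampoline on the GGTT-based parsers) and
 * substitute the shadow as the batch to execute.
 */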
1840 static int eb_parse(struct i915_execbuffer *eb)
1841 {
1842 	struct drm_i915_private *i915 = eb->i915;
1843 	struct intel_gt_buffer_pool_node *pool;
1844 	struct i915_vma *shadow, *trampoline;
1845 	unsigned int len;
1846 	int err;
1847 
1848 	if (!eb_use_cmdparser(eb))
1849 		return 0;
1850 
1851 	len = eb->batch_len;
1852 	if (!CMDPARSER_USES_GGTT(eb->i915)) {
1853 		/*
1854 		 * ppGTT backed shadow buffers must be mapped RO, to prevent
1855 		 * post-scan tampering
1856 		 */
1857 		if (!eb->context->vm->has_read_only) {
1858 			drm_dbg(&i915->drm,
1859 				"Cannot prevent post-scan tampering without RO capable vm\n");
1860 			return -EINVAL;
1861 		}
1862 	} else {
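		/* Leave room for the trampoline appended after the shadow copy */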
1863 		len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
1864 	}
1865 
1866 	pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
1867 	if (IS_ERR(pool))
1868 		return PTR_ERR(pool);
1869 
1870 	shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER);
1871 	if (IS_ERR(shadow)) {
1872 		err = PTR_ERR(shadow);
1873 		goto err;
1874 	}
1875 	i915_gem_object_set_readonly(shadow->obj);
1876 	shadow->private = pool;
1877 
1878 	trampoline = NULL;
1879 	if (CMDPARSER_USES_GGTT(eb->i915)) {
1880 		trampoline = shadow;
1881 
1882 		shadow = shadow_batch_pin(pool->obj,
1883 					  &eb->engine->gt->ggtt->vm,
1884 					  PIN_GLOBAL);
1885 		if (IS_ERR(shadow)) {
1886 			err = PTR_ERR(shadow);
1887 			shadow = trampoline;
1888 			goto err_shadow;
1889 		}
1890 		shadow->private = pool;
1891 
1892 		eb->batch_flags |= I915_DISPATCH_SECURE;
1893 	}
1894 
1895 	err = eb_parse_pipeline(eb, shadow, trampoline);
1896 	if (err)
1897 		goto err_trampoline;
1898 
1899 	eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
1900 	eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
1901 	eb->batch = &eb->vma[eb->buffer_count++];
1902 	eb->vma[eb->buffer_count].vma = NULL;
1903 
1904 	eb->trampoline = trampoline;
1905 	eb->batch_start_offset = 0;
1906 
1907 	return 0;
1908 
1909 err_trampoline:
1910 	if (trampoline)
1911 		i915_vma_unpin(trampoline);
1912 err_shadow:
1913 	i915_vma_unpin(shadow);
1914 err:
1915 	intel_gt_buffer_pool_put(pool);
1916 	return err;
1917 }
1918 
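/*
 * Track the request on the client's per-file list so that the file
 * backpointer can be cleared should the file be closed before the request
 * is retired.
 */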
1919 static void
1920 add_to_client(struct i915_request *rq, struct drm_file *file)
1921 {
1922 	struct drm_i915_file_private *file_priv = file->driver_priv;
1923 
1924 	rq->file_priv = file_priv;
1925 
1926 	spin_lock(&file_priv->mm.lock);
1927 	list_add_tail(&rq->client_link, &file_priv->mm.request_list);
1928 	spin_unlock(&file_priv->mm.lock);
1929 }
1930 
1931 static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
1932 {
1933 	int err;
1934 
1935 	err = eb_move_to_gpu(eb);
1936 	if (err)
1937 		return err;
1938 
1939 	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
1940 		err = i915_reset_gen7_sol_offsets(eb->request);
1941 		if (err)
1942 			return err;
1943 	}
1944 
1945 	/*
	 * Once we have completed waiting for other engines (using HW semaphores),
	 * we can signal that this request/batch is ready to run. This
1948 	 * allows us to determine if the batch is still waiting on the GPU
1949 	 * or actually running by checking the breadcrumb.
1950 	 */
1951 	if (eb->engine->emit_init_breadcrumb) {
1952 		err = eb->engine->emit_init_breadcrumb(eb->request);
1953 		if (err)
1954 			return err;
1955 	}
1956 
1957 	err = eb->engine->emit_bb_start(eb->request,
1958 					batch->node.start +
1959 					eb->batch_start_offset,
1960 					eb->batch_len,
1961 					eb->batch_flags);
1962 	if (err)
1963 		return err;
1964 
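	/*
	 * The trampoline is placed immediately after the parsed copy of the
	 * batch, hence the shadow must start at offset 0 within its buffer.
	 */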
1965 	if (eb->trampoline) {
1966 		GEM_BUG_ON(eb->batch_start_offset);
1967 		err = eb->engine->emit_bb_start(eb->request,
1968 						eb->trampoline->node.start +
1969 						eb->batch_len,
1970 						0, 0);
1971 		if (err)
1972 			return err;
1973 	}
1974 
1975 	if (intel_context_nopreempt(eb->context))
1976 		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
1977 
1978 	return 0;
1979 }
1980 
1981 static int num_vcs_engines(const struct drm_i915_private *i915)
1982 {
1983 	return hweight64(VDBOX_MASK(&i915->gt));
1984 }
1985 
1986 /*
1987  * Find one BSD ring to dispatch the corresponding BSD command.
1988  * The engine index is returned.
1989  */
1990 static unsigned int
1991 gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
1992 			 struct drm_file *file)
1993 {
1994 	struct drm_i915_file_private *file_priv = file->driver_priv;
1995 
1996 	/* Check whether the file_priv has already selected one ring. */
1997 	if ((int)file_priv->bsd_engine < 0)
1998 		file_priv->bsd_engine =
1999 			get_random_int() % num_vcs_engines(dev_priv);
2000 
2001 	return file_priv->bsd_engine;
2002 }
2003 
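/* Map the legacy I915_EXEC_* ring selector onto a physical engine */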
2004 static const enum intel_engine_id user_ring_map[] = {
2005 	[I915_EXEC_DEFAULT]	= RCS0,
2006 	[I915_EXEC_RENDER]	= RCS0,
2007 	[I915_EXEC_BLT]		= BCS0,
2008 	[I915_EXEC_BSD]		= VCS0,
2009 	[I915_EXEC_VEBOX]	= VECS0
2010 };
2011 
2012 static struct i915_request *eb_throttle(struct intel_context *ce)
2013 {
2014 	struct intel_ring *ring = ce->ring;
2015 	struct intel_timeline *tl = ce->timeline;
2016 	struct i915_request *rq;
2017 
2018 	/*
	 * Completely unscientific finger-in-the-air estimates for a suitable
	 * maximum user request size (to avoid blocking) and for the backoff.
2021 	 */
2022 	if (intel_ring_update_space(ring) >= PAGE_SIZE)
2023 		return NULL;
2024 
2025 	/*
	 * Find a request such that, after waiting upon it, at least half the
	 * ring will be available. The hysteresis allows us to compete for the
2028 	 * shared ring and should mean that we sleep less often prior to
2029 	 * claiming our resources, but not so long that the ring completely
2030 	 * drains before we can submit our next request.
2031 	 */
2032 	list_for_each_entry(rq, &tl->requests, link) {
2033 		if (rq->ring != ring)
2034 			continue;
2035 
2036 		if (__intel_ring_space(rq->postfix,
2037 				       ring->emit, ring->size) > ring->size / 2)
2038 			break;
2039 	}
2040 	if (&rq->link == &tl->requests)
2041 		return NULL; /* weird, we will check again later for real */
2042 
2043 	return i915_request_get(rq);
2044 }
2045 
2046 static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
2047 {
2048 	struct intel_timeline *tl;
2049 	struct i915_request *rq;
2050 	int err;
2051 
2052 	/*
2053 	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
2054 	 * EIO if the GPU is already wedged.
2055 	 */
2056 	err = intel_gt_terminally_wedged(ce->engine->gt);
2057 	if (err)
2058 		return err;
2059 
2060 	if (unlikely(intel_context_is_banned(ce)))
2061 		return -EIO;
2062 
2063 	/*
2064 	 * Pinning the contexts may generate requests in order to acquire
2065 	 * GGTT space, so do this first before we reserve a seqno for
2066 	 * ourselves.
2067 	 */
2068 	err = intel_context_pin(ce);
2069 	if (err)
2070 		return err;
2071 
2072 	/*
2073 	 * Take a local wakeref for preparing to dispatch the execbuf as
2074 	 * we expect to access the hardware fairly frequently in the
2075 	 * process, and require the engine to be kept awake between accesses.
2076 	 * Upon dispatch, we acquire another prolonged wakeref that we hold
2077 	 * until the timeline is idle, which in turn releases the wakeref
2078 	 * taken on the engine, and the parent device.
2079 	 */
2080 	tl = intel_context_timeline_lock(ce);
2081 	if (IS_ERR(tl)) {
2082 		err = PTR_ERR(tl);
2083 		goto err_unpin;
2084 	}
2085 
2086 	intel_context_enter(ce);
2087 	rq = eb_throttle(ce);
2088 
2089 	intel_context_timeline_unlock(tl);
2090 
2091 	if (rq) {
2092 		bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
2093 		long timeout;
2094 
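		/* An O_NONBLOCK submitter must not sleep waiting for ring space */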
2095 		timeout = MAX_SCHEDULE_TIMEOUT;
2096 		if (nonblock)
2097 			timeout = 0;
2098 
2099 		timeout = i915_request_wait(rq,
2100 					    I915_WAIT_INTERRUPTIBLE,
2101 					    timeout);
2102 		i915_request_put(rq);
2103 
2104 		if (timeout < 0) {
2105 			err = nonblock ? -EWOULDBLOCK : timeout;
2106 			goto err_exit;
2107 		}
2108 	}
2109 
2110 	eb->engine = ce->engine;
2111 	eb->context = ce;
2112 	return 0;
2113 
2114 err_exit:
2115 	mutex_lock(&tl->mutex);
2116 	intel_context_exit(ce);
2117 	intel_context_timeline_unlock(tl);
2118 err_unpin:
2119 	intel_context_unpin(ce);
2120 	return err;
2121 }
2122 
2123 static void eb_unpin_engine(struct i915_execbuffer *eb)
2124 {
2125 	struct intel_context *ce = eb->context;
2126 	struct intel_timeline *tl = ce->timeline;
2127 
2128 	mutex_lock(&tl->mutex);
2129 	intel_context_exit(ce);
2130 	mutex_unlock(&tl->mutex);
2131 
2132 	intel_context_unpin(ce);
2133 }
2134 
2135 static unsigned int
2136 eb_select_legacy_ring(struct i915_execbuffer *eb,
2137 		      struct drm_file *file,
2138 		      struct drm_i915_gem_execbuffer2 *args)
2139 {
2140 	struct drm_i915_private *i915 = eb->i915;
2141 	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
2142 
2143 	if (user_ring_id != I915_EXEC_BSD &&
2144 	    (args->flags & I915_EXEC_BSD_MASK)) {
2145 		drm_dbg(&i915->drm,
2146 			"execbuf with non bsd ring but with invalid "
2147 			"bsd dispatch flags: %d\n", (int)(args->flags));
2148 		return -1;
2149 	}
2150 
2151 	if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
2152 		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
2153 
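		/*
		 * I915_EXEC_BSD_RING1/RING2 select an explicit VCS instance:
		 * shift the field down and subtract one to get the 0-based
		 * engine index for _VCS().
		 */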
2154 		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
2155 			bsd_idx = gen8_dispatch_bsd_engine(i915, file);
2156 		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
2157 			   bsd_idx <= I915_EXEC_BSD_RING2) {
2158 			bsd_idx >>= I915_EXEC_BSD_SHIFT;
2159 			bsd_idx--;
2160 		} else {
2161 			drm_dbg(&i915->drm,
2162 				"execbuf with unknown bsd ring: %u\n",
2163 				bsd_idx);
2164 			return -1;
2165 		}
2166 
2167 		return _VCS(bsd_idx);
2168 	}
2169 
2170 	if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
2171 		drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
2172 			user_ring_id);
2173 		return -1;
2174 	}
2175 
2176 	return user_ring_map[user_ring_id];
2177 }
2178 
2179 static int
2180 eb_pin_engine(struct i915_execbuffer *eb,
2181 	      struct drm_file *file,
2182 	      struct drm_i915_gem_execbuffer2 *args)
2183 {
2184 	struct intel_context *ce;
2185 	unsigned int idx;
2186 	int err;
2187 
2188 	if (i915_gem_context_user_engines(eb->gem_context))
2189 		idx = args->flags & I915_EXEC_RING_MASK;
2190 	else
2191 		idx = eb_select_legacy_ring(eb, file, args);
2192 
2193 	ce = i915_gem_context_get_engine(eb->gem_context, idx);
2194 	if (IS_ERR(ce))
2195 		return PTR_ERR(ce);
2196 
2197 	err = __eb_pin_engine(eb, ce);
2198 	intel_context_put(ce);
2199 
2200 	return err;
2201 }
2202 
2203 static void
2204 __free_fence_array(struct drm_syncobj **fences, unsigned int n)
2205 {
2206 	while (n--)
2207 		drm_syncobj_put(ptr_mask_bits(fences[n], 2));
2208 	kvfree(fences);
2209 }
2210 
2211 static struct drm_syncobj **
2212 get_fence_array(struct drm_i915_gem_execbuffer2 *args,
2213 		struct drm_file *file)
2214 {
2215 	const unsigned long nfences = args->num_cliprects;
2216 	struct drm_i915_gem_exec_fence __user *user;
2217 	struct drm_syncobj **fences;
2218 	unsigned long n;
2219 	int err;
2220 
2221 	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
2222 		return NULL;
2223 
2224 	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
2225 	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
2226 	if (nfences > min_t(unsigned long,
2227 			    ULONG_MAX / sizeof(*user),
2228 			    SIZE_MAX / sizeof(*fences)))
2229 		return ERR_PTR(-EINVAL);
2230 
2231 	user = u64_to_user_ptr(args->cliprects_ptr);
2232 	if (!access_ok(user, nfences * sizeof(*user)))
2233 		return ERR_PTR(-EFAULT);
2234 
2235 	fences = kvmalloc_array(nfences, sizeof(*fences),
2236 				__GFP_NOWARN | GFP_KERNEL);
2237 	if (!fences)
2238 		return ERR_PTR(-ENOMEM);
2239 
2240 	for (n = 0; n < nfences; n++) {
2241 		struct drm_i915_gem_exec_fence fence;
2242 		struct drm_syncobj *syncobj;
2243 
2244 		if (__copy_from_user(&fence, user++, sizeof(fence))) {
2245 			err = -EFAULT;
2246 			goto err;
2247 		}
2248 
2249 		if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
2250 			err = -EINVAL;
2251 			goto err;
2252 		}
2253 
2254 		syncobj = drm_syncobj_find(file, fence.handle);
2255 		if (!syncobj) {
2256 			DRM_DEBUG("Invalid syncobj handle provided\n");
2257 			err = -ENOENT;
2258 			goto err;
2259 		}
2260 
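		/* The low pointer bits must be free to stash the fence flags */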
2261 		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
2262 			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
2263 
2264 		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
2265 	}
2266 
2267 	return fences;
2268 
2269 err:
2270 	__free_fence_array(fences, n);
2271 	return ERR_PTR(err);
2272 }
2273 
2274 static void
2275 put_fence_array(struct drm_i915_gem_execbuffer2 *args,
2276 		struct drm_syncobj **fences)
2277 {
2278 	if (fences)
2279 		__free_fence_array(fences, args->num_cliprects);
2280 }
2281 
2282 static int
2283 await_fence_array(struct i915_execbuffer *eb,
2284 		  struct drm_syncobj **fences)
2285 {
2286 	const unsigned int nfences = eb->args->num_cliprects;
2287 	unsigned int n;
2288 	int err;
2289 
2290 	for (n = 0; n < nfences; n++) {
2291 		struct drm_syncobj *syncobj;
2292 		struct dma_fence *fence;
2293 		unsigned int flags;
2294 
2295 		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
2296 		if (!(flags & I915_EXEC_FENCE_WAIT))
2297 			continue;
2298 
2299 		fence = drm_syncobj_fence_get(syncobj);
2300 		if (!fence)
2301 			return -EINVAL;
2302 
2303 		err = i915_request_await_dma_fence(eb->request, fence);
2304 		dma_fence_put(fence);
2305 		if (err < 0)
2306 			return err;
2307 	}
2308 
2309 	return 0;
2310 }
2311 
2312 static void
2313 signal_fence_array(struct i915_execbuffer *eb,
2314 		   struct drm_syncobj **fences)
2315 {
2316 	const unsigned int nfences = eb->args->num_cliprects;
2317 	struct dma_fence * const fence = &eb->request->fence;
2318 	unsigned int n;
2319 
2320 	for (n = 0; n < nfences; n++) {
2321 		struct drm_syncobj *syncobj;
2322 		unsigned int flags;
2323 
2324 		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
2325 		if (!(flags & I915_EXEC_FENCE_SIGNAL))
2326 			continue;
2327 
2328 		drm_syncobj_replace_fence(syncobj, fence);
2329 	}
2330 }
2331 
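/* Retire completed requests along the timeline, stopping before @end */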
2332 static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
2333 {
2334 	struct i915_request *rq, *rn;
2335 
2336 	list_for_each_entry_safe(rq, rn, &tl->requests, link)
2337 		if (rq == end || !i915_request_retire(rq))
2338 			break;
2339 }
2340 
2341 static void eb_request_add(struct i915_execbuffer *eb)
2342 {
2343 	struct i915_request *rq = eb->request;
2344 	struct intel_timeline * const tl = i915_request_timeline(rq);
2345 	struct i915_sched_attr attr = {};
2346 	struct i915_request *prev;
2347 
2348 	lockdep_assert_held(&tl->mutex);
2349 	lockdep_unpin_lock(&tl->mutex, rq->cookie);
2350 
2351 	trace_i915_request_add(rq);
2352 
2353 	prev = __i915_request_commit(rq);
2354 
2355 	/* Check that the context wasn't destroyed before submission */
2356 	if (likely(!intel_context_is_closed(eb->context))) {
2357 		attr = eb->gem_context->sched;
2358 	} else {
2359 		/* Serialise with context_close via the add_to_timeline */
2360 		i915_request_set_error_once(rq, -ENOENT);
2361 		__i915_request_skip(rq);
2362 	}
2363 
2364 	__i915_request_queue(rq, &attr);
2365 
2366 	/* Try to clean up the client's timeline after submitting the request */
2367 	if (prev)
2368 		retire_requests(tl, prev);
2369 
2370 	mutex_unlock(&tl->mutex);
2371 }
2372 
2373 static int
2374 i915_gem_do_execbuffer(struct drm_device *dev,
2375 		       struct drm_file *file,
2376 		       struct drm_i915_gem_execbuffer2 *args,
2377 		       struct drm_i915_gem_exec_object2 *exec,
2378 		       struct drm_syncobj **fences)
2379 {
2380 	struct drm_i915_private *i915 = to_i915(dev);
2381 	struct i915_execbuffer eb;
2382 	struct dma_fence *in_fence = NULL;
2383 	struct sync_file *out_fence = NULL;
2384 	struct i915_vma *batch;
2385 	int out_fence_fd = -1;
2386 	int err;
2387 
2388 	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
2389 	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
2390 		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);
2391 
2392 	eb.i915 = i915;
2393 	eb.file = file;
2394 	eb.args = args;
2395 	if (!(args->flags & I915_EXEC_NO_RELOC))
2396 		args->flags |= __EXEC_HAS_RELOC;
2397 
2398 	eb.exec = exec;
2399 
2400 	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
2401 	reloc_cache_init(&eb.reloc_cache, eb.i915);
2402 
2403 	eb.buffer_count = args->buffer_count;
2404 	eb.batch_start_offset = args->batch_start_offset;
2405 	eb.batch_len = args->batch_len;
2406 	eb.trampoline = NULL;
2407 
2408 	eb.batch_flags = 0;
2409 	if (args->flags & I915_EXEC_SECURE) {
2410 		if (INTEL_GEN(i915) >= 11)
2411 			return -ENODEV;
2412 
2413 		/* Return -EPERM to trigger fallback code on old binaries. */
2414 		if (!HAS_SECURE_BATCHES(i915))
2415 			return -EPERM;
2416 
2417 		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
2418 			return -EPERM;
2419 
2420 		eb.batch_flags |= I915_DISPATCH_SECURE;
2421 	}
2422 	if (args->flags & I915_EXEC_IS_PINNED)
2423 		eb.batch_flags |= I915_DISPATCH_PINNED;
2424 
2425 #define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT)
2426 	if (args->flags & IN_FENCES) {
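		/* FENCE_IN and FENCE_SUBMIT are mutually exclusive */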
2427 		if ((args->flags & IN_FENCES) == IN_FENCES)
2428 			return -EINVAL;
2429 
2430 		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
2431 		if (!in_fence)
2432 			return -EINVAL;
2433 	}
2434 #undef IN_FENCES
2435 
2436 	if (args->flags & I915_EXEC_FENCE_OUT) {
2437 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
2438 		if (out_fence_fd < 0) {
2439 			err = out_fence_fd;
2440 			goto err_in_fence;
2441 		}
2442 	}
2443 
2444 	err = eb_create(&eb);
2445 	if (err)
2446 		goto err_out_fence;
2447 
2448 	GEM_BUG_ON(!eb.lut_size);
2449 
2450 	err = eb_select_context(&eb);
2451 	if (unlikely(err))
2452 		goto err_destroy;
2453 
2454 	err = eb_pin_engine(&eb, file, args);
2455 	if (unlikely(err))
2456 		goto err_context;
2457 
2458 	err = eb_relocate(&eb);
2459 	if (err) {
2460 		/*
2461 		 * If the user expects the execobject.offset and
2462 		 * reloc.presumed_offset to be an exact match,
2463 		 * as for using NO_RELOC, then we cannot update
2464 		 * the execobject.offset until we have completed
2465 		 * relocation.
2466 		 */
2467 		args->flags &= ~__EXEC_HAS_RELOC;
2468 		goto err_vma;
2469 	}
2470 
2471 	if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) {
2472 		drm_dbg(&i915->drm,
2473 			"Attempting to use self-modifying batch buffer\n");
2474 		err = -EINVAL;
2475 		goto err_vma;
2476 	}
2477 
2478 	if (range_overflows_t(u64,
2479 			      eb.batch_start_offset, eb.batch_len,
2480 			      eb.batch->vma->size)) {
2481 		drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
2482 		err = -EINVAL;
2483 		goto err_vma;
2484 	}
2485 
2486 	if (eb.batch_len == 0)
2487 		eb.batch_len = eb.batch->vma->size - eb.batch_start_offset;
2488 
2489 	err = eb_parse(&eb);
2490 	if (err)
2491 		goto err_vma;
2492 
2493 	/*
2494 	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
2495 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again.
	 */
2497 	batch = eb.batch->vma;
2498 	if (eb.batch_flags & I915_DISPATCH_SECURE) {
2499 		struct i915_vma *vma;
2500 
2501 		/*
		 * So at first glance it looks freaky that we pin the batch here
2503 		 * outside of the reservation loop. But:
2504 		 * - The batch is already pinned into the relevant ppgtt, so we
2505 		 *   already have the backing storage fully allocated.
2506 		 * - No other BO uses the global gtt (well contexts, but meh),
2507 		 *   so we don't really have issues with multiple objects not
2508 		 *   fitting due to fragmentation.
2509 		 * So this is actually safe.
2510 		 */
2511 		vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
2512 		if (IS_ERR(vma)) {
2513 			err = PTR_ERR(vma);
2514 			goto err_parse;
2515 		}
2516 
2517 		batch = vma;
2518 	}
2519 
2520 	/* All GPU relocation batches must be submitted prior to the user rq */
2521 	GEM_BUG_ON(eb.reloc_cache.rq);
2522 
2523 	/* Allocate a request for this batch buffer nice and early. */
2524 	eb.request = i915_request_create(eb.context);
2525 	if (IS_ERR(eb.request)) {
2526 		err = PTR_ERR(eb.request);
2527 		goto err_batch_unpin;
2528 	}
2529 
2530 	if (in_fence) {
2531 		if (args->flags & I915_EXEC_FENCE_SUBMIT)
2532 			err = i915_request_await_execution(eb.request,
2533 							   in_fence,
2534 							   eb.engine->bond_execute);
2535 		else
2536 			err = i915_request_await_dma_fence(eb.request,
2537 							   in_fence);
2538 		if (err < 0)
2539 			goto err_request;
2540 	}
2541 
2542 	if (fences) {
2543 		err = await_fence_array(&eb, fences);
2544 		if (err)
2545 			goto err_request;
2546 	}
2547 
2548 	if (out_fence_fd != -1) {
2549 		out_fence = sync_file_create(&eb.request->fence);
2550 		if (!out_fence) {
2551 			err = -ENOMEM;
2552 			goto err_request;
2553 		}
2554 	}
2555 
2556 	/*
2557 	 * Whilst this request exists, batch_obj will be on the
2558 	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
2560 	 * inactive_list and lose its active reference. Hence we do not need
2561 	 * to explicitly hold another reference here.
2562 	 */
2563 	eb.request->batch = batch;
2564 	if (batch->private)
2565 		intel_gt_buffer_pool_mark_active(batch->private, eb.request);
2566 
2567 	trace_i915_request_queue(eb.request, eb.batch_flags);
2568 	err = eb_submit(&eb, batch);
2569 err_request:
2570 	add_to_client(eb.request, file);
2571 	i915_request_get(eb.request);
2572 	eb_request_add(&eb);
2573 
2574 	if (fences)
2575 		signal_fence_array(&eb, fences);
2576 
2577 	if (out_fence) {
2578 		if (err == 0) {
2579 			fd_install(out_fence_fd, out_fence->file);
2580 			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
2581 			args->rsvd2 |= (u64)out_fence_fd << 32;
2582 			out_fence_fd = -1;
2583 		} else {
2584 			fput(out_fence->file);
2585 		}
2586 	}
2587 	i915_request_put(eb.request);
2588 
2589 err_batch_unpin:
2590 	if (eb.batch_flags & I915_DISPATCH_SECURE)
2591 		i915_vma_unpin(batch);
2592 err_parse:
2593 	if (batch->private)
2594 		intel_gt_buffer_pool_put(batch->private);
2595 err_vma:
2596 	if (eb.trampoline)
2597 		i915_vma_unpin(eb.trampoline);
2598 	eb_unpin_engine(&eb);
2599 err_context:
2600 	i915_gem_context_put(eb.gem_context);
2601 err_destroy:
2602 	eb_destroy(&eb);
2603 err_out_fence:
2604 	if (out_fence_fd != -1)
2605 		put_unused_fd(out_fence_fd);
2606 err_in_fence:
2607 	dma_fence_put(in_fence);
2608 	return err;
2609 }
2610 
2611 static size_t eb_element_size(void)
2612 {
2613 	return sizeof(struct drm_i915_gem_exec_object2);
2614 }
2615 
2616 static bool check_buffer_count(size_t count)
2617 {
2618 	const size_t sz = eb_element_size();
2619 
2620 	/*
2621 	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
2622 	 * array size (see eb_create()). Otherwise, we can accept an array as
2623 	 * large as can be addressed (though use large arrays at your peril)!
2624 	 */
2625 
2626 	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
2627 }
2628 
2629 /*
2630  * Legacy execbuffer just creates an exec2 list from the original exec object
2631  * list array and passes it to the real function.
2632  */
2633 int
2634 i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
2635 			  struct drm_file *file)
2636 {
2637 	struct drm_i915_private *i915 = to_i915(dev);
2638 	struct drm_i915_gem_execbuffer *args = data;
2639 	struct drm_i915_gem_execbuffer2 exec2;
2640 	struct drm_i915_gem_exec_object *exec_list = NULL;
2641 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
2642 	const size_t count = args->buffer_count;
2643 	unsigned int i;
2644 	int err;
2645 
2646 	if (!check_buffer_count(count)) {
2647 		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
2648 		return -EINVAL;
2649 	}
2650 
2651 	exec2.buffers_ptr = args->buffers_ptr;
2652 	exec2.buffer_count = args->buffer_count;
2653 	exec2.batch_start_offset = args->batch_start_offset;
2654 	exec2.batch_len = args->batch_len;
2655 	exec2.DR1 = args->DR1;
2656 	exec2.DR4 = args->DR4;
2657 	exec2.num_cliprects = args->num_cliprects;
2658 	exec2.cliprects_ptr = args->cliprects_ptr;
2659 	exec2.flags = I915_EXEC_RENDER;
2660 	i915_execbuffer2_set_context_id(exec2, 0);
2661 
2662 	err = i915_gem_check_execbuffer(&exec2);
2663 	if (err)
2664 		return err;
2665 
2666 	/* Copy in the exec list from userland */
2667 	exec_list = kvmalloc_array(count, sizeof(*exec_list),
2668 				   __GFP_NOWARN | GFP_KERNEL);
2669 	exec2_list = kvmalloc_array(count, eb_element_size(),
2670 				    __GFP_NOWARN | GFP_KERNEL);
2671 	if (exec_list == NULL || exec2_list == NULL) {
2672 		drm_dbg(&i915->drm,
2673 			"Failed to allocate exec list for %d buffers\n",
2674 			args->buffer_count);
2675 		kvfree(exec_list);
2676 		kvfree(exec2_list);
2677 		return -ENOMEM;
2678 	}
2679 	err = copy_from_user(exec_list,
2680 			     u64_to_user_ptr(args->buffers_ptr),
2681 			     sizeof(*exec_list) * count);
2682 	if (err) {
2683 		drm_dbg(&i915->drm, "copy %d exec entries failed %d\n",
2684 			args->buffer_count, err);
2685 		kvfree(exec_list);
2686 		kvfree(exec2_list);
2687 		return -EFAULT;
2688 	}
2689 
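	/* Translate the legacy exec_object entries into exec_object2 form */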
2690 	for (i = 0; i < args->buffer_count; i++) {
2691 		exec2_list[i].handle = exec_list[i].handle;
2692 		exec2_list[i].relocation_count = exec_list[i].relocation_count;
2693 		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
2694 		exec2_list[i].alignment = exec_list[i].alignment;
2695 		exec2_list[i].offset = exec_list[i].offset;
2696 		if (INTEL_GEN(to_i915(dev)) < 4)
2697 			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
2698 		else
2699 			exec2_list[i].flags = 0;
2700 	}
2701 
2702 	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
2703 	if (exec2.flags & __EXEC_HAS_RELOC) {
2704 		struct drm_i915_gem_exec_object __user *user_exec_list =
2705 			u64_to_user_ptr(args->buffers_ptr);
2706 
2707 		/* Copy the new buffer offsets back to the user's exec list. */
2708 		for (i = 0; i < args->buffer_count; i++) {
2709 			if (!(exec2_list[i].offset & UPDATE))
2710 				continue;
2711 
2712 			exec2_list[i].offset =
2713 				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
2714 			exec2_list[i].offset &= PIN_OFFSET_MASK;
2715 			if (__copy_to_user(&user_exec_list[i].offset,
2716 					   &exec2_list[i].offset,
2717 					   sizeof(user_exec_list[i].offset)))
2718 				break;
2719 		}
2720 	}
2721 
2722 	kvfree(exec_list);
2723 	kvfree(exec2_list);
2724 	return err;
2725 }
2726 
2727 int
2728 i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
2729 			   struct drm_file *file)
2730 {
2731 	struct drm_i915_private *i915 = to_i915(dev);
2732 	struct drm_i915_gem_execbuffer2 *args = data;
2733 	struct drm_i915_gem_exec_object2 *exec2_list;
2734 	struct drm_syncobj **fences = NULL;
2735 	const size_t count = args->buffer_count;
2736 	int err;
2737 
2738 	if (!check_buffer_count(count)) {
2739 		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
2740 		return -EINVAL;
2741 	}
2742 
2743 	err = i915_gem_check_execbuffer(args);
2744 	if (err)
2745 		return err;
2746 
2747 	exec2_list = kvmalloc_array(count, eb_element_size(),
2748 				    __GFP_NOWARN | GFP_KERNEL);
2749 	if (exec2_list == NULL) {
2750 		drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
2751 			count);
2752 		return -ENOMEM;
2753 	}
2754 	if (copy_from_user(exec2_list,
2755 			   u64_to_user_ptr(args->buffers_ptr),
2756 			   sizeof(*exec2_list) * count)) {
2757 		drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
2758 		kvfree(exec2_list);
2759 		return -EFAULT;
2760 	}
2761 
2762 	if (args->flags & I915_EXEC_FENCE_ARRAY) {
2763 		fences = get_fence_array(args, file);
2764 		if (IS_ERR(fences)) {
2765 			kvfree(exec2_list);
2766 			return PTR_ERR(fences);
2767 		}
2768 	}
2769 
2770 	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);
2771 
2772 	/*
2773 	 * Now that we have begun execution of the batchbuffer, we ignore
2774 	 * any new error after this point. Also given that we have already
2775 	 * updated the associated relocations, we try to write out the current
2776 	 * object locations irrespective of any error.
2777 	 */
2778 	if (args->flags & __EXEC_HAS_RELOC) {
2779 		struct drm_i915_gem_exec_object2 __user *user_exec_list =
2780 			u64_to_user_ptr(args->buffers_ptr);
2781 		unsigned int i;
2782 
2783 		/* Copy the new buffer offsets back to the user's exec list. */
2784 		/*
2785 		 * Note: count * sizeof(*user_exec_list) does not overflow,
2786 		 * because we checked 'count' in check_buffer_count().
2787 		 *
2788 		 * And this range already got effectively checked earlier
2789 		 * when we did the "copy_from_user()" above.
2790 		 */
2791 		if (!user_write_access_begin(user_exec_list,
2792 					     count * sizeof(*user_exec_list)))
2793 			goto end;
2794 
2795 		for (i = 0; i < args->buffer_count; i++) {
2796 			if (!(exec2_list[i].offset & UPDATE))
2797 				continue;
2798 
2799 			exec2_list[i].offset =
2800 				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
2801 			unsafe_put_user(exec2_list[i].offset,
2802 					&user_exec_list[i].offset,
2803 					end_user);
2804 		}
2805 end_user:
2806 		user_write_access_end();
2807 end:;
2808 	}
2809 
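	/*
	 * The internal __EXEC_HAS_RELOC marker lives among the unknown flag
	 * bits; strip them all before args is copied back to userspace.
	 */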
2810 	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
2811 	put_fence_array(args, fences);
2812 	kvfree(exec2_list);
2813 	return err;
2814 }
2815 
2816 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2817 #include "selftests/i915_gem_execbuffer.c"
2818 #endif
2819