1 /* 2 * SPDX-License-Identifier: MIT 3 * 4 * Copyright © 2008,2010 Intel Corporation 5 */ 6 7 #include <linux/dma-resv.h> 8 #include <linux/highmem.h> 9 #include <linux/sync_file.h> 10 #include <linux/uaccess.h> 11 12 #include <drm/drm_syncobj.h> 13 14 #include "display/intel_frontbuffer.h" 15 16 #include "gem/i915_gem_ioctls.h" 17 #include "gt/intel_context.h" 18 #include "gt/intel_gpu_commands.h" 19 #include "gt/intel_gt.h" 20 #include "gt/intel_gt_buffer_pool.h" 21 #include "gt/intel_gt_pm.h" 22 #include "gt/intel_ring.h" 23 24 #include "pxp/intel_pxp.h" 25 26 #include "i915_cmd_parser.h" 27 #include "i915_drv.h" 28 #include "i915_file_private.h" 29 #include "i915_gem_clflush.h" 30 #include "i915_gem_context.h" 31 #include "i915_gem_evict.h" 32 #include "i915_gem_ioctls.h" 33 #include "i915_reg.h" 34 #include "i915_trace.h" 35 #include "i915_user_extensions.h" 36 37 struct eb_vma { 38 struct i915_vma *vma; 39 unsigned int flags; 40 41 /** This vma's place in the execbuf reservation list */ 42 struct drm_i915_gem_exec_object2 *exec; 43 struct list_head bind_link; 44 struct list_head reloc_link; 45 46 struct hlist_node node; 47 u32 handle; 48 }; 49 50 enum { 51 FORCE_CPU_RELOC = 1, 52 FORCE_GTT_RELOC, 53 FORCE_GPU_RELOC, 54 #define DBG_FORCE_RELOC 0 /* choose one of the above! */ 55 }; 56 57 /* __EXEC_OBJECT_ flags > BIT(29) defined in i915_vma.h */ 58 #define __EXEC_OBJECT_HAS_PIN BIT(29) 59 #define __EXEC_OBJECT_HAS_FENCE BIT(28) 60 #define __EXEC_OBJECT_USERPTR_INIT BIT(27) 61 #define __EXEC_OBJECT_NEEDS_MAP BIT(26) 62 #define __EXEC_OBJECT_NEEDS_BIAS BIT(25) 63 #define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 25) /* all of the above + */ 64 #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE) 65 66 #define __EXEC_HAS_RELOC BIT(31) 67 #define __EXEC_ENGINE_PINNED BIT(30) 68 #define __EXEC_USERPTR_USED BIT(29) 69 #define __EXEC_INTERNAL_FLAGS (~0u << 29) 70 #define UPDATE PIN_OFFSET_FIXED 71 72 #define BATCH_OFFSET_BIAS (256*1024) 73 74 #define __I915_EXEC_ILLEGAL_FLAGS \ 75 (__I915_EXEC_UNKNOWN_FLAGS | \ 76 I915_EXEC_CONSTANTS_MASK | \ 77 I915_EXEC_RESOURCE_STREAMER) 78 79 /* Catch emission of unexpected errors for CI! */ 80 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) 81 #undef EINVAL 82 #define EINVAL ({ \ 83 DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \ 84 22; \ 85 }) 86 #endif 87 88 /** 89 * DOC: User command execution 90 * 91 * Userspace submits commands to be executed on the GPU as an instruction 92 * stream within a GEM object we call a batchbuffer. This instructions may 93 * refer to other GEM objects containing auxiliary state such as kernels, 94 * samplers, render targets and even secondary batchbuffers. Userspace does 95 * not know where in the GPU memory these objects reside and so before the 96 * batchbuffer is passed to the GPU for execution, those addresses in the 97 * batchbuffer and auxiliary objects are updated. This is known as relocation, 98 * or patching. To try and avoid having to relocate each object on the next 99 * execution, userspace is told the location of those objects in this pass, 100 * but this remains just a hint as the kernel may choose a new location for 101 * any object in the future. 102 * 103 * At the level of talking to the hardware, submitting a batchbuffer for the 104 * GPU to execute is to add content to a buffer from which the HW 105 * command streamer is reading. 106 * 107 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e. 
108 * Execlists, this command is not placed on the same buffer as the 109 * remaining items. 110 * 111 * 2. Add a command to invalidate caches to the buffer. 112 * 113 * 3. Add a batchbuffer start command to the buffer; the start command is 114 * essentially a token together with the GPU address of the batchbuffer 115 * to be executed. 116 * 117 * 4. Add a pipeline flush to the buffer. 118 * 119 * 5. Add a memory write command to the buffer to record when the GPU 120 * is done executing the batchbuffer. The memory write writes the 121 * global sequence number of the request, ``i915_request::global_seqno``; 122 * the i915 driver uses the current value in the register to determine 123 * if the GPU has completed the batchbuffer. 124 * 125 * 6. Add a user interrupt command to the buffer. This command instructs 126 * the GPU to issue an interrupt when the command, pipeline flush and 127 * memory write are completed. 128 * 129 * 7. Inform the hardware of the additional commands added to the buffer 130 * (by updating the tail pointer). 131 * 132 * Processing an execbuf ioctl is conceptually split up into a few phases. 133 * 134 * 1. Validation - Ensure all the pointers, handles and flags are valid. 135 * 2. Reservation - Assign GPU address space for every object 136 * 3. Relocation - Update any addresses to point to the final locations 137 * 4. Serialisation - Order the request with respect to its dependencies 138 * 5. Construction - Construct a request to execute the batchbuffer 139 * 6. Submission (at some point in the future execution) 140 * 141 * Reserving resources for the execbuf is the most complicated phase. We 142 * neither want to have to migrate the object in the address space, nor do 143 * we want to have to update any relocations pointing to this object. Ideally, 144 * we want to leave the object where it is and for all the existing relocations 145 * to match. If the object is given a new address, or if userspace thinks the 146 * object is elsewhere, we have to parse all the relocation entries and update 147 * the addresses. Userspace can set the I915_EXEC_NORELOC flag to hint that 148 * all the target addresses in all of its objects match the value in the 149 * relocation entries and that they all match the presumed offsets given by the 150 * list of execbuffer objects. Using this knowledge, we know that if we haven't 151 * moved any buffers, all the relocation entries are valid and we can skip 152 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU 153 * hang.) The requirement for using I915_EXEC_NO_RELOC are: 154 * 155 * The addresses written in the objects must match the corresponding 156 * reloc.presumed_offset which in turn must match the corresponding 157 * execobject.offset. 158 * 159 * Any render targets written to in the batch must be flagged with 160 * EXEC_OBJECT_WRITE. 161 * 162 * To avoid stalling, execobject.offset should match the current 163 * address of that object within the active context. 164 * 165 * The reservation is done is multiple phases. First we try and keep any 166 * object already bound in its current location - so as long as meets the 167 * constraints imposed by the new execbuffer. Any object left unbound after the 168 * first pass is then fitted into any available idle space. If an object does 169 * not fit, all objects are removed from the reservation and the process rerun 170 * after sorting the objects into a priority order (more difficult to fit 171 * objects are tried first). 
Failing that, the entire VM is cleared and we try 172 * to fit the execbuf once last time before concluding that it simply will not 173 * fit. 174 * 175 * A small complication to all of this is that we allow userspace not only to 176 * specify an alignment and a size for the object in the address space, but 177 * we also allow userspace to specify the exact offset. This objects are 178 * simpler to place (the location is known a priori) all we have to do is make 179 * sure the space is available. 180 * 181 * Once all the objects are in place, patching up the buried pointers to point 182 * to the final locations is a fairly simple job of walking over the relocation 183 * entry arrays, looking up the right address and rewriting the value into 184 * the object. Simple! ... The relocation entries are stored in user memory 185 * and so to access them we have to copy them into a local buffer. That copy 186 * has to avoid taking any pagefaults as they may lead back to a GEM object 187 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split 188 * the relocation into multiple passes. First we try to do everything within an 189 * atomic context (avoid the pagefaults) which requires that we never wait. If 190 * we detect that we may wait, or if we need to fault, then we have to fallback 191 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm 192 * bells yet?) Dropping the mutex means that we lose all the state we have 193 * built up so far for the execbuf and we must reset any global data. However, 194 * we do leave the objects pinned in their final locations - which is a 195 * potential issue for concurrent execbufs. Once we have left the mutex, we can 196 * allocate and copy all the relocation entries into a large array at our 197 * leisure, reacquire the mutex, reclaim all the objects and other state and 198 * then proceed to update any incorrect addresses with the objects. 199 * 200 * As we process the relocation entries, we maintain a record of whether the 201 * object is being written to. Using NORELOC, we expect userspace to provide 202 * this information instead. We also check whether we can skip the relocation 203 * by comparing the expected value inside the relocation entry with the target's 204 * final address. If they differ, we have to map the current object and rewrite 205 * the 4 or 8 byte pointer within. 206 * 207 * Serialising an execbuf is quite simple according to the rules of the GEM 208 * ABI. Execution within each context is ordered by the order of submission. 209 * Writes to any GEM object are in order of submission and are exclusive. Reads 210 * from a GEM object are unordered with respect to other reads, but ordered by 211 * writes. A write submitted after a read cannot occur before the read, and 212 * similarly any read submitted after a write cannot occur before the write. 213 * Writes are ordered between engines such that only one write occurs at any 214 * time (completing any reads beforehand) - using semaphores where available 215 * and CPU serialisation otherwise. Other GEM access obey the same rules, any 216 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU 217 * reads before starting, and any read (either using set-domain or pread) must 218 * flush all GPU writes before starting. (Note we only employ a barrier before, 219 * we currently rely on userspace not concurrently starting a new execution 220 * whilst reading or writing to an object. 
This may be an advantage or not 221 * depending on how much you trust userspace not to shoot themselves in the 222 * foot.) Serialisation may just result in the request being inserted into 223 * a DAG awaiting its turn, but most simple is to wait on the CPU until 224 * all dependencies are resolved. 225 * 226 * After all of that, is just a matter of closing the request and handing it to 227 * the hardware (well, leaving it in a queue to be executed). However, we also 228 * offer the ability for batchbuffers to be run with elevated privileges so 229 * that they access otherwise hidden registers. (Used to adjust L3 cache etc.) 230 * Before any batch is given extra privileges we first must check that it 231 * contains no nefarious instructions, we check that each instruction is from 232 * our whitelist and all registers are also from an allowed list. We first 233 * copy the user's batchbuffer to a shadow (so that the user doesn't have 234 * access to it, either by the CPU or GPU as we scan it) and then parse each 235 * instruction. If everything is ok, we set a flag telling the hardware to run 236 * the batchbuffer in trusted mode, otherwise the ioctl is rejected. 237 */ 238 239 struct eb_fence { 240 struct drm_syncobj *syncobj; /* Use with ptr_mask_bits() */ 241 struct dma_fence *dma_fence; 242 u64 value; 243 struct dma_fence_chain *chain_fence; 244 }; 245 246 struct i915_execbuffer { 247 struct drm_i915_private *i915; /** i915 backpointer */ 248 struct drm_file *file; /** per-file lookup tables and limits */ 249 struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */ 250 struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */ 251 struct eb_vma *vma; 252 253 struct intel_gt *gt; /* gt for the execbuf */ 254 struct intel_context *context; /* logical state for the request */ 255 struct i915_gem_context *gem_context; /** caller's context */ 256 257 /** our requests to build */ 258 struct i915_request *requests[MAX_ENGINE_INSTANCE + 1]; 259 /** identity of the batch obj/vma */ 260 struct eb_vma *batches[MAX_ENGINE_INSTANCE + 1]; 261 struct i915_vma *trampoline; /** trampoline used for chaining */ 262 263 /** used for excl fence in dma_resv objects when > 1 BB submitted */ 264 struct dma_fence *composite_fence; 265 266 /** actual size of execobj[] as we may extend it for the cmdparser */ 267 unsigned int buffer_count; 268 269 /* number of batches in execbuf IOCTL */ 270 unsigned int num_batches; 271 272 /** list of vma not yet bound during reservation phase */ 273 struct list_head unbound; 274 275 /** list of vma that have execobj.relocation_count */ 276 struct list_head relocs; 277 278 struct i915_gem_ww_ctx ww; 279 280 /** 281 * Track the most recently used object for relocations, as we 282 * frequently have to perform multiple relocations within the same 283 * obj/page 284 */ 285 struct reloc_cache { 286 struct drm_mm_node node; /** temporary GTT binding */ 287 unsigned long vaddr; /** Current kmap address */ 288 unsigned long page; /** Currently mapped page index */ 289 unsigned int graphics_ver; /** Cached value of GRAPHICS_VER */ 290 bool use_64bit_reloc : 1; 291 bool has_llc : 1; 292 bool has_fence : 1; 293 bool needs_unfenced : 1; 294 } reloc_cache; 295 296 u64 invalid_flags; /** Set of execobj.flags that are invalid */ 297 298 /** Length of batch within object */ 299 u64 batch_len[MAX_ENGINE_INSTANCE + 1]; 300 u32 batch_start_offset; /** Location within object of batch */ 301 u32 batch_flags; /** Flags composed for emit_bb_start() */ 302 struct intel_gt_buffer_pool_node 
*batch_pool; /** pool node for batch buffer */ 303 304 /** 305 * Indicate either the size of the hastable used to resolve 306 * relocation handles, or if negative that we are using a direct 307 * index into the execobj[]. 308 */ 309 int lut_size; 310 struct hlist_head *buckets; /** ht for relocation handles */ 311 312 struct eb_fence *fences; 313 unsigned long num_fences; 314 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 315 struct i915_capture_list *capture_lists[MAX_ENGINE_INSTANCE + 1]; 316 #endif 317 }; 318 319 static int eb_parse(struct i915_execbuffer *eb); 320 static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle); 321 static void eb_unpin_engine(struct i915_execbuffer *eb); 322 static void eb_capture_release(struct i915_execbuffer *eb); 323 324 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) 325 { 326 return intel_engine_requires_cmd_parser(eb->context->engine) || 327 (intel_engine_using_cmd_parser(eb->context->engine) && 328 eb->args->batch_len); 329 } 330 331 static int eb_create(struct i915_execbuffer *eb) 332 { 333 if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) { 334 unsigned int size = 1 + ilog2(eb->buffer_count); 335 336 /* 337 * Without a 1:1 association between relocation handles and 338 * the execobject[] index, we instead create a hashtable. 339 * We size it dynamically based on available memory, starting 340 * first with 1:1 assocative hash and scaling back until 341 * the allocation succeeds. 342 * 343 * Later on we use a positive lut_size to indicate we are 344 * using this hashtable, and a negative value to indicate a 345 * direct lookup. 346 */ 347 do { 348 gfp_t flags; 349 350 /* While we can still reduce the allocation size, don't 351 * raise a warning and allow the allocation to fail. 352 * On the last pass though, we want to try as hard 353 * as possible to perform the allocation and warn 354 * if it fails. 355 */ 356 flags = GFP_KERNEL; 357 if (size > 1) 358 flags |= __GFP_NORETRY | __GFP_NOWARN; 359 360 eb->buckets = kzalloc(sizeof(struct hlist_head) << size, 361 flags); 362 if (eb->buckets) 363 break; 364 } while (--size); 365 366 if (unlikely(!size)) 367 return -ENOMEM; 368 369 eb->lut_size = size; 370 } else { 371 eb->lut_size = -eb->buffer_count; 372 } 373 374 return 0; 375 } 376 377 static bool 378 eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry, 379 const struct i915_vma *vma, 380 unsigned int flags) 381 { 382 const u64 start = i915_vma_offset(vma); 383 const u64 size = i915_vma_size(vma); 384 385 if (size < entry->pad_to_size) 386 return true; 387 388 if (entry->alignment && !IS_ALIGNED(start, entry->alignment)) 389 return true; 390 391 if (flags & EXEC_OBJECT_PINNED && 392 start != entry->offset) 393 return true; 394 395 if (flags & __EXEC_OBJECT_NEEDS_BIAS && 396 start < BATCH_OFFSET_BIAS) 397 return true; 398 399 if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) && 400 (start + size + 4095) >> 32) 401 return true; 402 403 if (flags & __EXEC_OBJECT_NEEDS_MAP && 404 !i915_vma_is_map_and_fenceable(vma)) 405 return true; 406 407 return false; 408 } 409 410 static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry, 411 unsigned int exec_flags) 412 { 413 u64 pin_flags = 0; 414 415 if (exec_flags & EXEC_OBJECT_NEEDS_GTT) 416 pin_flags |= PIN_GLOBAL; 417 418 /* 419 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset, 420 * limit address to the first 4GBs for unflagged objects. 
421 */ 422 if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) 423 pin_flags |= PIN_ZONE_4G; 424 425 if (exec_flags & __EXEC_OBJECT_NEEDS_MAP) 426 pin_flags |= PIN_MAPPABLE; 427 428 if (exec_flags & EXEC_OBJECT_PINNED) 429 pin_flags |= entry->offset | PIN_OFFSET_FIXED; 430 else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) 431 pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; 432 433 return pin_flags; 434 } 435 436 static inline int 437 eb_pin_vma(struct i915_execbuffer *eb, 438 const struct drm_i915_gem_exec_object2 *entry, 439 struct eb_vma *ev) 440 { 441 struct i915_vma *vma = ev->vma; 442 u64 pin_flags; 443 int err; 444 445 if (vma->node.size) 446 pin_flags = __i915_vma_offset(vma); 447 else 448 pin_flags = entry->offset & PIN_OFFSET_MASK; 449 450 pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED | PIN_VALIDATE; 451 if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT)) 452 pin_flags |= PIN_GLOBAL; 453 454 /* Attempt to reuse the current location if available */ 455 err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags); 456 if (err == -EDEADLK) 457 return err; 458 459 if (unlikely(err)) { 460 if (entry->flags & EXEC_OBJECT_PINNED) 461 return err; 462 463 /* Failing that pick any _free_ space if suitable */ 464 err = i915_vma_pin_ww(vma, &eb->ww, 465 entry->pad_to_size, 466 entry->alignment, 467 eb_pin_flags(entry, ev->flags) | 468 PIN_USER | PIN_NOEVICT | PIN_VALIDATE); 469 if (unlikely(err)) 470 return err; 471 } 472 473 if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) { 474 err = i915_vma_pin_fence(vma); 475 if (unlikely(err)) 476 return err; 477 478 if (vma->fence) 479 ev->flags |= __EXEC_OBJECT_HAS_FENCE; 480 } 481 482 ev->flags |= __EXEC_OBJECT_HAS_PIN; 483 if (eb_vma_misplaced(entry, vma, ev->flags)) 484 return -EBADSLT; 485 486 return 0; 487 } 488 489 static inline void 490 eb_unreserve_vma(struct eb_vma *ev) 491 { 492 if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE)) 493 __i915_vma_unpin_fence(ev->vma); 494 495 ev->flags &= ~__EXEC_OBJECT_RESERVED; 496 } 497 498 static int 499 eb_validate_vma(struct i915_execbuffer *eb, 500 struct drm_i915_gem_exec_object2 *entry, 501 struct i915_vma *vma) 502 { 503 /* Relocations are disallowed for all platforms after TGL-LP. This 504 * also covers all platforms with local memory. 505 */ 506 if (entry->relocation_count && 507 GRAPHICS_VER(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915)) 508 return -EINVAL; 509 510 if (unlikely(entry->flags & eb->invalid_flags)) 511 return -EINVAL; 512 513 if (unlikely(entry->alignment && 514 !is_power_of_2_u64(entry->alignment))) 515 return -EINVAL; 516 517 /* 518 * Offset can be used as input (EXEC_OBJECT_PINNED), reject 519 * any non-page-aligned or non-canonical addresses. 520 */ 521 if (unlikely(entry->flags & EXEC_OBJECT_PINNED && 522 entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK))) 523 return -EINVAL; 524 525 /* pad_to_size was once a reserved field, so sanitize it */ 526 if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) { 527 if (unlikely(offset_in_page(entry->pad_to_size))) 528 return -EINVAL; 529 } else { 530 entry->pad_to_size = 0; 531 } 532 /* 533 * From drm_mm perspective address space is continuous, 534 * so from this point we're always using non-canonical 535 * form internally. 
536 */ 537 entry->offset = gen8_noncanonical_addr(entry->offset); 538 539 if (!eb->reloc_cache.has_fence) { 540 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; 541 } else { 542 if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE || 543 eb->reloc_cache.needs_unfenced) && 544 i915_gem_object_is_tiled(vma->obj)) 545 entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP; 546 } 547 548 return 0; 549 } 550 551 static inline bool 552 is_batch_buffer(struct i915_execbuffer *eb, unsigned int buffer_idx) 553 { 554 return eb->args->flags & I915_EXEC_BATCH_FIRST ? 555 buffer_idx < eb->num_batches : 556 buffer_idx >= eb->args->buffer_count - eb->num_batches; 557 } 558 559 static int 560 eb_add_vma(struct i915_execbuffer *eb, 561 unsigned int *current_batch, 562 unsigned int i, 563 struct i915_vma *vma) 564 { 565 struct drm_i915_private *i915 = eb->i915; 566 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; 567 struct eb_vma *ev = &eb->vma[i]; 568 569 ev->vma = vma; 570 ev->exec = entry; 571 ev->flags = entry->flags; 572 573 if (eb->lut_size > 0) { 574 ev->handle = entry->handle; 575 hlist_add_head(&ev->node, 576 &eb->buckets[hash_32(entry->handle, 577 eb->lut_size)]); 578 } 579 580 if (entry->relocation_count) 581 list_add_tail(&ev->reloc_link, &eb->relocs); 582 583 /* 584 * SNA is doing fancy tricks with compressing batch buffers, which leads 585 * to negative relocation deltas. Usually that works out ok since the 586 * relocate address is still positive, except when the batch is placed 587 * very low in the GTT. Ensure this doesn't happen. 588 * 589 * Note that actual hangs have only been observed on gen7, but for 590 * paranoia do it everywhere. 591 */ 592 if (is_batch_buffer(eb, i)) { 593 if (entry->relocation_count && 594 !(ev->flags & EXEC_OBJECT_PINNED)) 595 ev->flags |= __EXEC_OBJECT_NEEDS_BIAS; 596 if (eb->reloc_cache.has_fence) 597 ev->flags |= EXEC_OBJECT_NEEDS_FENCE; 598 599 eb->batches[*current_batch] = ev; 600 601 if (unlikely(ev->flags & EXEC_OBJECT_WRITE)) { 602 drm_dbg(&i915->drm, 603 "Attempting to use self-modifying batch buffer\n"); 604 return -EINVAL; 605 } 606 607 if (range_overflows_t(u64, 608 eb->batch_start_offset, 609 eb->args->batch_len, 610 ev->vma->size)) { 611 drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n"); 612 return -EINVAL; 613 } 614 615 if (eb->args->batch_len == 0) 616 eb->batch_len[*current_batch] = ev->vma->size - 617 eb->batch_start_offset; 618 else 619 eb->batch_len[*current_batch] = eb->args->batch_len; 620 if (unlikely(eb->batch_len[*current_batch] == 0)) { /* impossible! */ 621 drm_dbg(&i915->drm, "Invalid batch length\n"); 622 return -EINVAL; 623 } 624 625 ++*current_batch; 626 } 627 628 return 0; 629 } 630 631 static inline int use_cpu_reloc(const struct reloc_cache *cache, 632 const struct drm_i915_gem_object *obj) 633 { 634 if (!i915_gem_object_has_struct_page(obj)) 635 return false; 636 637 if (DBG_FORCE_RELOC == FORCE_CPU_RELOC) 638 return true; 639 640 if (DBG_FORCE_RELOC == FORCE_GTT_RELOC) 641 return false; 642 643 /* 644 * For objects created by userspace through GEM_CREATE with pat_index 645 * set by set_pat extension, i915_gem_object_has_cache_level() always 646 * return true, otherwise the call would fall back to checking whether 647 * the object is un-cached. 
648 */ 649 return (cache->has_llc || 650 obj->cache_dirty || 651 !i915_gem_object_has_cache_level(obj, I915_CACHE_NONE)); 652 } 653 654 static int eb_reserve_vma(struct i915_execbuffer *eb, 655 struct eb_vma *ev, 656 u64 pin_flags) 657 { 658 struct drm_i915_gem_exec_object2 *entry = ev->exec; 659 struct i915_vma *vma = ev->vma; 660 int err; 661 662 if (drm_mm_node_allocated(&vma->node) && 663 eb_vma_misplaced(entry, vma, ev->flags)) { 664 err = i915_vma_unbind(vma); 665 if (err) 666 return err; 667 } 668 669 err = i915_vma_pin_ww(vma, &eb->ww, 670 entry->pad_to_size, entry->alignment, 671 eb_pin_flags(entry, ev->flags) | pin_flags); 672 if (err) 673 return err; 674 675 if (entry->offset != i915_vma_offset(vma)) { 676 entry->offset = i915_vma_offset(vma) | UPDATE; 677 eb->args->flags |= __EXEC_HAS_RELOC; 678 } 679 680 if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) { 681 err = i915_vma_pin_fence(vma); 682 if (unlikely(err)) 683 return err; 684 685 if (vma->fence) 686 ev->flags |= __EXEC_OBJECT_HAS_FENCE; 687 } 688 689 ev->flags |= __EXEC_OBJECT_HAS_PIN; 690 GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags)); 691 692 return 0; 693 } 694 695 static bool eb_unbind(struct i915_execbuffer *eb, bool force) 696 { 697 const unsigned int count = eb->buffer_count; 698 unsigned int i; 699 struct list_head last; 700 bool unpinned = false; 701 702 /* Resort *all* the objects into priority order */ 703 INIT_LIST_HEAD(&eb->unbound); 704 INIT_LIST_HEAD(&last); 705 706 for (i = 0; i < count; i++) { 707 struct eb_vma *ev = &eb->vma[i]; 708 unsigned int flags = ev->flags; 709 710 if (!force && flags & EXEC_OBJECT_PINNED && 711 flags & __EXEC_OBJECT_HAS_PIN) 712 continue; 713 714 unpinned = true; 715 eb_unreserve_vma(ev); 716 717 if (flags & EXEC_OBJECT_PINNED) 718 /* Pinned must have their slot */ 719 list_add(&ev->bind_link, &eb->unbound); 720 else if (flags & __EXEC_OBJECT_NEEDS_MAP) 721 /* Map require the lowest 256MiB (aperture) */ 722 list_add_tail(&ev->bind_link, &eb->unbound); 723 else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) 724 /* Prioritise 4GiB region for restricted bo */ 725 list_add(&ev->bind_link, &last); 726 else 727 list_add_tail(&ev->bind_link, &last); 728 } 729 730 list_splice_tail(&last, &eb->unbound); 731 return unpinned; 732 } 733 734 static int eb_reserve(struct i915_execbuffer *eb) 735 { 736 struct eb_vma *ev; 737 unsigned int pass; 738 int err = 0; 739 bool unpinned; 740 741 /* 742 * We have one more buffers that we couldn't bind, which could be due to 743 * various reasons. To resolve this we have 4 passes, with every next 744 * level turning the screws tighter: 745 * 746 * 0. Unbind all objects that do not match the GTT constraints for the 747 * execbuffer (fenceable, mappable, alignment etc). Bind all new 748 * objects. This avoids unnecessary unbinding of later objects in order 749 * to make room for the earlier objects *unless* we need to defragment. 750 * 751 * 1. Reorder the buffers, where objects with the most restrictive 752 * placement requirements go first (ignoring fixed location buffers for 753 * now). For example, objects needing the mappable aperture (the first 754 * 256M of GTT), should go first vs objects that can be placed just 755 * about anywhere. Repeat the previous pass. 756 * 757 * 2. Consider buffers that are pinned at a fixed location. Also try to 758 * evict the entire VM this time, leaving only objects that we were 759 * unable to lock. Try again to bind the buffers. (still using the new 760 * buffer order). 761 * 762 * 3. 
We likely have object lock contention for one or more stubborn 763 * objects in the VM, for which we need to evict to make forward 764 * progress (perhaps we are fighting the shrinker?). When evicting the 765 * VM this time around, anything that we can't lock we now track using 766 * the busy_bo, using the full lock (after dropping the vm->mutex to 767 * prevent deadlocks), instead of trylock. We then continue to evict the 768 * VM, this time with the stubborn object locked, which we can now 769 * hopefully unbind (if still bound in the VM). Repeat until the VM is 770 * evicted. Finally we should be able bind everything. 771 */ 772 for (pass = 0; pass <= 3; pass++) { 773 int pin_flags = PIN_USER | PIN_VALIDATE; 774 775 if (pass == 0) 776 pin_flags |= PIN_NONBLOCK; 777 778 if (pass >= 1) 779 unpinned = eb_unbind(eb, pass >= 2); 780 781 if (pass == 2) { 782 err = mutex_lock_interruptible(&eb->context->vm->mutex); 783 if (!err) { 784 err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL); 785 mutex_unlock(&eb->context->vm->mutex); 786 } 787 if (err) 788 return err; 789 } 790 791 if (pass == 3) { 792 retry: 793 err = mutex_lock_interruptible(&eb->context->vm->mutex); 794 if (!err) { 795 struct drm_i915_gem_object *busy_bo = NULL; 796 797 err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo); 798 mutex_unlock(&eb->context->vm->mutex); 799 if (err && busy_bo) { 800 err = i915_gem_object_lock(busy_bo, &eb->ww); 801 i915_gem_object_put(busy_bo); 802 if (!err) 803 goto retry; 804 } 805 } 806 if (err) 807 return err; 808 } 809 810 list_for_each_entry(ev, &eb->unbound, bind_link) { 811 err = eb_reserve_vma(eb, ev, pin_flags); 812 if (err) 813 break; 814 } 815 816 if (err != -ENOSPC) 817 break; 818 } 819 820 return err; 821 } 822 823 static int eb_select_context(struct i915_execbuffer *eb) 824 { 825 struct i915_gem_context *ctx; 826 827 ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1); 828 if (unlikely(IS_ERR(ctx))) 829 return PTR_ERR(ctx); 830 831 eb->gem_context = ctx; 832 if (i915_gem_context_has_full_ppgtt(ctx)) 833 eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT; 834 835 return 0; 836 } 837 838 static int __eb_add_lut(struct i915_execbuffer *eb, 839 u32 handle, struct i915_vma *vma) 840 { 841 struct i915_gem_context *ctx = eb->gem_context; 842 struct i915_lut_handle *lut; 843 int err; 844 845 lut = i915_lut_handle_alloc(); 846 if (unlikely(!lut)) 847 return -ENOMEM; 848 849 i915_vma_get(vma); 850 if (!atomic_fetch_inc(&vma->open_count)) 851 i915_vma_reopen(vma); 852 lut->handle = handle; 853 lut->ctx = ctx; 854 855 /* Check that the context hasn't been closed in the meantime */ 856 err = -EINTR; 857 if (!mutex_lock_interruptible(&ctx->lut_mutex)) { 858 if (likely(!i915_gem_context_is_closed(ctx))) 859 err = radix_tree_insert(&ctx->handles_vma, handle, vma); 860 else 861 err = -ENOENT; 862 if (err == 0) { /* And nor has this handle */ 863 struct drm_i915_gem_object *obj = vma->obj; 864 865 spin_lock(&obj->lut_lock); 866 if (idr_find(&eb->file->object_idr, handle) == obj) { 867 list_add(&lut->obj_link, &obj->lut_list); 868 } else { 869 radix_tree_delete(&ctx->handles_vma, handle); 870 err = -ENOENT; 871 } 872 spin_unlock(&obj->lut_lock); 873 } 874 mutex_unlock(&ctx->lut_mutex); 875 } 876 if (unlikely(err)) 877 goto err; 878 879 return 0; 880 881 err: 882 i915_vma_close(vma); 883 i915_vma_put(vma); 884 i915_lut_handle_free(lut); 885 return err; 886 } 887 888 static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle) 889 { 890 struct i915_address_space *vm = 
eb->context->vm; 891 892 do { 893 struct drm_i915_gem_object *obj; 894 struct i915_vma *vma; 895 int err; 896 897 rcu_read_lock(); 898 vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle); 899 if (likely(vma && vma->vm == vm)) 900 vma = i915_vma_tryget(vma); 901 rcu_read_unlock(); 902 if (likely(vma)) 903 return vma; 904 905 obj = i915_gem_object_lookup(eb->file, handle); 906 if (unlikely(!obj)) 907 return ERR_PTR(-ENOENT); 908 909 /* 910 * If the user has opted-in for protected-object tracking, make 911 * sure the object encryption can be used. 912 * We only need to do this when the object is first used with 913 * this context, because the context itself will be banned when 914 * the protected objects become invalid. 915 */ 916 if (i915_gem_context_uses_protected_content(eb->gem_context) && 917 i915_gem_object_is_protected(obj)) { 918 err = intel_pxp_key_check(eb->i915->pxp, obj, true); 919 if (err) { 920 i915_gem_object_put(obj); 921 return ERR_PTR(err); 922 } 923 } 924 925 vma = i915_vma_instance(obj, vm, NULL); 926 if (IS_ERR(vma)) { 927 i915_gem_object_put(obj); 928 return vma; 929 } 930 931 err = __eb_add_lut(eb, handle, vma); 932 if (likely(!err)) 933 return vma; 934 935 i915_gem_object_put(obj); 936 if (err != -EEXIST) 937 return ERR_PTR(err); 938 } while (1); 939 } 940 941 static int eb_lookup_vmas(struct i915_execbuffer *eb) 942 { 943 unsigned int i, current_batch = 0; 944 int err = 0; 945 946 INIT_LIST_HEAD(&eb->relocs); 947 948 for (i = 0; i < eb->buffer_count; i++) { 949 struct i915_vma *vma; 950 951 vma = eb_lookup_vma(eb, eb->exec[i].handle); 952 if (IS_ERR(vma)) { 953 err = PTR_ERR(vma); 954 goto err; 955 } 956 957 err = eb_validate_vma(eb, &eb->exec[i], vma); 958 if (unlikely(err)) { 959 i915_vma_put(vma); 960 goto err; 961 } 962 963 err = eb_add_vma(eb, ¤t_batch, i, vma); 964 if (err) 965 return err; 966 967 if (i915_gem_object_is_userptr(vma->obj)) { 968 err = i915_gem_object_userptr_submit_init(vma->obj); 969 if (err) { 970 if (i + 1 < eb->buffer_count) { 971 /* 972 * Execbuffer code expects last vma entry to be NULL, 973 * since we already initialized this entry, 974 * set the next value to NULL or we mess up 975 * cleanup handling. 
976 */ 977 eb->vma[i + 1].vma = NULL; 978 } 979 980 return err; 981 } 982 983 eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT; 984 eb->args->flags |= __EXEC_USERPTR_USED; 985 } 986 } 987 988 return 0; 989 990 err: 991 eb->vma[i].vma = NULL; 992 return err; 993 } 994 995 static int eb_lock_vmas(struct i915_execbuffer *eb) 996 { 997 unsigned int i; 998 int err; 999 1000 for (i = 0; i < eb->buffer_count; i++) { 1001 struct eb_vma *ev = &eb->vma[i]; 1002 struct i915_vma *vma = ev->vma; 1003 1004 err = i915_gem_object_lock(vma->obj, &eb->ww); 1005 if (err) 1006 return err; 1007 } 1008 1009 return 0; 1010 } 1011 1012 static int eb_validate_vmas(struct i915_execbuffer *eb) 1013 { 1014 unsigned int i; 1015 int err; 1016 1017 INIT_LIST_HEAD(&eb->unbound); 1018 1019 err = eb_lock_vmas(eb); 1020 if (err) 1021 return err; 1022 1023 for (i = 0; i < eb->buffer_count; i++) { 1024 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; 1025 struct eb_vma *ev = &eb->vma[i]; 1026 struct i915_vma *vma = ev->vma; 1027 1028 err = eb_pin_vma(eb, entry, ev); 1029 if (err == -EDEADLK) 1030 return err; 1031 1032 if (!err) { 1033 if (entry->offset != i915_vma_offset(vma)) { 1034 entry->offset = i915_vma_offset(vma) | UPDATE; 1035 eb->args->flags |= __EXEC_HAS_RELOC; 1036 } 1037 } else { 1038 eb_unreserve_vma(ev); 1039 1040 list_add_tail(&ev->bind_link, &eb->unbound); 1041 if (drm_mm_node_allocated(&vma->node)) { 1042 err = i915_vma_unbind(vma); 1043 if (err) 1044 return err; 1045 } 1046 } 1047 1048 /* Reserve enough slots to accommodate composite fences */ 1049 err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches); 1050 if (err) 1051 return err; 1052 1053 GEM_BUG_ON(drm_mm_node_allocated(&vma->node) && 1054 eb_vma_misplaced(&eb->exec[i], vma, ev->flags)); 1055 } 1056 1057 if (!list_empty(&eb->unbound)) 1058 return eb_reserve(eb); 1059 1060 return 0; 1061 } 1062 1063 static struct eb_vma * 1064 eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle) 1065 { 1066 if (eb->lut_size < 0) { 1067 if (handle >= -eb->lut_size) 1068 return NULL; 1069 return &eb->vma[handle]; 1070 } else { 1071 struct hlist_head *head; 1072 struct eb_vma *ev; 1073 1074 head = &eb->buckets[hash_32(handle, eb->lut_size)]; 1075 hlist_for_each_entry(ev, head, node) { 1076 if (ev->handle == handle) 1077 return ev; 1078 } 1079 return NULL; 1080 } 1081 } 1082 1083 static void eb_release_vmas(struct i915_execbuffer *eb, bool final) 1084 { 1085 const unsigned int count = eb->buffer_count; 1086 unsigned int i; 1087 1088 for (i = 0; i < count; i++) { 1089 struct eb_vma *ev = &eb->vma[i]; 1090 struct i915_vma *vma = ev->vma; 1091 1092 if (!vma) 1093 break; 1094 1095 eb_unreserve_vma(ev); 1096 1097 if (final) 1098 i915_vma_put(vma); 1099 } 1100 1101 eb_capture_release(eb); 1102 eb_unpin_engine(eb); 1103 } 1104 1105 static void eb_destroy(const struct i915_execbuffer *eb) 1106 { 1107 if (eb->lut_size > 0) 1108 kfree(eb->buckets); 1109 } 1110 1111 static inline u64 1112 relocation_target(const struct drm_i915_gem_relocation_entry *reloc, 1113 const struct i915_vma *target) 1114 { 1115 return gen8_canonical_addr((int)reloc->delta + i915_vma_offset(target)); 1116 } 1117 1118 static void reloc_cache_init(struct reloc_cache *cache, 1119 struct drm_i915_private *i915) 1120 { 1121 cache->page = -1; 1122 cache->vaddr = 0; 1123 /* Must be a variable in the struct to allow GCC to unroll. 
*/ 1124 cache->graphics_ver = GRAPHICS_VER(i915); 1125 cache->has_llc = HAS_LLC(i915); 1126 cache->use_64bit_reloc = HAS_64BIT_RELOC(i915); 1127 cache->has_fence = cache->graphics_ver < 4; 1128 cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment; 1129 cache->node.flags = 0; 1130 } 1131 1132 static inline void *unmask_page(unsigned long p) 1133 { 1134 return (void *)(uintptr_t)(p & PAGE_MASK); 1135 } 1136 1137 static inline unsigned int unmask_flags(unsigned long p) 1138 { 1139 return p & ~PAGE_MASK; 1140 } 1141 1142 #define KMAP 0x4 /* after CLFLUSH_FLAGS */ 1143 1144 static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache) 1145 { 1146 struct drm_i915_private *i915 = 1147 container_of(cache, struct i915_execbuffer, reloc_cache)->i915; 1148 return to_gt(i915)->ggtt; 1149 } 1150 1151 static void reloc_cache_unmap(struct reloc_cache *cache) 1152 { 1153 void *vaddr; 1154 1155 if (!cache->vaddr) 1156 return; 1157 1158 vaddr = unmask_page(cache->vaddr); 1159 if (cache->vaddr & KMAP) 1160 kunmap_atomic(vaddr); 1161 else 1162 io_mapping_unmap_atomic((void __iomem *)vaddr); 1163 } 1164 1165 static void reloc_cache_remap(struct reloc_cache *cache, 1166 struct drm_i915_gem_object *obj) 1167 { 1168 void *vaddr; 1169 1170 if (!cache->vaddr) 1171 return; 1172 1173 if (cache->vaddr & KMAP) { 1174 struct page *page = i915_gem_object_get_page(obj, cache->page); 1175 1176 vaddr = kmap_atomic(page); 1177 cache->vaddr = unmask_flags(cache->vaddr) | 1178 (unsigned long)vaddr; 1179 } else { 1180 struct i915_ggtt *ggtt = cache_to_ggtt(cache); 1181 unsigned long offset; 1182 1183 offset = cache->node.start; 1184 if (!drm_mm_node_allocated(&cache->node)) 1185 offset += cache->page << PAGE_SHIFT; 1186 1187 cache->vaddr = (unsigned long) 1188 io_mapping_map_atomic_wc(&ggtt->iomap, offset); 1189 } 1190 } 1191 1192 static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb) 1193 { 1194 void *vaddr; 1195 1196 if (!cache->vaddr) 1197 return; 1198 1199 vaddr = unmask_page(cache->vaddr); 1200 if (cache->vaddr & KMAP) { 1201 struct drm_i915_gem_object *obj = 1202 (struct drm_i915_gem_object *)cache->node.mm; 1203 if (cache->vaddr & CLFLUSH_AFTER) 1204 mb(); 1205 1206 kunmap_atomic(vaddr); 1207 i915_gem_object_finish_access(obj); 1208 } else { 1209 struct i915_ggtt *ggtt = cache_to_ggtt(cache); 1210 1211 intel_gt_flush_ggtt_writes(ggtt->vm.gt); 1212 io_mapping_unmap_atomic((void __iomem *)vaddr); 1213 1214 if (drm_mm_node_allocated(&cache->node)) { 1215 ggtt->vm.clear_range(&ggtt->vm, 1216 cache->node.start, 1217 cache->node.size); 1218 mutex_lock(&ggtt->vm.mutex); 1219 drm_mm_remove_node(&cache->node); 1220 mutex_unlock(&ggtt->vm.mutex); 1221 } else { 1222 i915_vma_unpin((struct i915_vma *)cache->node.mm); 1223 } 1224 } 1225 1226 cache->vaddr = 0; 1227 cache->page = -1; 1228 } 1229 1230 static void *reloc_kmap(struct drm_i915_gem_object *obj, 1231 struct reloc_cache *cache, 1232 unsigned long pageno) 1233 { 1234 void *vaddr; 1235 struct page *page; 1236 1237 if (cache->vaddr) { 1238 kunmap_atomic(unmask_page(cache->vaddr)); 1239 } else { 1240 unsigned int flushes; 1241 int err; 1242 1243 err = i915_gem_object_prepare_write(obj, &flushes); 1244 if (err) 1245 return ERR_PTR(err); 1246 1247 BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS); 1248 BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK); 1249 1250 cache->vaddr = flushes | KMAP; 1251 cache->node.mm = (void *)obj; 1252 if (flushes) 1253 mb(); 1254 } 1255 1256 page = i915_gem_object_get_page(obj, pageno); 1257 if (!obj->mm.dirty) 
1258 set_page_dirty(page); 1259 1260 vaddr = kmap_atomic(page); 1261 cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr; 1262 cache->page = pageno; 1263 1264 return vaddr; 1265 } 1266 1267 static void *reloc_iomap(struct i915_vma *batch, 1268 struct i915_execbuffer *eb, 1269 unsigned long page) 1270 { 1271 struct drm_i915_gem_object *obj = batch->obj; 1272 struct reloc_cache *cache = &eb->reloc_cache; 1273 struct i915_ggtt *ggtt = cache_to_ggtt(cache); 1274 unsigned long offset; 1275 void *vaddr; 1276 1277 if (cache->vaddr) { 1278 intel_gt_flush_ggtt_writes(ggtt->vm.gt); 1279 io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr)); 1280 } else { 1281 struct i915_vma *vma = ERR_PTR(-ENODEV); 1282 int err; 1283 1284 if (i915_gem_object_is_tiled(obj)) 1285 return ERR_PTR(-EINVAL); 1286 1287 if (use_cpu_reloc(cache, obj)) 1288 return NULL; 1289 1290 err = i915_gem_object_set_to_gtt_domain(obj, true); 1291 if (err) 1292 return ERR_PTR(err); 1293 1294 /* 1295 * i915_gem_object_ggtt_pin_ww may attempt to remove the batch 1296 * VMA from the object list because we no longer pin. 1297 * 1298 * Only attempt to pin the batch buffer to ggtt if the current batch 1299 * is not inside ggtt, or the batch buffer is not misplaced. 1300 */ 1301 if (!i915_is_ggtt(batch->vm) || 1302 !i915_vma_misplaced(batch, 0, 0, PIN_MAPPABLE)) { 1303 vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0, 1304 PIN_MAPPABLE | 1305 PIN_NONBLOCK /* NOWARN */ | 1306 PIN_NOEVICT); 1307 } 1308 1309 if (vma == ERR_PTR(-EDEADLK)) 1310 return vma; 1311 1312 if (IS_ERR(vma)) { 1313 memset(&cache->node, 0, sizeof(cache->node)); 1314 mutex_lock(&ggtt->vm.mutex); 1315 err = drm_mm_insert_node_in_range 1316 (&ggtt->vm.mm, &cache->node, 1317 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, 1318 0, ggtt->mappable_end, 1319 DRM_MM_INSERT_LOW); 1320 mutex_unlock(&ggtt->vm.mutex); 1321 if (err) /* no inactive aperture space, use cpu reloc */ 1322 return NULL; 1323 } else { 1324 cache->node.start = i915_ggtt_offset(vma); 1325 cache->node.mm = (void *)vma; 1326 } 1327 } 1328 1329 offset = cache->node.start; 1330 if (drm_mm_node_allocated(&cache->node)) { 1331 ggtt->vm.insert_page(&ggtt->vm, 1332 i915_gem_object_get_dma_address(obj, page), 1333 offset, 1334 i915_gem_get_pat_index(ggtt->vm.i915, 1335 I915_CACHE_NONE), 1336 0); 1337 } else { 1338 offset += page << PAGE_SHIFT; 1339 } 1340 1341 vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap, 1342 offset); 1343 cache->page = page; 1344 cache->vaddr = (unsigned long)vaddr; 1345 1346 return vaddr; 1347 } 1348 1349 static void *reloc_vaddr(struct i915_vma *vma, 1350 struct i915_execbuffer *eb, 1351 unsigned long page) 1352 { 1353 struct reloc_cache *cache = &eb->reloc_cache; 1354 void *vaddr; 1355 1356 if (cache->page == page) { 1357 vaddr = unmask_page(cache->vaddr); 1358 } else { 1359 vaddr = NULL; 1360 if ((cache->vaddr & KMAP) == 0) 1361 vaddr = reloc_iomap(vma, eb, page); 1362 if (!vaddr) 1363 vaddr = reloc_kmap(vma->obj, cache, page); 1364 } 1365 1366 return vaddr; 1367 } 1368 1369 static void clflush_write32(u32 *addr, u32 value, unsigned int flushes) 1370 { 1371 if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) { 1372 if (flushes & CLFLUSH_BEFORE) 1373 drm_clflush_virt_range(addr, sizeof(*addr)); 1374 1375 *addr = value; 1376 1377 /* 1378 * Writes to the same cacheline are serialised by the CPU 1379 * (including clflush). 
On the write path, we only require 1380 * that it hits memory in an orderly fashion and place 1381 * mb barriers at the start and end of the relocation phase 1382 * to ensure ordering of clflush wrt to the system. 1383 */ 1384 if (flushes & CLFLUSH_AFTER) 1385 drm_clflush_virt_range(addr, sizeof(*addr)); 1386 } else 1387 *addr = value; 1388 } 1389 1390 static u64 1391 relocate_entry(struct i915_vma *vma, 1392 const struct drm_i915_gem_relocation_entry *reloc, 1393 struct i915_execbuffer *eb, 1394 const struct i915_vma *target) 1395 { 1396 u64 target_addr = relocation_target(reloc, target); 1397 u64 offset = reloc->offset; 1398 bool wide = eb->reloc_cache.use_64bit_reloc; 1399 void *vaddr; 1400 1401 repeat: 1402 vaddr = reloc_vaddr(vma, eb, 1403 offset >> PAGE_SHIFT); 1404 if (IS_ERR(vaddr)) 1405 return PTR_ERR(vaddr); 1406 1407 GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32))); 1408 clflush_write32(vaddr + offset_in_page(offset), 1409 lower_32_bits(target_addr), 1410 eb->reloc_cache.vaddr); 1411 1412 if (wide) { 1413 offset += sizeof(u32); 1414 target_addr >>= 32; 1415 wide = false; 1416 goto repeat; 1417 } 1418 1419 return target->node.start | UPDATE; 1420 } 1421 1422 static u64 1423 eb_relocate_entry(struct i915_execbuffer *eb, 1424 struct eb_vma *ev, 1425 const struct drm_i915_gem_relocation_entry *reloc) 1426 { 1427 struct drm_i915_private *i915 = eb->i915; 1428 struct eb_vma *target; 1429 int err; 1430 1431 /* we've already hold a reference to all valid objects */ 1432 target = eb_get_vma(eb, reloc->target_handle); 1433 if (unlikely(!target)) 1434 return -ENOENT; 1435 1436 /* Validate that the target is in a valid r/w GPU domain */ 1437 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { 1438 drm_dbg(&i915->drm, "reloc with multiple write domains: " 1439 "target %d offset %d " 1440 "read %08x write %08x", 1441 reloc->target_handle, 1442 (int) reloc->offset, 1443 reloc->read_domains, 1444 reloc->write_domain); 1445 return -EINVAL; 1446 } 1447 if (unlikely((reloc->write_domain | reloc->read_domains) 1448 & ~I915_GEM_GPU_DOMAINS)) { 1449 drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: " 1450 "target %d offset %d " 1451 "read %08x write %08x", 1452 reloc->target_handle, 1453 (int) reloc->offset, 1454 reloc->read_domains, 1455 reloc->write_domain); 1456 return -EINVAL; 1457 } 1458 1459 if (reloc->write_domain) { 1460 target->flags |= EXEC_OBJECT_WRITE; 1461 1462 /* 1463 * Sandybridge PPGTT errata: We need a global gtt mapping 1464 * for MI and pipe_control writes because the gpu doesn't 1465 * properly redirect them through the ppgtt for non_secure 1466 * batchbuffers. 1467 */ 1468 if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && 1469 GRAPHICS_VER(eb->i915) == 6 && 1470 !i915_vma_is_bound(target->vma, I915_VMA_GLOBAL_BIND)) { 1471 struct i915_vma *vma = target->vma; 1472 1473 reloc_cache_unmap(&eb->reloc_cache); 1474 mutex_lock(&vma->vm->mutex); 1475 err = i915_vma_bind(target->vma, 1476 target->vma->obj->pat_index, 1477 PIN_GLOBAL, NULL, NULL); 1478 mutex_unlock(&vma->vm->mutex); 1479 reloc_cache_remap(&eb->reloc_cache, ev->vma->obj); 1480 if (err) 1481 return err; 1482 } 1483 } 1484 1485 /* 1486 * If the relocation already has the right value in it, no 1487 * more work needs to be done. 1488 */ 1489 if (!DBG_FORCE_RELOC && 1490 gen8_canonical_addr(i915_vma_offset(target->vma)) == reloc->presumed_offset) 1491 return 0; 1492 1493 /* Check that the relocation address is valid... 
*/ 1494 if (unlikely(reloc->offset > 1495 ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) { 1496 drm_dbg(&i915->drm, "Relocation beyond object bounds: " 1497 "target %d offset %d size %d.\n", 1498 reloc->target_handle, 1499 (int)reloc->offset, 1500 (int)ev->vma->size); 1501 return -EINVAL; 1502 } 1503 if (unlikely(reloc->offset & 3)) { 1504 drm_dbg(&i915->drm, "Relocation not 4-byte aligned: " 1505 "target %d offset %d.\n", 1506 reloc->target_handle, 1507 (int)reloc->offset); 1508 return -EINVAL; 1509 } 1510 1511 /* 1512 * If we write into the object, we need to force the synchronisation 1513 * barrier, either with an asynchronous clflush or if we executed the 1514 * patching using the GPU (though that should be serialised by the 1515 * timeline). To be completely sure, and since we are required to 1516 * do relocations we are already stalling, disable the user's opt 1517 * out of our synchronisation. 1518 */ 1519 ev->flags &= ~EXEC_OBJECT_ASYNC; 1520 1521 /* and update the user's relocation entry */ 1522 return relocate_entry(ev->vma, reloc, eb, target->vma); 1523 } 1524 1525 static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev) 1526 { 1527 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) 1528 struct drm_i915_gem_relocation_entry stack[N_RELOC(512)]; 1529 const struct drm_i915_gem_exec_object2 *entry = ev->exec; 1530 struct drm_i915_gem_relocation_entry __user *urelocs = 1531 u64_to_user_ptr(entry->relocs_ptr); 1532 unsigned long remain = entry->relocation_count; 1533 1534 if (unlikely(remain > N_RELOC(ULONG_MAX))) 1535 return -EINVAL; 1536 1537 /* 1538 * We must check that the entire relocation array is safe 1539 * to read. However, if the array is not writable the user loses 1540 * the updated relocation values. 1541 */ 1542 if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs)))) 1543 return -EFAULT; 1544 1545 do { 1546 struct drm_i915_gem_relocation_entry *r = stack; 1547 unsigned int count = 1548 min_t(unsigned long, remain, ARRAY_SIZE(stack)); 1549 unsigned int copied; 1550 1551 /* 1552 * This is the fast path and we cannot handle a pagefault 1553 * whilst holding the struct mutex lest the user pass in the 1554 * relocations contained within a mmaped bo. For in such a case 1555 * we, the page fault handler would call i915_gem_fault() and 1556 * we would try to acquire the struct mutex again. Obviously 1557 * this is bad and so lockdep complains vehemently. 1558 */ 1559 pagefault_disable(); 1560 copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0])); 1561 pagefault_enable(); 1562 if (unlikely(copied)) { 1563 remain = -EFAULT; 1564 goto out; 1565 } 1566 1567 remain -= count; 1568 do { 1569 u64 offset = eb_relocate_entry(eb, ev, r); 1570 1571 if (likely(offset == 0)) { 1572 } else if ((s64)offset < 0) { 1573 remain = (int)offset; 1574 goto out; 1575 } else { 1576 /* 1577 * Note that reporting an error now 1578 * leaves everything in an inconsistent 1579 * state as we have *already* changed 1580 * the relocation value inside the 1581 * object. As we have not changed the 1582 * reloc.presumed_offset or will not 1583 * change the execobject.offset, on the 1584 * call we may not rewrite the value 1585 * inside the object, leaving it 1586 * dangling and causing a GPU hang. Unless 1587 * userspace dynamically rebuilds the 1588 * relocations on each execbuf rather than 1589 * presume a static tree. 
1590 * 1591 * We did previously check if the relocations 1592 * were writable (access_ok), an error now 1593 * would be a strange race with mprotect, 1594 * having already demonstrated that we 1595 * can read from this userspace address. 1596 */ 1597 offset = gen8_canonical_addr(offset & ~UPDATE); 1598 __put_user(offset, 1599 &urelocs[r - stack].presumed_offset); 1600 } 1601 } while (r++, --count); 1602 urelocs += ARRAY_SIZE(stack); 1603 } while (remain); 1604 out: 1605 reloc_cache_reset(&eb->reloc_cache, eb); 1606 return remain; 1607 } 1608 1609 static int 1610 eb_relocate_vma_slow(struct i915_execbuffer *eb, struct eb_vma *ev) 1611 { 1612 const struct drm_i915_gem_exec_object2 *entry = ev->exec; 1613 struct drm_i915_gem_relocation_entry *relocs = 1614 u64_to_ptr(typeof(*relocs), entry->relocs_ptr); 1615 unsigned int i; 1616 int err; 1617 1618 for (i = 0; i < entry->relocation_count; i++) { 1619 u64 offset = eb_relocate_entry(eb, ev, &relocs[i]); 1620 1621 if ((s64)offset < 0) { 1622 err = (int)offset; 1623 goto err; 1624 } 1625 } 1626 err = 0; 1627 err: 1628 reloc_cache_reset(&eb->reloc_cache, eb); 1629 return err; 1630 } 1631 1632 static int check_relocations(const struct drm_i915_gem_exec_object2 *entry) 1633 { 1634 const char __user *addr, *end; 1635 unsigned long size; 1636 char __maybe_unused c; 1637 1638 size = entry->relocation_count; 1639 if (size == 0) 1640 return 0; 1641 1642 if (size > N_RELOC(ULONG_MAX)) 1643 return -EINVAL; 1644 1645 addr = u64_to_user_ptr(entry->relocs_ptr); 1646 size *= sizeof(struct drm_i915_gem_relocation_entry); 1647 if (!access_ok(addr, size)) 1648 return -EFAULT; 1649 1650 end = addr + size; 1651 for (; addr < end; addr += PAGE_SIZE) { 1652 int err = __get_user(c, addr); 1653 if (err) 1654 return err; 1655 } 1656 return __get_user(c, end - 1); 1657 } 1658 1659 static int eb_copy_relocations(const struct i915_execbuffer *eb) 1660 { 1661 struct drm_i915_gem_relocation_entry *relocs; 1662 const unsigned int count = eb->buffer_count; 1663 unsigned int i; 1664 int err; 1665 1666 for (i = 0; i < count; i++) { 1667 const unsigned int nreloc = eb->exec[i].relocation_count; 1668 struct drm_i915_gem_relocation_entry __user *urelocs; 1669 unsigned long size; 1670 unsigned long copied; 1671 1672 if (nreloc == 0) 1673 continue; 1674 1675 err = check_relocations(&eb->exec[i]); 1676 if (err) 1677 goto err; 1678 1679 urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr); 1680 size = nreloc * sizeof(*relocs); 1681 1682 relocs = kvmalloc_array(size, 1, GFP_KERNEL); 1683 if (!relocs) { 1684 err = -ENOMEM; 1685 goto err; 1686 } 1687 1688 /* copy_from_user is limited to < 4GiB */ 1689 copied = 0; 1690 do { 1691 unsigned int len = 1692 min_t(u64, BIT_ULL(31), size - copied); 1693 1694 if (__copy_from_user((char *)relocs + copied, 1695 (char __user *)urelocs + copied, 1696 len)) 1697 goto end; 1698 1699 copied += len; 1700 } while (copied < size); 1701 1702 /* 1703 * As we do not update the known relocation offsets after 1704 * relocating (due to the complexities in lock handling), 1705 * we need to mark them as invalid now so that we force the 1706 * relocation processing next time. Just in case the target 1707 * object is evicted and then rebound into its old 1708 * presumed_offset before the next execbuffer - if that 1709 * happened we would make the mistake of assuming that the 1710 * relocations were valid. 
1711 */ 1712 if (!user_access_begin(urelocs, size)) 1713 goto end; 1714 1715 for (copied = 0; copied < nreloc; copied++) 1716 unsafe_put_user(-1, 1717 &urelocs[copied].presumed_offset, 1718 end_user); 1719 user_access_end(); 1720 1721 eb->exec[i].relocs_ptr = (uintptr_t)relocs; 1722 } 1723 1724 return 0; 1725 1726 end_user: 1727 user_access_end(); 1728 end: 1729 kvfree(relocs); 1730 err = -EFAULT; 1731 err: 1732 while (i--) { 1733 relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr); 1734 if (eb->exec[i].relocation_count) 1735 kvfree(relocs); 1736 } 1737 return err; 1738 } 1739 1740 static int eb_prefault_relocations(const struct i915_execbuffer *eb) 1741 { 1742 const unsigned int count = eb->buffer_count; 1743 unsigned int i; 1744 1745 for (i = 0; i < count; i++) { 1746 int err; 1747 1748 err = check_relocations(&eb->exec[i]); 1749 if (err) 1750 return err; 1751 } 1752 1753 return 0; 1754 } 1755 1756 static int eb_reinit_userptr(struct i915_execbuffer *eb) 1757 { 1758 const unsigned int count = eb->buffer_count; 1759 unsigned int i; 1760 int ret; 1761 1762 if (likely(!(eb->args->flags & __EXEC_USERPTR_USED))) 1763 return 0; 1764 1765 for (i = 0; i < count; i++) { 1766 struct eb_vma *ev = &eb->vma[i]; 1767 1768 if (!i915_gem_object_is_userptr(ev->vma->obj)) 1769 continue; 1770 1771 ret = i915_gem_object_userptr_submit_init(ev->vma->obj); 1772 if (ret) 1773 return ret; 1774 1775 ev->flags |= __EXEC_OBJECT_USERPTR_INIT; 1776 } 1777 1778 return 0; 1779 } 1780 1781 static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb) 1782 { 1783 bool have_copy = false; 1784 struct eb_vma *ev; 1785 int err = 0; 1786 1787 repeat: 1788 if (signal_pending(current)) { 1789 err = -ERESTARTSYS; 1790 goto out; 1791 } 1792 1793 /* We may process another execbuffer during the unlock... */ 1794 eb_release_vmas(eb, false); 1795 i915_gem_ww_ctx_fini(&eb->ww); 1796 1797 /* 1798 * We take 3 passes through the slowpatch. 1799 * 1800 * 1 - we try to just prefault all the user relocation entries and 1801 * then attempt to reuse the atomic pagefault disabled fast path again. 1802 * 1803 * 2 - we copy the user entries to a local buffer here outside of the 1804 * local and allow ourselves to wait upon any rendering before 1805 * relocations 1806 * 1807 * 3 - we already have a local copy of the relocation entries, but 1808 * were interrupted (EAGAIN) whilst waiting for the objects, try again. 
1809 */ 1810 if (!err) { 1811 err = eb_prefault_relocations(eb); 1812 } else if (!have_copy) { 1813 err = eb_copy_relocations(eb); 1814 have_copy = err == 0; 1815 } else { 1816 cond_resched(); 1817 err = 0; 1818 } 1819 1820 if (!err) 1821 err = eb_reinit_userptr(eb); 1822 1823 i915_gem_ww_ctx_init(&eb->ww, true); 1824 if (err) 1825 goto out; 1826 1827 /* reacquire the objects */ 1828 repeat_validate: 1829 err = eb_pin_engine(eb, false); 1830 if (err) 1831 goto err; 1832 1833 err = eb_validate_vmas(eb); 1834 if (err) 1835 goto err; 1836 1837 GEM_BUG_ON(!eb->batches[0]); 1838 1839 list_for_each_entry(ev, &eb->relocs, reloc_link) { 1840 if (!have_copy) { 1841 err = eb_relocate_vma(eb, ev); 1842 if (err) 1843 break; 1844 } else { 1845 err = eb_relocate_vma_slow(eb, ev); 1846 if (err) 1847 break; 1848 } 1849 } 1850 1851 if (err == -EDEADLK) 1852 goto err; 1853 1854 if (err && !have_copy) 1855 goto repeat; 1856 1857 if (err) 1858 goto err; 1859 1860 /* as last step, parse the command buffer */ 1861 err = eb_parse(eb); 1862 if (err) 1863 goto err; 1864 1865 /* 1866 * Leave the user relocations as are, this is the painfully slow path, 1867 * and we want to avoid the complication of dropping the lock whilst 1868 * having buffers reserved in the aperture and so causing spurious 1869 * ENOSPC for random operations. 1870 */ 1871 1872 err: 1873 if (err == -EDEADLK) { 1874 eb_release_vmas(eb, false); 1875 err = i915_gem_ww_ctx_backoff(&eb->ww); 1876 if (!err) 1877 goto repeat_validate; 1878 } 1879 1880 if (err == -EAGAIN) 1881 goto repeat; 1882 1883 out: 1884 if (have_copy) { 1885 const unsigned int count = eb->buffer_count; 1886 unsigned int i; 1887 1888 for (i = 0; i < count; i++) { 1889 const struct drm_i915_gem_exec_object2 *entry = 1890 &eb->exec[i]; 1891 struct drm_i915_gem_relocation_entry *relocs; 1892 1893 if (!entry->relocation_count) 1894 continue; 1895 1896 relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr); 1897 kvfree(relocs); 1898 } 1899 } 1900 1901 return err; 1902 } 1903 1904 static int eb_relocate_parse(struct i915_execbuffer *eb) 1905 { 1906 int err; 1907 bool throttle = true; 1908 1909 retry: 1910 err = eb_pin_engine(eb, throttle); 1911 if (err) { 1912 if (err != -EDEADLK) 1913 return err; 1914 1915 goto err; 1916 } 1917 1918 /* only throttle once, even if we didn't need to throttle */ 1919 throttle = false; 1920 1921 err = eb_validate_vmas(eb); 1922 if (err == -EAGAIN) 1923 goto slow; 1924 else if (err) 1925 goto err; 1926 1927 /* The objects are in their final locations, apply the relocations. */ 1928 if (eb->args->flags & __EXEC_HAS_RELOC) { 1929 struct eb_vma *ev; 1930 1931 list_for_each_entry(ev, &eb->relocs, reloc_link) { 1932 err = eb_relocate_vma(eb, ev); 1933 if (err) 1934 break; 1935 } 1936 1937 if (err == -EDEADLK) 1938 goto err; 1939 else if (err) 1940 goto slow; 1941 } 1942 1943 if (!err) 1944 err = eb_parse(eb); 1945 1946 err: 1947 if (err == -EDEADLK) { 1948 eb_release_vmas(eb, false); 1949 err = i915_gem_ww_ctx_backoff(&eb->ww); 1950 if (!err) 1951 goto retry; 1952 } 1953 1954 return err; 1955 1956 slow: 1957 err = eb_relocate_parse_slow(eb); 1958 if (err) 1959 /* 1960 * If the user expects the execobject.offset and 1961 * reloc.presumed_offset to be an exact match, 1962 * as for using NO_RELOC, then we cannot update 1963 * the execobject.offset until we have completed 1964 * relocation. 
1965 */ 1966 eb->args->flags &= ~__EXEC_HAS_RELOC; 1967 1968 return err; 1969 } 1970 1971 /* 1972 * We use two helper loops to control the order in which requests / batches are 1973 * created and added to the backend. Requests are created in order from the parent to 1974 * the last child. Requests are added in the reverse order, from the last child 1975 * to the parent. This is done for locking reasons as the timeline lock is acquired 1976 * during request creation and released when the request is added to the 1977 * backend. To make lockdep happy (see intel_context_timeline_lock) this must be 1978 * the ordering. 1979 */ 1980 #define for_each_batch_create_order(_eb, _i) \ 1981 for ((_i) = 0; (_i) < (_eb)->num_batches; ++(_i)) 1982 #define for_each_batch_add_order(_eb, _i) \ 1983 BUILD_BUG_ON(!typecheck(int, _i)); \ 1984 for ((_i) = (_eb)->num_batches - 1; (_i) >= 0; --(_i)) 1985 1986 static struct i915_request * 1987 eb_find_first_request_added(struct i915_execbuffer *eb) 1988 { 1989 int i; 1990 1991 for_each_batch_add_order(eb, i) 1992 if (eb->requests[i]) 1993 return eb->requests[i]; 1994 1995 GEM_BUG_ON("Request not found"); 1996 1997 return NULL; 1998 } 1999 2000 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 2001 2002 /* Stage with GFP_KERNEL allocations before we enter the signaling critical path */ 2003 static int eb_capture_stage(struct i915_execbuffer *eb) 2004 { 2005 const unsigned int count = eb->buffer_count; 2006 unsigned int i = count, j; 2007 2008 while (i--) { 2009 struct eb_vma *ev = &eb->vma[i]; 2010 struct i915_vma *vma = ev->vma; 2011 unsigned int flags = ev->flags; 2012 2013 if (!(flags & EXEC_OBJECT_CAPTURE)) 2014 continue; 2015 2016 if (i915_gem_context_is_recoverable(eb->gem_context) && 2017 (IS_DGFX(eb->i915) || GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 0))) 2018 return -EINVAL; 2019 2020 for_each_batch_create_order(eb, j) { 2021 struct i915_capture_list *capture; 2022 2023 capture = kmalloc(sizeof(*capture), GFP_KERNEL); 2024 if (!capture) 2025 continue; 2026 2027 capture->next = eb->capture_lists[j]; 2028 capture->vma_res = i915_vma_resource_get(vma->resource); 2029 eb->capture_lists[j] = capture; 2030 } 2031 } 2032 2033 return 0; 2034 } 2035 2036 /* Commit once we're in the critical path */ 2037 static void eb_capture_commit(struct i915_execbuffer *eb) 2038 { 2039 unsigned int j; 2040 2041 for_each_batch_create_order(eb, j) { 2042 struct i915_request *rq = eb->requests[j]; 2043 2044 if (!rq) 2045 break; 2046 2047 rq->capture_list = eb->capture_lists[j]; 2048 eb->capture_lists[j] = NULL; 2049 } 2050 } 2051 2052 /* 2053 * Release anything that didn't get committed due to errors. 2054 * The capture_list will otherwise be freed at request retire.
2055 */ 2056 static void eb_capture_release(struct i915_execbuffer *eb) 2057 { 2058 unsigned int j; 2059 2060 for_each_batch_create_order(eb, j) { 2061 if (eb->capture_lists[j]) { 2062 i915_request_free_capture_list(eb->capture_lists[j]); 2063 eb->capture_lists[j] = NULL; 2064 } 2065 } 2066 } 2067 2068 static void eb_capture_list_clear(struct i915_execbuffer *eb) 2069 { 2070 memset(eb->capture_lists, 0, sizeof(eb->capture_lists)); 2071 } 2072 2073 #else 2074 2075 static int eb_capture_stage(struct i915_execbuffer *eb) 2076 { 2077 return 0; 2078 } 2079 2080 static void eb_capture_commit(struct i915_execbuffer *eb) 2081 { 2082 } 2083 2084 static void eb_capture_release(struct i915_execbuffer *eb) 2085 { 2086 } 2087 2088 static void eb_capture_list_clear(struct i915_execbuffer *eb) 2089 { 2090 } 2091 2092 #endif 2093 2094 static int eb_move_to_gpu(struct i915_execbuffer *eb) 2095 { 2096 const unsigned int count = eb->buffer_count; 2097 unsigned int i = count; 2098 int err = 0, j; 2099 2100 while (i--) { 2101 struct eb_vma *ev = &eb->vma[i]; 2102 struct i915_vma *vma = ev->vma; 2103 unsigned int flags = ev->flags; 2104 struct drm_i915_gem_object *obj = vma->obj; 2105 2106 assert_vma_held(vma); 2107 2108 /* 2109 * If the GPU is not _reading_ through the CPU cache, we need 2110 * to make sure that any writes (both previous GPU writes from 2111 * before a change in snooping levels and normal CPU writes) 2112 * caught in that cache are flushed to main memory. 2113 * 2114 * We want to say 2115 * obj->cache_dirty && 2116 * !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ) 2117 * but gcc's optimiser doesn't handle that as well and emits 2118 * two jumps instead of one. Maybe one day... 2119 * 2120 * FIXME: There is also sync flushing in set_pages(), which 2121 * serves a different purpose(some of the time at least). 2122 * 2123 * We should consider: 2124 * 2125 * 1. Rip out the async flush code. 2126 * 2127 * 2. Or make the sync flushing use the async clflush path 2128 * using mandatory fences underneath. Currently the below 2129 * async flush happens after we bind the object. 2130 */ 2131 if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) { 2132 if (i915_gem_clflush_object(obj, 0)) 2133 flags &= ~EXEC_OBJECT_ASYNC; 2134 } 2135 2136 /* We only need to await on the first request */ 2137 if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) { 2138 err = i915_request_await_object 2139 (eb_find_first_request_added(eb), obj, 2140 flags & EXEC_OBJECT_WRITE); 2141 } 2142 2143 for_each_batch_add_order(eb, j) { 2144 if (err) 2145 break; 2146 if (!eb->requests[j]) 2147 continue; 2148 2149 err = _i915_vma_move_to_active(vma, eb->requests[j], 2150 j ? NULL : 2151 eb->composite_fence ? 
2152 eb->composite_fence : 2153 &eb->requests[j]->fence, 2154 flags | __EXEC_OBJECT_NO_RESERVE | 2155 __EXEC_OBJECT_NO_REQUEST_AWAIT); 2156 } 2157 } 2158 2159 #ifdef CONFIG_MMU_NOTIFIER 2160 if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) { 2161 read_lock(&eb->i915->mm.notifier_lock); 2162 2163 /* 2164 * count is always at least 1, otherwise __EXEC_USERPTR_USED 2165 * could not have been set 2166 */ 2167 for (i = 0; i < count; i++) { 2168 struct eb_vma *ev = &eb->vma[i]; 2169 struct drm_i915_gem_object *obj = ev->vma->obj; 2170 2171 if (!i915_gem_object_is_userptr(obj)) 2172 continue; 2173 2174 err = i915_gem_object_userptr_submit_done(obj); 2175 if (err) 2176 break; 2177 } 2178 2179 read_unlock(&eb->i915->mm.notifier_lock); 2180 } 2181 #endif 2182 2183 if (unlikely(err)) 2184 goto err_skip; 2185 2186 /* Unconditionally flush any chipset caches (for streaming writes). */ 2187 intel_gt_chipset_flush(eb->gt); 2188 eb_capture_commit(eb); 2189 2190 return 0; 2191 2192 err_skip: 2193 for_each_batch_create_order(eb, j) { 2194 if (!eb->requests[j]) 2195 break; 2196 2197 i915_request_set_error_once(eb->requests[j], err); 2198 } 2199 return err; 2200 } 2201 2202 static int i915_gem_check_execbuffer(struct drm_i915_private *i915, 2203 struct drm_i915_gem_execbuffer2 *exec) 2204 { 2205 if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS) 2206 return -EINVAL; 2207 2208 /* Kernel clipping was a DRI1 misfeature */ 2209 if (!(exec->flags & (I915_EXEC_FENCE_ARRAY | 2210 I915_EXEC_USE_EXTENSIONS))) { 2211 if (exec->num_cliprects || exec->cliprects_ptr) 2212 return -EINVAL; 2213 } 2214 2215 if (exec->DR4 == 0xffffffff) { 2216 drm_dbg(&i915->drm, "UXA submitting garbage DR4, fixing up\n"); 2217 exec->DR4 = 0; 2218 } 2219 if (exec->DR1 || exec->DR4) 2220 return -EINVAL; 2221 2222 if ((exec->batch_start_offset | exec->batch_len) & 0x7) 2223 return -EINVAL; 2224 2225 return 0; 2226 } 2227 2228 static int i915_reset_gen7_sol_offsets(struct i915_request *rq) 2229 { 2230 u32 *cs; 2231 int i; 2232 2233 if (GRAPHICS_VER(rq->i915) != 7 || rq->engine->id != RCS0) { 2234 drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n"); 2235 return -EINVAL; 2236 } 2237 2238 cs = intel_ring_begin(rq, 4 * 2 + 2); 2239 if (IS_ERR(cs)) 2240 return PTR_ERR(cs); 2241 2242 *cs++ = MI_LOAD_REGISTER_IMM(4); 2243 for (i = 0; i < 4; i++) { 2244 *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i)); 2245 *cs++ = 0; 2246 } 2247 *cs++ = MI_NOOP; 2248 intel_ring_advance(rq, cs); 2249 2250 return 0; 2251 } 2252 2253 static struct i915_vma * 2254 shadow_batch_pin(struct i915_execbuffer *eb, 2255 struct drm_i915_gem_object *obj, 2256 struct i915_address_space *vm, 2257 unsigned int flags) 2258 { 2259 struct i915_vma *vma; 2260 int err; 2261 2262 vma = i915_vma_instance(obj, vm, NULL); 2263 if (IS_ERR(vma)) 2264 return vma; 2265 2266 err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, flags | PIN_VALIDATE); 2267 if (err) 2268 return ERR_PTR(err); 2269 2270 return vma; 2271 } 2272 2273 static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma) 2274 { 2275 /* 2276 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure 2277 * batch" bit. Hence we need to pin secure batches into the global gtt. 2278 * hsw should have this fixed, but bdw mucks it up again. 
*/ 2279 if (eb->batch_flags & I915_DISPATCH_SECURE) 2280 return i915_gem_object_ggtt_pin_ww(vma->obj, &eb->ww, NULL, 0, 0, PIN_VALIDATE); 2281 2282 return NULL; 2283 } 2284 2285 static int eb_parse(struct i915_execbuffer *eb) 2286 { 2287 struct drm_i915_private *i915 = eb->i915; 2288 struct intel_gt_buffer_pool_node *pool = eb->batch_pool; 2289 struct i915_vma *shadow, *trampoline, *batch; 2290 unsigned long len; 2291 int err; 2292 2293 if (!eb_use_cmdparser(eb)) { 2294 batch = eb_dispatch_secure(eb, eb->batches[0]->vma); 2295 if (IS_ERR(batch)) 2296 return PTR_ERR(batch); 2297 2298 goto secure_batch; 2299 } 2300 2301 if (intel_context_is_parallel(eb->context)) 2302 return -EINVAL; 2303 2304 len = eb->batch_len[0]; 2305 if (!CMDPARSER_USES_GGTT(eb->i915)) { 2306 /* 2307 * ppGTT backed shadow buffers must be mapped RO, to prevent 2308 * post-scan tampering 2309 */ 2310 if (!eb->context->vm->has_read_only) { 2311 drm_dbg(&i915->drm, 2312 "Cannot prevent post-scan tampering without RO capable vm\n"); 2313 return -EINVAL; 2314 } 2315 } else { 2316 len += I915_CMD_PARSER_TRAMPOLINE_SIZE; 2317 } 2318 if (unlikely(len < eb->batch_len[0])) /* last paranoid check of overflow */ 2319 return -EINVAL; 2320 2321 if (!pool) { 2322 pool = intel_gt_get_buffer_pool(eb->gt, len, 2323 I915_MAP_WB); 2324 if (IS_ERR(pool)) 2325 return PTR_ERR(pool); 2326 eb->batch_pool = pool; 2327 } 2328 2329 err = i915_gem_object_lock(pool->obj, &eb->ww); 2330 if (err) 2331 return err; 2332 2333 shadow = shadow_batch_pin(eb, pool->obj, eb->context->vm, PIN_USER); 2334 if (IS_ERR(shadow)) 2335 return PTR_ERR(shadow); 2336 2337 intel_gt_buffer_pool_mark_used(pool); 2338 i915_gem_object_set_readonly(shadow->obj); 2339 shadow->private = pool; 2340 2341 trampoline = NULL; 2342 if (CMDPARSER_USES_GGTT(eb->i915)) { 2343 trampoline = shadow; 2344 2345 shadow = shadow_batch_pin(eb, pool->obj, 2346 &eb->gt->ggtt->vm, 2347 PIN_GLOBAL); 2348 if (IS_ERR(shadow)) 2349 return PTR_ERR(shadow); 2350 2351 shadow->private = pool; 2352 2353 eb->batch_flags |= I915_DISPATCH_SECURE; 2354 } 2355 2356 batch = eb_dispatch_secure(eb, shadow); 2357 if (IS_ERR(batch)) 2358 return PTR_ERR(batch); 2359 2360 err = dma_resv_reserve_fences(shadow->obj->base.resv, 1); 2361 if (err) 2362 return err; 2363 2364 err = intel_engine_cmd_parser(eb->context->engine, 2365 eb->batches[0]->vma, 2366 eb->batch_start_offset, 2367 eb->batch_len[0], 2368 shadow, trampoline); 2369 if (err) 2370 return err; 2371 2372 eb->batches[0] = &eb->vma[eb->buffer_count++]; 2373 eb->batches[0]->vma = i915_vma_get(shadow); 2374 eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN; 2375 2376 eb->trampoline = trampoline; 2377 eb->batch_start_offset = 0; 2378 2379 secure_batch: 2380 if (batch) { 2381 if (intel_context_is_parallel(eb->context)) 2382 return -EINVAL; 2383 2384 eb->batches[0] = &eb->vma[eb->buffer_count++]; 2385 eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN; 2386 eb->batches[0]->vma = i915_vma_get(batch); 2387 } 2388 return 0; 2389 } 2390 2391 static int eb_request_submit(struct i915_execbuffer *eb, 2392 struct i915_request *rq, 2393 struct i915_vma *batch, 2394 u64 batch_len) 2395 { 2396 int err; 2397 2398 if (intel_context_nopreempt(rq->context)) 2399 __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags); 2400 2401 if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) { 2402 err = i915_reset_gen7_sol_offsets(rq); 2403 if (err) 2404 return err; 2405 } 2406 2407 /* 2408 * After we completed waiting for other engines (using HW semaphores) 2409 * then we can signal that this request/batch 
is ready to run. This 2410 * allows us to determine if the batch is still waiting on the GPU 2411 * or actually running by checking the breadcrumb. 2412 */ 2413 if (rq->context->engine->emit_init_breadcrumb) { 2414 err = rq->context->engine->emit_init_breadcrumb(rq); 2415 if (err) 2416 return err; 2417 } 2418 2419 err = rq->context->engine->emit_bb_start(rq, 2420 i915_vma_offset(batch) + 2421 eb->batch_start_offset, 2422 batch_len, 2423 eb->batch_flags); 2424 if (err) 2425 return err; 2426 2427 if (eb->trampoline) { 2428 GEM_BUG_ON(intel_context_is_parallel(rq->context)); 2429 GEM_BUG_ON(eb->batch_start_offset); 2430 err = rq->context->engine->emit_bb_start(rq, 2431 i915_vma_offset(eb->trampoline) + 2432 batch_len, 0, 0); 2433 if (err) 2434 return err; 2435 } 2436 2437 return 0; 2438 } 2439 2440 static int eb_submit(struct i915_execbuffer *eb) 2441 { 2442 unsigned int i; 2443 int err; 2444 2445 err = eb_move_to_gpu(eb); 2446 2447 for_each_batch_create_order(eb, i) { 2448 if (!eb->requests[i]) 2449 break; 2450 2451 trace_i915_request_queue(eb->requests[i], eb->batch_flags); 2452 if (!err) 2453 err = eb_request_submit(eb, eb->requests[i], 2454 eb->batches[i]->vma, 2455 eb->batch_len[i]); 2456 } 2457 2458 return err; 2459 } 2460 2461 /* 2462 * Find one BSD ring to dispatch the corresponding BSD command. 2463 * The engine index is returned. 2464 */ 2465 static unsigned int 2466 gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv, 2467 struct drm_file *file) 2468 { 2469 struct drm_i915_file_private *file_priv = file->driver_priv; 2470 2471 /* Check whether the file_priv has already selected one ring. */ 2472 if ((int)file_priv->bsd_engine < 0) 2473 file_priv->bsd_engine = 2474 get_random_u32_below(dev_priv->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO]); 2475 2476 return file_priv->bsd_engine; 2477 } 2478 2479 static const enum intel_engine_id user_ring_map[] = { 2480 [I915_EXEC_DEFAULT] = RCS0, 2481 [I915_EXEC_RENDER] = RCS0, 2482 [I915_EXEC_BLT] = BCS0, 2483 [I915_EXEC_BSD] = VCS0, 2484 [I915_EXEC_VEBOX] = VECS0 2485 }; 2486 2487 static struct i915_request *eb_throttle(struct i915_execbuffer *eb, struct intel_context *ce) 2488 { 2489 struct intel_ring *ring = ce->ring; 2490 struct intel_timeline *tl = ce->timeline; 2491 struct i915_request *rq; 2492 2493 /* 2494 * Completely unscientific finger-in-the-air estimates for suitable 2495 * maximum user request size (to avoid blocking) and then backoff. 2496 */ 2497 if (intel_ring_update_space(ring) >= PAGE_SIZE) 2498 return NULL; 2499 2500 /* 2501 * Find a request that after waiting upon, there will be at least half 2502 * the ring available. The hysteresis allows us to compete for the 2503 * shared ring and should mean that we sleep less often prior to 2504 * claiming our resources, but not so long that the ring completely 2505 * drains before we can submit our next request. 
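 *
 * Worked example (illustrative numbers only): with ring->size = 16 KiB
 * and ring->emit = 12 KiB, a request whose postfix sits at 6 KiB frees
 * roughly (6 KiB - 12 KiB) mod 16 KiB, i.e. about 10 KiB, once it
 * completes. That is more than half the ring, so the loop below would
 * stop at it and we would wait on that request.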
2506 */ 2507 list_for_each_entry(rq, &tl->requests, link) { 2508 if (rq->ring != ring) 2509 continue; 2510 2511 if (__intel_ring_space(rq->postfix, 2512 ring->emit, ring->size) > ring->size / 2) 2513 break; 2514 } 2515 if (&rq->link == &tl->requests) 2516 return NULL; /* weird, we will check again later for real */ 2517 2518 return i915_request_get(rq); 2519 } 2520 2521 static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce, 2522 bool throttle) 2523 { 2524 struct intel_timeline *tl; 2525 struct i915_request *rq = NULL; 2526 2527 /* 2528 * Take a local wakeref for preparing to dispatch the execbuf as 2529 * we expect to access the hardware fairly frequently in the 2530 * process, and require the engine to be kept awake between accesses. 2531 * Upon dispatch, we acquire another prolonged wakeref that we hold 2532 * until the timeline is idle, which in turn releases the wakeref 2533 * taken on the engine, and the parent device. 2534 */ 2535 tl = intel_context_timeline_lock(ce); 2536 if (IS_ERR(tl)) 2537 return PTR_ERR(tl); 2538 2539 intel_context_enter(ce); 2540 if (throttle) 2541 rq = eb_throttle(eb, ce); 2542 intel_context_timeline_unlock(tl); 2543 2544 if (rq) { 2545 bool nonblock = eb->file->filp->f_flags & O_NONBLOCK; 2546 long timeout = nonblock ? 0 : MAX_SCHEDULE_TIMEOUT; 2547 2548 if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, 2549 timeout) < 0) { 2550 i915_request_put(rq); 2551 2552 /* 2553 * Error path, cannot use intel_context_timeline_lock as 2554 * that is user interruptible and this cleanup step 2555 * must be done. 2556 */ 2557 mutex_lock(&ce->timeline->mutex); 2558 intel_context_exit(ce); 2559 mutex_unlock(&ce->timeline->mutex); 2560 2561 if (nonblock) 2562 return -EWOULDBLOCK; 2563 else 2564 return -EINTR; 2565 } 2566 i915_request_put(rq); 2567 } 2568 2569 return 0; 2570 } 2571 2572 static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle) 2573 { 2574 struct intel_context *ce = eb->context, *child; 2575 int err; 2576 int i = 0, j = 0; 2577 2578 GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED); 2579 2580 if (unlikely(intel_context_is_banned(ce))) 2581 return -EIO; 2582 2583 /* 2584 * Pinning the contexts may generate requests in order to acquire 2585 * GGTT space, so do this first before we reserve a seqno for 2586 * ourselves.
2587 */ 2588 err = intel_context_pin_ww(ce, &eb->ww); 2589 if (err) 2590 return err; 2591 for_each_child(ce, child) { 2592 err = intel_context_pin_ww(child, &eb->ww); 2593 GEM_BUG_ON(err); /* perma-pinned should incr a counter */ 2594 } 2595 2596 for_each_child(ce, child) { 2597 err = eb_pin_timeline(eb, child, throttle); 2598 if (err) 2599 goto unwind; 2600 ++i; 2601 } 2602 err = eb_pin_timeline(eb, ce, throttle); 2603 if (err) 2604 goto unwind; 2605 2606 eb->args->flags |= __EXEC_ENGINE_PINNED; 2607 return 0; 2608 2609 unwind: 2610 for_each_child(ce, child) { 2611 if (j++ < i) { 2612 mutex_lock(&child->timeline->mutex); 2613 intel_context_exit(child); 2614 mutex_unlock(&child->timeline->mutex); 2615 } 2616 } 2617 for_each_child(ce, child) 2618 intel_context_unpin(child); 2619 intel_context_unpin(ce); 2620 return err; 2621 } 2622 2623 static void eb_unpin_engine(struct i915_execbuffer *eb) 2624 { 2625 struct intel_context *ce = eb->context, *child; 2626 2627 if (!(eb->args->flags & __EXEC_ENGINE_PINNED)) 2628 return; 2629 2630 eb->args->flags &= ~__EXEC_ENGINE_PINNED; 2631 2632 for_each_child(ce, child) { 2633 mutex_lock(&child->timeline->mutex); 2634 intel_context_exit(child); 2635 mutex_unlock(&child->timeline->mutex); 2636 2637 intel_context_unpin(child); 2638 } 2639 2640 mutex_lock(&ce->timeline->mutex); 2641 intel_context_exit(ce); 2642 mutex_unlock(&ce->timeline->mutex); 2643 2644 intel_context_unpin(ce); 2645 } 2646 2647 static unsigned int 2648 eb_select_legacy_ring(struct i915_execbuffer *eb) 2649 { 2650 struct drm_i915_private *i915 = eb->i915; 2651 struct drm_i915_gem_execbuffer2 *args = eb->args; 2652 unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK; 2653 2654 if (user_ring_id != I915_EXEC_BSD && 2655 (args->flags & I915_EXEC_BSD_MASK)) { 2656 drm_dbg(&i915->drm, 2657 "execbuf with non bsd ring but with invalid " 2658 "bsd dispatch flags: %d\n", (int)(args->flags)); 2659 return -1; 2660 } 2661 2662 if (user_ring_id == I915_EXEC_BSD && 2663 i915->engine_uabi_class_count[I915_ENGINE_CLASS_VIDEO] > 1) { 2664 unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK; 2665 2666 if (bsd_idx == I915_EXEC_BSD_DEFAULT) { 2667 bsd_idx = gen8_dispatch_bsd_engine(i915, eb->file); 2668 } else if (bsd_idx >= I915_EXEC_BSD_RING1 && 2669 bsd_idx <= I915_EXEC_BSD_RING2) { 2670 bsd_idx >>= I915_EXEC_BSD_SHIFT; 2671 bsd_idx--; 2672 } else { 2673 drm_dbg(&i915->drm, 2674 "execbuf with unknown bsd ring: %u\n", 2675 bsd_idx); 2676 return -1; 2677 } 2678 2679 return _VCS(bsd_idx); 2680 } 2681 2682 if (user_ring_id >= ARRAY_SIZE(user_ring_map)) { 2683 drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n", 2684 user_ring_id); 2685 return -1; 2686 } 2687 2688 return user_ring_map[user_ring_id]; 2689 } 2690 2691 static int 2692 eb_select_engine(struct i915_execbuffer *eb) 2693 { 2694 struct intel_context *ce, *child; 2695 struct intel_gt *gt; 2696 unsigned int idx; 2697 int err; 2698 2699 if (i915_gem_context_user_engines(eb->gem_context)) 2700 idx = eb->args->flags & I915_EXEC_RING_MASK; 2701 else 2702 idx = eb_select_legacy_ring(eb); 2703 2704 ce = i915_gem_context_get_engine(eb->gem_context, idx); 2705 if (IS_ERR(ce)) 2706 return PTR_ERR(ce); 2707 2708 if (intel_context_is_parallel(ce)) { 2709 if (eb->buffer_count < ce->parallel.number_children + 1) { 2710 intel_context_put(ce); 2711 return -EINVAL; 2712 } 2713 if (eb->batch_start_offset || eb->args->batch_len) { 2714 intel_context_put(ce); 2715 return -EINVAL; 2716 } 2717 } 2718 eb->num_batches = ce->parallel.number_children + 1; 2719 gt 
= ce->engine->gt; 2720 2721 for_each_child(ce, child) 2722 intel_context_get(child); 2723 intel_gt_pm_get(gt); 2724 /* 2725 * Keep GT0 active on MTL so that i915_vma_parked() doesn't 2726 * free VMAs while execbuf ioctl is validating VMAs. 2727 */ 2728 if (gt->info.id) 2729 intel_gt_pm_get(to_gt(gt->i915)); 2730 2731 if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { 2732 err = intel_context_alloc_state(ce); 2733 if (err) 2734 goto err; 2735 } 2736 for_each_child(ce, child) { 2737 if (!test_bit(CONTEXT_ALLOC_BIT, &child->flags)) { 2738 err = intel_context_alloc_state(child); 2739 if (err) 2740 goto err; 2741 } 2742 } 2743 2744 /* 2745 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report 2746 * EIO if the GPU is already wedged. 2747 */ 2748 err = intel_gt_terminally_wedged(ce->engine->gt); 2749 if (err) 2750 goto err; 2751 2752 if (!i915_vm_tryget(ce->vm)) { 2753 err = -ENOENT; 2754 goto err; 2755 } 2756 2757 eb->context = ce; 2758 eb->gt = ce->engine->gt; 2759 2760 /* 2761 * Make sure engine pool stays alive even if we call intel_context_put 2762 * during ww handling. The pool is destroyed when last pm reference 2763 * is dropped, which breaks our -EDEADLK handling. 2764 */ 2765 return err; 2766 2767 err: 2768 if (gt->info.id) 2769 intel_gt_pm_put(to_gt(gt->i915)); 2770 2771 intel_gt_pm_put(gt); 2772 for_each_child(ce, child) 2773 intel_context_put(child); 2774 intel_context_put(ce); 2775 return err; 2776 } 2777 2778 static void 2779 eb_put_engine(struct i915_execbuffer *eb) 2780 { 2781 struct intel_context *child; 2782 2783 i915_vm_put(eb->context->vm); 2784 /* 2785 * This works in conjunction with eb_select_engine() to prevent 2786 * i915_vma_parked() from interfering while execbuf validates vmas. 2787 */ 2788 if (eb->gt->info.id) 2789 intel_gt_pm_put(to_gt(eb->gt->i915)); 2790 intel_gt_pm_put(eb->gt); 2791 for_each_child(eb->context, child) 2792 intel_context_put(child); 2793 intel_context_put(eb->context); 2794 } 2795 2796 static void 2797 __free_fence_array(struct eb_fence *fences, unsigned int n) 2798 { 2799 while (n--) { 2800 drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2)); 2801 dma_fence_put(fences[n].dma_fence); 2802 dma_fence_chain_free(fences[n].chain_fence); 2803 } 2804 kvfree(fences); 2805 } 2806 2807 static int 2808 add_timeline_fence_array(struct i915_execbuffer *eb, 2809 const struct drm_i915_gem_execbuffer_ext_timeline_fences *timeline_fences) 2810 { 2811 struct drm_i915_gem_exec_fence __user *user_fences; 2812 u64 __user *user_values; 2813 struct eb_fence *f; 2814 u64 nfences; 2815 int err = 0; 2816 2817 nfences = timeline_fences->fence_count; 2818 if (!nfences) 2819 return 0; 2820 2821 /* Check multiplication overflow for access_ok() and kvmalloc_array() */ 2822 BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long)); 2823 if (nfences > min_t(unsigned long, 2824 ULONG_MAX / sizeof(*user_fences), 2825 SIZE_MAX / sizeof(*f)) - eb->num_fences) 2826 return -EINVAL; 2827 2828 user_fences = u64_to_user_ptr(timeline_fences->handles_ptr); 2829 if (!access_ok(user_fences, nfences * sizeof(*user_fences))) 2830 return -EFAULT; 2831 2832 user_values = u64_to_user_ptr(timeline_fences->values_ptr); 2833 if (!access_ok(user_values, nfences * sizeof(*user_values))) 2834 return -EFAULT; 2835 2836 f = krealloc(eb->fences, 2837 (eb->num_fences + nfences) * sizeof(*f), 2838 __GFP_NOWARN | GFP_KERNEL); 2839 if (!f) 2840 return -ENOMEM; 2841 2842 eb->fences = f; 2843 f += eb->num_fences; 2844 2845 BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) & 2846 ~__I915_EXEC_FENCE_UNKNOWN_FLAGS); 
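	/*
	 * The loop below walks two user arrays in lockstep: handles_ptr
	 * points at drm_i915_gem_exec_fence entries and values_ptr at the
	 * matching u64 timeline points. A minimal userspace-side sketch of
	 * that layout (not driver code; the variable names are assumptions):
	 *
	 *	struct drm_i915_gem_exec_fence handles[2] = {
	 *		{ .handle = wait_syncobj,   .flags = I915_EXEC_FENCE_WAIT },
	 *		{ .handle = signal_syncobj, .flags = I915_EXEC_FENCE_SIGNAL },
	 *	};
	 *	__u64 points[2] = { wait_point, signal_point };
	 *	struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
	 *		.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
	 *		.fence_count = 2,
	 *		.handles_ptr = (__u64)(uintptr_t)handles,
	 *		.values_ptr = (__u64)(uintptr_t)points,
	 *	};
	 *
	 * handles[i] pairs with points[i]; a point of 0 makes the entry
	 * behave like a plain (binary) syncobj.
	 */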
2847 2848 while (nfences--) { 2849 struct drm_i915_gem_exec_fence user_fence; 2850 struct drm_syncobj *syncobj; 2851 struct dma_fence *fence = NULL; 2852 u64 point; 2853 2854 if (__copy_from_user(&user_fence, 2855 user_fences++, 2856 sizeof(user_fence))) 2857 return -EFAULT; 2858 2859 if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) 2860 return -EINVAL; 2861 2862 if (__get_user(point, user_values++)) 2863 return -EFAULT; 2864 2865 syncobj = drm_syncobj_find(eb->file, user_fence.handle); 2866 if (!syncobj) { 2867 drm_dbg(&eb->i915->drm, 2868 "Invalid syncobj handle provided\n"); 2869 return -ENOENT; 2870 } 2871 2872 fence = drm_syncobj_fence_get(syncobj); 2873 2874 if (!fence && user_fence.flags && 2875 !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) { 2876 drm_dbg(&eb->i915->drm, 2877 "Syncobj handle has no fence\n"); 2878 drm_syncobj_put(syncobj); 2879 return -EINVAL; 2880 } 2881 2882 if (fence) 2883 err = dma_fence_chain_find_seqno(&fence, point); 2884 2885 if (err && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) { 2886 drm_dbg(&eb->i915->drm, 2887 "Syncobj handle missing requested point %llu\n", 2888 point); 2889 dma_fence_put(fence); 2890 drm_syncobj_put(syncobj); 2891 return err; 2892 } 2893 2894 /* 2895 * A point might have been signaled already and 2896 * garbage collected from the timeline. In this case 2897 * just ignore the point and carry on. 2898 */ 2899 if (!fence && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) { 2900 drm_syncobj_put(syncobj); 2901 continue; 2902 } 2903 2904 /* 2905 * For timeline syncobjs we need to preallocate chains for 2906 * later signaling. 2907 */ 2908 if (point != 0 && user_fence.flags & I915_EXEC_FENCE_SIGNAL) { 2909 /* 2910 * Waiting and signaling the same point (when point != 2911 * 0) would break the timeline. 
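 * In that case the request would depend on the very point it is itself
 * expected to signal.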
2912 */ 2913 if (user_fence.flags & I915_EXEC_FENCE_WAIT) { 2914 drm_dbg(&eb->i915->drm, 2915 "Trying to wait & signal the same timeline point.\n"); 2916 dma_fence_put(fence); 2917 drm_syncobj_put(syncobj); 2918 return -EINVAL; 2919 } 2920 2921 f->chain_fence = dma_fence_chain_alloc(); 2922 if (!f->chain_fence) { 2923 drm_syncobj_put(syncobj); 2924 dma_fence_put(fence); 2925 return -ENOMEM; 2926 } 2927 } else { 2928 f->chain_fence = NULL; 2929 } 2930 2931 f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2); 2932 f->dma_fence = fence; 2933 f->value = point; 2934 f++; 2935 eb->num_fences++; 2936 } 2937 2938 return 0; 2939 } 2940 2941 static int add_fence_array(struct i915_execbuffer *eb) 2942 { 2943 struct drm_i915_gem_execbuffer2 *args = eb->args; 2944 struct drm_i915_gem_exec_fence __user *user; 2945 unsigned long num_fences = args->num_cliprects; 2946 struct eb_fence *f; 2947 2948 if (!(args->flags & I915_EXEC_FENCE_ARRAY)) 2949 return 0; 2950 2951 if (!num_fences) 2952 return 0; 2953 2954 /* Check multiplication overflow for access_ok() and kvmalloc_array() */ 2955 BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long)); 2956 if (num_fences > min_t(unsigned long, 2957 ULONG_MAX / sizeof(*user), 2958 SIZE_MAX / sizeof(*f) - eb->num_fences)) 2959 return -EINVAL; 2960 2961 user = u64_to_user_ptr(args->cliprects_ptr); 2962 if (!access_ok(user, num_fences * sizeof(*user))) 2963 return -EFAULT; 2964 2965 f = krealloc(eb->fences, 2966 (eb->num_fences + num_fences) * sizeof(*f), 2967 __GFP_NOWARN | GFP_KERNEL); 2968 if (!f) 2969 return -ENOMEM; 2970 2971 eb->fences = f; 2972 f += eb->num_fences; 2973 while (num_fences--) { 2974 struct drm_i915_gem_exec_fence user_fence; 2975 struct drm_syncobj *syncobj; 2976 struct dma_fence *fence = NULL; 2977 2978 if (__copy_from_user(&user_fence, user++, sizeof(user_fence))) 2979 return -EFAULT; 2980 2981 if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) 2982 return -EINVAL; 2983 2984 syncobj = drm_syncobj_find(eb->file, user_fence.handle); 2985 if (!syncobj) { 2986 drm_dbg(&eb->i915->drm, 2987 "Invalid syncobj handle provided\n"); 2988 return -ENOENT; 2989 } 2990 2991 if (user_fence.flags & I915_EXEC_FENCE_WAIT) { 2992 fence = drm_syncobj_fence_get(syncobj); 2993 if (!fence) { 2994 drm_dbg(&eb->i915->drm, 2995 "Syncobj handle has no fence\n"); 2996 drm_syncobj_put(syncobj); 2997 return -EINVAL; 2998 } 2999 } 3000 3001 BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) & 3002 ~__I915_EXEC_FENCE_UNKNOWN_FLAGS); 3003 3004 f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2); 3005 f->dma_fence = fence; 3006 f->value = 0; 3007 f->chain_fence = NULL; 3008 f++; 3009 eb->num_fences++; 3010 } 3011 3012 return 0; 3013 } 3014 3015 static void put_fence_array(struct eb_fence *fences, int num_fences) 3016 { 3017 if (fences) 3018 __free_fence_array(fences, num_fences); 3019 } 3020 3021 static int 3022 await_fence_array(struct i915_execbuffer *eb, 3023 struct i915_request *rq) 3024 { 3025 unsigned int n; 3026 int err; 3027 3028 for (n = 0; n < eb->num_fences; n++) { 3029 if (!eb->fences[n].dma_fence) 3030 continue; 3031 3032 err = i915_request_await_dma_fence(rq, eb->fences[n].dma_fence); 3033 if (err < 0) 3034 return err; 3035 } 3036 3037 return 0; 3038 } 3039 3040 static void signal_fence_array(const struct i915_execbuffer *eb, 3041 struct dma_fence * const fence) 3042 { 3043 unsigned int n; 3044 3045 for (n = 0; n < eb->num_fences; n++) { 3046 struct drm_syncobj *syncobj; 3047 unsigned int flags; 3048 3049 syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2); 
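		/*
		 * 'flags' now holds the I915_EXEC_FENCE_* bits that were
		 * packed into the two low bits of the syncobj pointer when
		 * the fence array was parsed (see add_fence_array() and
		 * add_timeline_fence_array()).
		 */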
3050 if (!(flags & I915_EXEC_FENCE_SIGNAL)) 3051 continue; 3052 3053 if (eb->fences[n].chain_fence) { 3054 drm_syncobj_add_point(syncobj, 3055 eb->fences[n].chain_fence, 3056 fence, 3057 eb->fences[n].value); 3058 /* 3059 * The chain's ownership is transferred to the 3060 * timeline. 3061 */ 3062 eb->fences[n].chain_fence = NULL; 3063 } else { 3064 drm_syncobj_replace_fence(syncobj, fence); 3065 } 3066 } 3067 } 3068 3069 static int 3070 parse_timeline_fences(struct i915_user_extension __user *ext, void *data) 3071 { 3072 struct i915_execbuffer *eb = data; 3073 struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences; 3074 3075 if (copy_from_user(&timeline_fences, ext, sizeof(timeline_fences))) 3076 return -EFAULT; 3077 3078 return add_timeline_fence_array(eb, &timeline_fences); 3079 } 3080 3081 static void retire_requests(struct intel_timeline *tl, struct i915_request *end) 3082 { 3083 struct i915_request *rq, *rn; 3084 3085 list_for_each_entry_safe(rq, rn, &tl->requests, link) 3086 if (rq == end || !i915_request_retire(rq)) 3087 break; 3088 } 3089 3090 static int eb_request_add(struct i915_execbuffer *eb, struct i915_request *rq, 3091 int err, bool last_parallel) 3092 { 3093 struct intel_timeline * const tl = i915_request_timeline(rq); 3094 struct i915_sched_attr attr = {}; 3095 struct i915_request *prev; 3096 3097 lockdep_assert_held(&tl->mutex); 3098 lockdep_unpin_lock(&tl->mutex, rq->cookie); 3099 3100 trace_i915_request_add(rq); 3101 3102 prev = __i915_request_commit(rq); 3103 3104 /* Check that the context wasn't destroyed before submission */ 3105 if (likely(!intel_context_is_closed(eb->context))) { 3106 attr = eb->gem_context->sched; 3107 } else { 3108 /* Serialise with context_close via the add_to_timeline */ 3109 i915_request_set_error_once(rq, -ENOENT); 3110 __i915_request_skip(rq); 3111 err = -ENOENT; /* override any transient errors */ 3112 } 3113 3114 if (intel_context_is_parallel(eb->context)) { 3115 if (err) { 3116 __i915_request_skip(rq); 3117 set_bit(I915_FENCE_FLAG_SKIP_PARALLEL, 3118 &rq->fence.flags); 3119 } 3120 if (last_parallel) 3121 set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, 3122 &rq->fence.flags); 3123 } 3124 3125 __i915_request_queue(rq, &attr); 3126 3127 /* Try to clean up the client's timeline after submitting the request */ 3128 if (prev) 3129 retire_requests(tl, prev); 3130 3131 mutex_unlock(&tl->mutex); 3132 3133 return err; 3134 } 3135 3136 static int eb_requests_add(struct i915_execbuffer *eb, int err) 3137 { 3138 int i; 3139 3140 /* 3141 * We iterate in reverse order of creation to release timeline mutexes in 3142 * same order. 3143 */ 3144 for_each_batch_add_order(eb, i) { 3145 struct i915_request *rq = eb->requests[i]; 3146 3147 if (!rq) 3148 continue; 3149 err |= eb_request_add(eb, rq, err, i == 0); 3150 } 3151 3152 return err; 3153 } 3154 3155 static const i915_user_extension_fn execbuf_extensions[] = { 3156 [DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES] = parse_timeline_fences, 3157 }; 3158 3159 static int 3160 parse_execbuf2_extensions(struct drm_i915_gem_execbuffer2 *args, 3161 struct i915_execbuffer *eb) 3162 { 3163 if (!(args->flags & I915_EXEC_USE_EXTENSIONS)) 3164 return 0; 3165 3166 /* The execbuf2 extension mechanism reuses cliprects_ptr. So we cannot 3167 * have another flag also using it at the same time. 
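 *
 * For reference, an illustrative userspace-side hand-off through this
 * field (a sketch; 'ext' and 'args' are assumed variables, and a
 * next_extension of 0 terminates the chain):
 *
 *	ext.base.next_extension = 0;
 *	args.flags |= I915_EXEC_USE_EXTENSIONS;
 *	args.num_cliprects = 0;
 *	args.cliprects_ptr = (__u64)(uintptr_t)&ext;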
3168 */ 3169 if (eb->args->flags & I915_EXEC_FENCE_ARRAY) 3170 return -EINVAL; 3171 3172 if (args->num_cliprects != 0) 3173 return -EINVAL; 3174 3175 return i915_user_extensions(u64_to_user_ptr(args->cliprects_ptr), 3176 execbuf_extensions, 3177 ARRAY_SIZE(execbuf_extensions), 3178 eb); 3179 } 3180 3181 static void eb_requests_get(struct i915_execbuffer *eb) 3182 { 3183 unsigned int i; 3184 3185 for_each_batch_create_order(eb, i) { 3186 if (!eb->requests[i]) 3187 break; 3188 3189 i915_request_get(eb->requests[i]); 3190 } 3191 } 3192 3193 static void eb_requests_put(struct i915_execbuffer *eb) 3194 { 3195 unsigned int i; 3196 3197 for_each_batch_create_order(eb, i) { 3198 if (!eb->requests[i]) 3199 break; 3200 3201 i915_request_put(eb->requests[i]); 3202 } 3203 } 3204 3205 static struct sync_file * 3206 eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd) 3207 { 3208 struct sync_file *out_fence = NULL; 3209 struct dma_fence_array *fence_array; 3210 struct dma_fence **fences; 3211 unsigned int i; 3212 3213 GEM_BUG_ON(!intel_context_is_parent(eb->context)); 3214 3215 fences = kmalloc_array(eb->num_batches, sizeof(*fences), GFP_KERNEL); 3216 if (!fences) 3217 return ERR_PTR(-ENOMEM); 3218 3219 for_each_batch_create_order(eb, i) { 3220 fences[i] = &eb->requests[i]->fence; 3221 __set_bit(I915_FENCE_FLAG_COMPOSITE, 3222 &eb->requests[i]->fence.flags); 3223 } 3224 3225 fence_array = dma_fence_array_create(eb->num_batches, 3226 fences, 3227 eb->context->parallel.fence_context, 3228 eb->context->parallel.seqno++, 3229 false); 3230 if (!fence_array) { 3231 kfree(fences); 3232 return ERR_PTR(-ENOMEM); 3233 } 3234 3235 /* Move ownership to the dma_fence_array created above */ 3236 for_each_batch_create_order(eb, i) 3237 dma_fence_get(fences[i]); 3238 3239 if (out_fence_fd != -1) { 3240 out_fence = sync_file_create(&fence_array->base); 3241 /* sync_file now owns fence_arry, drop creation ref */ 3242 dma_fence_put(&fence_array->base); 3243 if (!out_fence) 3244 return ERR_PTR(-ENOMEM); 3245 } 3246 3247 eb->composite_fence = &fence_array->base; 3248 3249 return out_fence; 3250 } 3251 3252 static struct sync_file * 3253 eb_fences_add(struct i915_execbuffer *eb, struct i915_request *rq, 3254 struct dma_fence *in_fence, int out_fence_fd) 3255 { 3256 struct sync_file *out_fence = NULL; 3257 int err; 3258 3259 if (unlikely(eb->gem_context->syncobj)) { 3260 struct dma_fence *fence; 3261 3262 fence = drm_syncobj_fence_get(eb->gem_context->syncobj); 3263 err = i915_request_await_dma_fence(rq, fence); 3264 dma_fence_put(fence); 3265 if (err) 3266 return ERR_PTR(err); 3267 } 3268 3269 if (in_fence) { 3270 if (eb->args->flags & I915_EXEC_FENCE_SUBMIT) 3271 err = i915_request_await_execution(rq, in_fence); 3272 else 3273 err = i915_request_await_dma_fence(rq, in_fence); 3274 if (err < 0) 3275 return ERR_PTR(err); 3276 } 3277 3278 if (eb->fences) { 3279 err = await_fence_array(eb, rq); 3280 if (err) 3281 return ERR_PTR(err); 3282 } 3283 3284 if (intel_context_is_parallel(eb->context)) { 3285 out_fence = eb_composite_fence_create(eb, out_fence_fd); 3286 if (IS_ERR(out_fence)) 3287 return ERR_PTR(-ENOMEM); 3288 } else if (out_fence_fd != -1) { 3289 out_fence = sync_file_create(&rq->fence); 3290 if (!out_fence) 3291 return ERR_PTR(-ENOMEM); 3292 } 3293 3294 return out_fence; 3295 } 3296 3297 static struct intel_context * 3298 eb_find_context(struct i915_execbuffer *eb, unsigned int context_number) 3299 { 3300 struct intel_context *child; 3301 3302 if (likely(context_number == 0)) 3303 return 
eb->context; 3304 3305 for_each_child(eb->context, child) 3306 if (!--context_number) 3307 return child; 3308 3309 GEM_BUG_ON("Context not found"); 3310 3311 return NULL; 3312 } 3313 3314 static struct sync_file * 3315 eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence, 3316 int out_fence_fd) 3317 { 3318 struct sync_file *out_fence = NULL; 3319 unsigned int i; 3320 3321 for_each_batch_create_order(eb, i) { 3322 /* Allocate a request for this batch buffer nice and early. */ 3323 eb->requests[i] = i915_request_create(eb_find_context(eb, i)); 3324 if (IS_ERR(eb->requests[i])) { 3325 out_fence = ERR_CAST(eb->requests[i]); 3326 eb->requests[i] = NULL; 3327 return out_fence; 3328 } 3329 3330 /* 3331 * Only the first request added (committed to backend) has to 3332 * take the in fences into account as all subsequent requests 3333 * will have fences inserted inbetween them. 3334 */ 3335 if (i + 1 == eb->num_batches) { 3336 out_fence = eb_fences_add(eb, eb->requests[i], 3337 in_fence, out_fence_fd); 3338 if (IS_ERR(out_fence)) 3339 return out_fence; 3340 } 3341 3342 /* 3343 * Not really on stack, but we don't want to call 3344 * kfree on the batch_snapshot when we put it, so use the 3345 * _onstack interface. 3346 */ 3347 if (eb->batches[i]->vma) 3348 eb->requests[i]->batch_res = 3349 i915_vma_resource_get(eb->batches[i]->vma->resource); 3350 if (eb->batch_pool) { 3351 GEM_BUG_ON(intel_context_is_parallel(eb->context)); 3352 intel_gt_buffer_pool_mark_active(eb->batch_pool, 3353 eb->requests[i]); 3354 } 3355 } 3356 3357 return out_fence; 3358 } 3359 3360 static int 3361 i915_gem_do_execbuffer(struct drm_device *dev, 3362 struct drm_file *file, 3363 struct drm_i915_gem_execbuffer2 *args, 3364 struct drm_i915_gem_exec_object2 *exec) 3365 { 3366 struct drm_i915_private *i915 = to_i915(dev); 3367 struct i915_execbuffer eb; 3368 struct dma_fence *in_fence = NULL; 3369 struct sync_file *out_fence = NULL; 3370 int out_fence_fd = -1; 3371 int err; 3372 3373 BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS); 3374 BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & 3375 ~__EXEC_OBJECT_UNKNOWN_FLAGS); 3376 3377 eb.i915 = i915; 3378 eb.file = file; 3379 eb.args = args; 3380 if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC)) 3381 args->flags |= __EXEC_HAS_RELOC; 3382 3383 eb.exec = exec; 3384 eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1); 3385 eb.vma[0].vma = NULL; 3386 eb.batch_pool = NULL; 3387 3388 eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; 3389 reloc_cache_init(&eb.reloc_cache, eb.i915); 3390 3391 eb.buffer_count = args->buffer_count; 3392 eb.batch_start_offset = args->batch_start_offset; 3393 eb.trampoline = NULL; 3394 3395 eb.fences = NULL; 3396 eb.num_fences = 0; 3397 3398 eb_capture_list_clear(&eb); 3399 3400 memset(eb.requests, 0, sizeof(struct i915_request *) * 3401 ARRAY_SIZE(eb.requests)); 3402 eb.composite_fence = NULL; 3403 3404 eb.batch_flags = 0; 3405 if (args->flags & I915_EXEC_SECURE) { 3406 if (GRAPHICS_VER(i915) >= 11) 3407 return -ENODEV; 3408 3409 /* Return -EPERM to trigger fallback code on old binaries. 
*/ 3410 if (!HAS_SECURE_BATCHES(i915)) 3411 return -EPERM; 3412 3413 if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN)) 3414 return -EPERM; 3415 3416 eb.batch_flags |= I915_DISPATCH_SECURE; 3417 } 3418 if (args->flags & I915_EXEC_IS_PINNED) 3419 eb.batch_flags |= I915_DISPATCH_PINNED; 3420 3421 err = parse_execbuf2_extensions(args, &eb); 3422 if (err) 3423 goto err_ext; 3424 3425 err = add_fence_array(&eb); 3426 if (err) 3427 goto err_ext; 3428 3429 #define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT) 3430 if (args->flags & IN_FENCES) { 3431 if ((args->flags & IN_FENCES) == IN_FENCES) 3432 return -EINVAL; 3433 3434 in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2)); 3435 if (!in_fence) { 3436 err = -EINVAL; 3437 goto err_ext; 3438 } 3439 } 3440 #undef IN_FENCES 3441 3442 if (args->flags & I915_EXEC_FENCE_OUT) { 3443 out_fence_fd = get_unused_fd_flags(O_CLOEXEC); 3444 if (out_fence_fd < 0) { 3445 err = out_fence_fd; 3446 goto err_in_fence; 3447 } 3448 } 3449 3450 err = eb_create(&eb); 3451 if (err) 3452 goto err_out_fence; 3453 3454 GEM_BUG_ON(!eb.lut_size); 3455 3456 err = eb_select_context(&eb); 3457 if (unlikely(err)) 3458 goto err_destroy; 3459 3460 err = eb_select_engine(&eb); 3461 if (unlikely(err)) 3462 goto err_context; 3463 3464 err = eb_lookup_vmas(&eb); 3465 if (err) { 3466 eb_release_vmas(&eb, true); 3467 goto err_engine; 3468 } 3469 3470 i915_gem_ww_ctx_init(&eb.ww, true); 3471 3472 err = eb_relocate_parse(&eb); 3473 if (err) { 3474 /* 3475 * If the user expects the execobject.offset and 3476 * reloc.presumed_offset to be an exact match, 3477 * as for using NO_RELOC, then we cannot update 3478 * the execobject.offset until we have completed 3479 * relocation. 3480 */ 3481 args->flags &= ~__EXEC_HAS_RELOC; 3482 goto err_vma; 3483 } 3484 3485 ww_acquire_done(&eb.ww.ctx); 3486 err = eb_capture_stage(&eb); 3487 if (err) 3488 goto err_vma; 3489 3490 out_fence = eb_requests_create(&eb, in_fence, out_fence_fd); 3491 if (IS_ERR(out_fence)) { 3492 err = PTR_ERR(out_fence); 3493 out_fence = NULL; 3494 if (eb.requests[0]) 3495 goto err_request; 3496 else 3497 goto err_vma; 3498 } 3499 3500 err = eb_submit(&eb); 3501 3502 err_request: 3503 eb_requests_get(&eb); 3504 err = eb_requests_add(&eb, err); 3505 3506 if (eb.fences) 3507 signal_fence_array(&eb, eb.composite_fence ? 3508 eb.composite_fence : 3509 &eb.requests[0]->fence); 3510 3511 if (unlikely(eb.gem_context->syncobj)) { 3512 drm_syncobj_replace_fence(eb.gem_context->syncobj, 3513 eb.composite_fence ? 
3514 eb.composite_fence : 3515 &eb.requests[0]->fence); 3516 } 3517 3518 if (out_fence) { 3519 if (err == 0) { 3520 fd_install(out_fence_fd, out_fence->file); 3521 args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */ 3522 args->rsvd2 |= (u64)out_fence_fd << 32; 3523 out_fence_fd = -1; 3524 } else { 3525 fput(out_fence->file); 3526 } 3527 } 3528 3529 if (!out_fence && eb.composite_fence) 3530 dma_fence_put(eb.composite_fence); 3531 3532 eb_requests_put(&eb); 3533 3534 err_vma: 3535 eb_release_vmas(&eb, true); 3536 WARN_ON(err == -EDEADLK); 3537 i915_gem_ww_ctx_fini(&eb.ww); 3538 3539 if (eb.batch_pool) 3540 intel_gt_buffer_pool_put(eb.batch_pool); 3541 err_engine: 3542 eb_put_engine(&eb); 3543 err_context: 3544 i915_gem_context_put(eb.gem_context); 3545 err_destroy: 3546 eb_destroy(&eb); 3547 err_out_fence: 3548 if (out_fence_fd != -1) 3549 put_unused_fd(out_fence_fd); 3550 err_in_fence: 3551 dma_fence_put(in_fence); 3552 err_ext: 3553 put_fence_array(eb.fences, eb.num_fences); 3554 return err; 3555 } 3556 3557 static size_t eb_element_size(void) 3558 { 3559 return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma); 3560 } 3561 3562 static bool check_buffer_count(size_t count) 3563 { 3564 const size_t sz = eb_element_size(); 3565 3566 /* 3567 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup 3568 * array size (see eb_create()). Otherwise, we can accept an array as 3569 * large as can be addressed (though use large arrays at your peril)! 3570 */ 3571 3572 return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1); 3573 } 3574 3575 int 3576 i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, 3577 struct drm_file *file) 3578 { 3579 struct drm_i915_private *i915 = to_i915(dev); 3580 struct drm_i915_gem_execbuffer2 *args = data; 3581 struct drm_i915_gem_exec_object2 *exec2_list; 3582 const size_t count = args->buffer_count; 3583 int err; 3584 3585 if (!check_buffer_count(count)) { 3586 drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count); 3587 return -EINVAL; 3588 } 3589 3590 err = i915_gem_check_execbuffer(i915, args); 3591 if (err) 3592 return err; 3593 3594 /* Allocate extra slots for use by the command parser */ 3595 exec2_list = kvmalloc_array(count + 2, eb_element_size(), 3596 __GFP_NOWARN | GFP_KERNEL); 3597 if (exec2_list == NULL) { 3598 drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n", 3599 count); 3600 return -ENOMEM; 3601 } 3602 if (copy_from_user(exec2_list, 3603 u64_to_user_ptr(args->buffers_ptr), 3604 sizeof(*exec2_list) * count)) { 3605 drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count); 3606 kvfree(exec2_list); 3607 return -EFAULT; 3608 } 3609 3610 err = i915_gem_do_execbuffer(dev, file, args, exec2_list); 3611 3612 /* 3613 * Now that we have begun execution of the batchbuffer, we ignore 3614 * any new error after this point. Also given that we have already 3615 * updated the associated relocations, we try to write out the current 3616 * object locations irrespective of any error. 3617 */ 3618 if (args->flags & __EXEC_HAS_RELOC) { 3619 struct drm_i915_gem_exec_object2 __user *user_exec_list = 3620 u64_to_user_ptr(args->buffers_ptr); 3621 unsigned int i; 3622 3623 /* Copy the new buffer offsets back to the user's exec list. */ 3624 /* 3625 * Note: count * sizeof(*user_exec_list) does not overflow, 3626 * because we checked 'count' in check_buffer_count(). 3627 * 3628 * And this range already got effectively checked earlier 3629 * when we did the "copy_from_user()" above. 
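 *
 * Note that only entries whose offset carries the UPDATE
 * (PIN_OFFSET_FIXED) bit, i.e. objects that were actually assigned a
 * new address, are written back below, after masking off the low flag
 * bits and converting to a canonical address.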
3630 */ 3631 if (!user_write_access_begin(user_exec_list, 3632 count * sizeof(*user_exec_list))) 3633 goto end; 3634 3635 for (i = 0; i < args->buffer_count; i++) { 3636 if (!(exec2_list[i].offset & UPDATE)) 3637 continue; 3638 3639 exec2_list[i].offset = 3640 gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK); 3641 unsafe_put_user(exec2_list[i].offset, 3642 &user_exec_list[i].offset, 3643 end_user); 3644 } 3645 end_user: 3646 user_write_access_end(); 3647 end:; 3648 } 3649 3650 args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS; 3651 kvfree(exec2_list); 3652 return err; 3653 } 3654
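
/*
 * Illustrative sketch (not part of the driver): a minimal userspace
 * submission through DRM_IOCTL_I915_GEM_EXECBUFFER2, assuming 'fd' is an
 * open i915 DRM file descriptor, 'batch_handle' a GEM object ending in
 * MI_BATCH_BUFFER_END and 'ctx_id' a previously created context id. All
 * variable names here are assumptions, not part of the uAPI.
 *
 *	struct drm_i915_gem_exec_object2 obj = { .handle = batch_handle };
 *	struct drm_i915_gem_execbuffer2 args = {
 *		.buffers_ptr = (__u64)(uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC |
 *			 I915_EXEC_HANDLE_LUT,
 *		.rsvd1 = ctx_id,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &args);
 *
 * On success, any object that was assigned a new GPU address has its
 * updated offset copied back into obj.offset by the write-back loop in
 * i915_gem_execbuffer2_ioctl() above.
 */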