/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008,2010 Intel Corporation
 */

#include <linux/intel-iommu.h>
#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>

#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
#include "i915_trace.h"

enum {
	FORCE_CPU_RELOC = 1,
	FORCE_GTT_RELOC,
	FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};

#define __EXEC_OBJECT_HAS_REF		BIT(31)
#define __EXEC_OBJECT_HAS_PIN		BIT(30)
#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_VALIDATED	BIT(30)
#define __EXEC_INTERNAL_FLAGS	(~0u << 30)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | \
	 I915_EXEC_CONSTANTS_MASK  | \
	 I915_EXEC_RESOURCE_STREAMER)

/* Catch emission of unexpected errors for CI! */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#undef EINVAL
#define EINVAL ({ \
	DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
	22; \
})
#endif

/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try and avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
 * At the level of talking to the hardware, submitting a batchbuffer for the
 * GPU to execute is to add content to a buffer from which the HW
 * command streamer is reading.
 *
 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
 *    Execlists, this command is not placed on the same buffer as the
 *    remaining items.
 *
 * 2. Add a command to invalidate caches to the buffer.
 *
 * 3. Add a batchbuffer start command to the buffer; the start command is
 *    essentially a token together with the GPU address of the batchbuffer
 *    to be executed.
 *
 * 4. Add a pipeline flush to the buffer.
 *
 * 5. Add a memory write command to the buffer to record when the GPU
 *    is done executing the batchbuffer. The memory write writes the
 *    global sequence number of the request, ``i915_request::global_seqno``;
 *    the i915 driver uses the current value in the register to determine
 *    if the GPU has completed the batchbuffer.
 *
 * 6. Add a user interrupt command to the buffer. This command instructs
 *    the GPU to issue an interrupt when the command, pipeline flush and
 *    memory write are completed.
 *
 * 7. Inform the hardware of the additional commands added to the buffer
 *    (by updating the tail pointer).
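 *
 * As an illustrative sketch only (the exact opcodes and their ordering vary
 * per generation and engine), the commands emitted around a single batch
 * might read::
 *
 *	MI_FLUSH_DW / PIPE_CONTROL        ; step 2, invalidate caches
 *	MI_BATCH_BUFFER_START <gpu addr>  ; step 3, jump into the batch
 *	PIPE_CONTROL (flush)              ; step 4, pipeline flush
 *	MI_STORE_DWORD_IMM <seqno>        ; step 5, record completion
 *	MI_USER_INTERRUPT                 ; step 6, notify the CPU
 *
 * This is only meant to make the list above concrete, not to document any
 * particular engine's ring contents.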
 *
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object
 * 3. Relocation - Update any addresses to point to the final locations
 * 4. Serialisation - Order the request with respect to its dependencies
 * 5. Construction - Construct a request to execute the batchbuffer
 * 6. Submission (at some point in the future execution)
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 * The addresses written in the objects must match the corresponding
 * reloc.presumed_offset which in turn must match the corresponding
 * execobject.offset.
 *
 * Any render targets written to in the batch must be flagged with
 * EXEC_OBJECT_WRITE.
 *
 * To avoid stalling, execobject.offset should match the current
 * address of that object within the active context.
 *
 * The reservation is done in multiple phases. First we try to keep any
 * object already bound in its current location - so long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf one last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. These objects are
 * simpler to place (the location is known a priori) - all we have to do is
 * make sure the space is available.
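 *
 * For example (an illustrative sketch, not a complete recipe), a client
 * placing an object itself would fill its execobject as::
 *
 *	exec_object.offset = known_gpu_address;
 *	exec_object.flags  = EXEC_OBJECT_PINNED |
 *			     EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
 *
 * and reservation then reduces to checking that the requested range is
 * free (or already occupied by that very object).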
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoid the pagefaults) which requires that we never wait. If
 * we detect that we may wait, or if we need to fault, then we have to fallback
 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
 * bells yet?) Dropping the mutex means that we lose all the state we have
 * built up so far for the execbuf and we must reset any global data. However,
 * we do leave the objects pinned in their final locations - which is a
 * potential issue for concurrent execbufs. Once we have left the mutex, we can
 * allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using I915_EXEC_NO_RELOC, we expect userspace
 * to provide this information instead. We also check whether we can skip the
 * relocation by comparing the expected value inside the relocation entry with
 * the target's final address. If they differ, we have to map the current
 * object and rewrite the 4 or 8 byte pointer within.
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules, any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before,
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but most simple is to wait on the CPU until
 * all dependencies are resolved.
 *
 * After all of that, it is just a matter of closing the request and handing
 * it to the hardware (well, leaving it in a queue to be executed).
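 *
 * Tying the phases together, a minimal userspace submission (an illustrative
 * sketch only; error handling and fences are omitted, and the handles and
 * sizes below are placeholders) looks roughly like::
 *
 *	struct drm_i915_gem_exec_object2 objects[2] = {
 *		{ .handle = aux_handle },
 *		{ .handle = batch_handle },
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)objects,
 *		.buffer_count = 2,
 *		.batch_len = batch_size,
 *		.flags = I915_EXEC_RENDER,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * The batch is the last element of the object list, unless
 * I915_EXEC_BATCH_FIRST is set.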
 *
 * However, we also offer the ability for batchbuffers to be run with elevated
 * privileges so that they can access otherwise hidden registers. (Used to
 * adjust L3 cache etc.) Before any batch is given extra privileges we first
 * must check that it contains no nefarious instructions: we check that each
 * instruction is from our whitelist and all registers are also from an
 * allowed list. We first copy the user's batchbuffer to a shadow (so that the
 * user doesn't have access to it, either by the CPU or GPU as we scan it)
 * and then parse each instruction. If everything is ok, we set a flag telling
 * the hardware to run the batchbuffer in trusted mode, otherwise the ioctl is
 * rejected.
 */

struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
	struct i915_vma **vma;
	unsigned int *flags;

	struct intel_engine_cs *engine; /** engine to queue the request to */
	struct intel_context *context; /* logical state for the request */
	struct i915_gem_context *gem_context; /** caller's context */

	struct i915_request *request; /** our request to build */
	struct i915_vma *batch; /** identity of the batch obj/vma */

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned long vaddr; /** Current kmap address */
		unsigned long page; /** Currently mapped page index */
		unsigned int gen; /** Cached value of INTEL_GEN */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;

		struct intel_context *ce;

		struct i915_request *rq;
		u32 *rq_cmd;
		unsigned int rq_size;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */
	u32 context_flags; /** Set of execobj.flags to insert from the ctx */

	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_len; /** Length of batch within object */
	u32 batch_flags; /** Flags composed for emit_bb_start() */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
	int lut_size;
	struct hlist_head *buckets; /** ht for relocation handles */
};

#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])

/*
 * Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
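/*
 * A worked example (ours, for illustration): 0x0000_8000_0000_0000 has bit 47
 * set, so its canonical form is 0xffff_8000_0000_0000 - bits [63:48] are
 * copies of bit 47 - whereas any address below 1 << 47 is already canonical.
 */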
#define GEN8_HIGH_ADDRESS_BIT 47
static inline u64 gen8_canonical_addr(u64 address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline u64 gen8_noncanonical_addr(u64 address)
{
	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}

static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
	return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
}

static int eb_create(struct i915_execbuffer *eb)
{
	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
		unsigned int size = 1 + ilog2(eb->buffer_count);

		/*
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
		 * first with a 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
		 * using this hashtable, and a negative value to indicate a
		 * direct lookup.
		 */
		do {
			gfp_t flags;

			/*
			 * While we can still reduce the allocation size, don't
			 * raise a warning and allow the allocation to fail.
			 * On the last pass though, we want to try as hard
			 * as possible to perform the allocation and warn
			 * if it fails.
			 */
			flags = GFP_KERNEL;
			if (size > 1)
				flags |= __GFP_NORETRY | __GFP_NOWARN;

			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      flags);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!size))
			return -ENOMEM;

		eb->lut_size = size;
	} else {
		eb->lut_size = -eb->buffer_count;
	}

	return 0;
}

static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
		 const struct i915_vma *vma,
		 unsigned int flags)
{
	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return true;

	return false;
}

static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
	   const struct drm_i915_gem_exec_object2 *entry,
	   struct i915_vma *vma)
{
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;

	if (vma->node.size)
		pin_flags = vma->node.start;
	else
		pin_flags = entry->offset & PIN_OFFSET_MASK;

	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
		pin_flags |= PIN_GLOBAL;

	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
		return false;

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		if (unlikely(i915_vma_pin_fence(vma))) {
			i915_vma_unpin(vma);
			return false;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	return !eb_vma_misplaced(entry, vma, exec_flags);
}
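/*
 * Undo the pin (and fence pin) taken by eb_pin_vma()/eb_reserve_vma(). The
 * caller is responsible for clearing the __EXEC_OBJECT_RESERVED bits in the
 * execobj flags, as eb_unreserve_vma() below does.
 */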
static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
{
	GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));

	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
		__i915_vma_unpin_fence(vma);

	__i915_vma_unpin(vma);
}

static inline void
eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
{
	if (!(*flags & __EXEC_OBJECT_HAS_PIN))
		return;

	__eb_unreserve_vma(vma, *flags);
	*flags &= ~__EXEC_OBJECT_RESERVED;
}

static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
{
	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;

	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
	}

	if (unlikely(vma->exec_flags)) {
		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
			  entry->handle, (int)(entry - eb->exec));
		return -EINVAL;
	}

	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

	if (!eb->reloc_cache.has_fence) {
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	if (!(entry->flags & EXEC_OBJECT_PINNED))
		entry->flags |= eb->context_flags;

	return 0;
}

static int
eb_add_vma(struct i915_execbuffer *eb,
	   unsigned int i, unsigned batch_idx,
	   struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
	int err;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	if (!(eb->args->flags & __EXEC_VALIDATED)) {
		err = eb_validate_vma(eb, entry, vma);
		if (unlikely(err))
			return err;
	}

	if (eb->lut_size > 0) {
		vma->exec_handle = entry->handle;
		hlist_add_head(&vma->exec_node,
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
	}

	if (entry->relocation_count)
		list_add_tail(&vma->reloc_link, &eb->relocs);

	/*
	 * Stash a pointer from the vma to execobj, so we can query its flags,
	 * size, alignment etc as provided by the user. Also we stash a pointer
	 * to the vma inside the execobj so that we can use a direct lookup
	 * to find the right target VMA when doing relocations.
	 */
	eb->vma[i] = vma;
	eb->flags[i] = entry->flags;
	vma->exec_flags = &eb->flags[i];

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if (i == batch_idx) {
		if (entry->relocation_count &&
		    !(eb->flags[i] & EXEC_OBJECT_PINNED))
			eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
		if (eb->reloc_cache.has_fence)
			eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;

		eb->batch = vma;
	}

	err = 0;
	if (eb_pin_vma(eb, entry, vma)) {
		if (entry->offset != vma->node.start) {
			entry->offset = vma->node.start | UPDATE;
			eb->args->flags |= __EXEC_HAS_RELOC;
		}
	} else {
		eb_unreserve_vma(vma, vma->exec_flags);

		list_add_tail(&vma->exec_link, &eb->unbound);
		if (drm_mm_node_allocated(&vma->node))
			err = i915_vma_unbind(vma);
		if (unlikely(err))
			vma->exec_flags = NULL;
	}
	return err;
}
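/*
 * Heuristic for choosing the relocation path: objects without struct pages
 * cannot be kmapped at all, while LLC-coherent or snooped objects are cheaper
 * to patch through the CPU cache than through the uncached GTT aperture.
 */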
static inline int use_cpu_reloc(const struct reloc_cache *cache,
				const struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
		return true;

	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
		return false;

	return (cache->has_llc ||
		obj->cache_dirty ||
		obj->cache_level != I915_CACHE_NONE);
}

static int eb_reserve_vma(const struct i915_execbuffer *eb,
			  struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;
	int err;

	pin_flags = PIN_USER | PIN_NONBLOCK;
	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
		pin_flags |= PIN_GLOBAL;

	/*
	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
	 * limit address to the first 4GBs for unflagged objects.
	 */
	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		pin_flags |= PIN_ZONE_4G;

	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
		pin_flags |= PIN_MAPPABLE;

	if (exec_flags & EXEC_OBJECT_PINNED) {
		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
		pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
	} else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
	}

	err = i915_vma_pin(vma,
			   entry->pad_to_size, entry->alignment,
			   pin_flags);
	if (err)
		return err;

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start | UPDATE;
		eb->args->flags |= __EXEC_HAS_RELOC;
	}

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));

	return 0;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	struct list_head last;
	struct i915_vma *vma;
	unsigned int i, pass;
	int err;

	/*
	 * Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */

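	/*
	 * Pass 0 below binds into whatever space is currently free; pass 1
	 * evicts the entire VM and tries again from scratch; any further
	 * pass gives up with -ENOSPC.
	 */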
	pass = 0;
	err = 0;
	do {
		list_for_each_entry(vma, &eb->unbound, exec_link) {
			err = eb_reserve_vma(eb, vma);
			if (err)
				break;
		}
		if (err != -ENOSPC)
			return err;

		/* Resort *all* the objects into priority order */
		INIT_LIST_HEAD(&eb->unbound);
		INIT_LIST_HEAD(&last);
		for (i = 0; i < count; i++) {
			unsigned int flags = eb->flags[i];
			struct i915_vma *vma = eb->vma[i];

			if (flags & EXEC_OBJECT_PINNED &&
			    flags & __EXEC_OBJECT_HAS_PIN)
				continue;

			eb_unreserve_vma(vma, &eb->flags[i]);

			if (flags & EXEC_OBJECT_PINNED)
				/* Pinned must have their slot */
				list_add(&vma->exec_link, &eb->unbound);
			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
				/* Mappable objects require the lowest 256MiB (aperture) */
				list_add_tail(&vma->exec_link, &eb->unbound);
			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
				/* Prioritise the 4GiB region for restricted bo */
				list_add(&vma->exec_link, &last);
			else
				list_add_tail(&vma->exec_link, &last);
		}
		list_splice_tail(&last, &eb->unbound);

		switch (pass++) {
		case 0:
			break;

		case 1:
			/* Too fragmented, unbind everything and retry */
			mutex_lock(&eb->context->vm->mutex);
			err = i915_gem_evict_vm(eb->context->vm);
			mutex_unlock(&eb->context->vm->mutex);
			if (err)
				return err;
			break;

		default:
			return -ENOSPC;
		}
	} while (1);
}

static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
{
	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
		return 0;
	else
		return eb->buffer_count - 1;
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
	if (unlikely(!ctx))
		return -ENOENT;

	eb->gem_context = ctx;
	if (rcu_access_pointer(ctx->vm))
		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	eb->context_flags = 0;
	if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return 0;
}

static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
	struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
	struct drm_i915_gem_object *obj;
	unsigned int i, batch;
	int err;

	if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
		return -EIO;

	INIT_LIST_HEAD(&eb->relocs);
	INIT_LIST_HEAD(&eb->unbound);

	batch = eb_batch_index(eb);

	mutex_lock(&eb->gem_context->mutex);
	if (unlikely(i915_gem_context_is_closed(eb->gem_context))) {
		err = -ENOENT;
		goto err_ctx;
	}

	for (i = 0; i < eb->buffer_count; i++) {
		u32 handle = eb->exec[i].handle;
		struct i915_lut_handle *lut;
		struct i915_vma *vma;

		vma = radix_tree_lookup(handles_vma, handle);
		if (likely(vma))
			goto add_vma;

		obj = i915_gem_object_lookup(eb->file, handle);
		if (unlikely(!obj)) {
			err = -ENOENT;
			goto err_vma;
		}

		vma = i915_vma_instance(obj, eb->context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		lut = i915_lut_handle_alloc();
		if (unlikely(!lut)) {
			err = -ENOMEM;
			goto err_obj;
		}

		err = radix_tree_insert(handles_vma, handle, vma);
		if (unlikely(err)) {
			i915_lut_handle_free(lut);
			goto err_obj;
		}

		/* transfer ref to lut */
		if (!atomic_fetch_inc(&vma->open_count))
			i915_vma_reopen(vma);
		lut->handle = handle;
		lut->ctx = eb->gem_context;

		i915_gem_object_lock(obj);
		list_add(&lut->obj_link, &obj->lut_list);
		i915_gem_object_unlock(obj);

add_vma:
		err = eb_add_vma(eb, i, batch, vma);
		if (unlikely(err))
			goto err_vma;

		GEM_BUG_ON(vma != eb->vma[i]);
		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
			   eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
	}

	mutex_unlock(&eb->gem_context->mutex);

	eb->args->flags |= __EXEC_VALIDATED;
	return eb_reserve(eb);

err_obj:
	i915_gem_object_put(obj);
err_vma:
	eb->vma[i] = NULL;
err_ctx:
	mutex_unlock(&eb->gem_context->mutex);
	return err;
}
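/*
 * Map a user handle back to its vma: a negative lut_size means the execobj
 * index is used directly, a positive one means we look the handle up in the
 * hashtable populated by eb_add_vma().
 */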
static struct i915_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->lut_size < 0) {
		if (handle >= -eb->lut_size)
			return NULL;
		return eb->vma[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[hash_32(handle, eb->lut_size)];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void eb_release_vmas(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct i915_vma *vma = eb->vma[i];
		unsigned int flags = eb->flags[i];

		if (!vma)
			break;

		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		vma->exec_flags = NULL;
		eb->vma[i] = NULL;

		if (flags & __EXEC_OBJECT_HAS_PIN)
			__eb_unreserve_vma(vma, flags);

		if (flags & __EXEC_OBJECT_HAS_REF)
			i915_vma_put(vma);
	}
}

static void eb_reset_vmas(const struct i915_execbuffer *eb)
{
	eb_release_vmas(eb);
	if (eb->lut_size > 0)
		memset(eb->buckets, 0,
		       sizeof(struct hlist_head) << eb->lut_size);
}

static void eb_destroy(const struct i915_execbuffer *eb)
{
	GEM_BUG_ON(eb->reloc_cache.rq);

	if (eb->reloc_cache.ce)
		intel_context_put(eb->reloc_cache.ce);

	if (eb->lut_size > 0)
		kfree(eb->buckets);
}

static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  const struct i915_vma *target)
{
	return gen8_canonical_addr((int)reloc->delta + target->node.start);
}

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->gen = INTEL_GEN(i915);
	cache->has_llc = HAS_LLC(i915);
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->has_fence = cache->gen < 4;
	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
	cache->node.flags = 0;
	cache->ce = NULL;
	cache->rq = NULL;
	cache->rq_size = 0;
}
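/*
 * The low bits of reloc_cache.vaddr carry the mapping flags (CLFLUSH_BEFORE,
 * CLFLUSH_AFTER and KMAP), while the page-aligned remainder is the actual
 * mapping; unmask_page() and unmask_flags() split the two halves apart.
 */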
static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

static void reloc_gpu_flush(struct reloc_cache *cache)
{
	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;

	__i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
	i915_gem_object_unpin_map(cache->rq->batch->obj);

	intel_gt_chipset_flush(cache->rq->engine->gt);

	i915_request_add(cache->rq);
	cache->rq = NULL;
}

static void reloc_cache_reset(struct reloc_cache *cache)
{
	void *vaddr;

	if (cache->rq)
		reloc_gpu_flush(cache);

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		struct i915_ggtt *ggtt = cache_to_ggtt(cache);

		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
		io_mapping_unmap_atomic((void __iomem *)vaddr);

		if (drm_mm_node_allocated(&cache->node)) {
			ggtt->vm.clear_range(&ggtt->vm,
					     cache->node.start,
					     cache->node.size);
			mutex_lock(&ggtt->vm.mutex);
			drm_mm_remove_node(&cache->node);
			mutex_unlock(&ggtt->vm.mutex);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			unsigned long page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int err;

		err = i915_gem_object_prepare_write(obj, &flushes);
		if (err)
			return ERR_PTR(err);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}
static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int err;

		if (i915_gem_object_is_tiled(obj))
			return ERR_PTR(-EINVAL);

		if (use_cpu_reloc(cache, obj))
			return NULL;

		i915_gem_object_lock(obj);
		err = i915_gem_object_set_to_gtt_domain(obj, true);
		i915_gem_object_unlock(obj);
		if (err)
			return ERR_PTR(err);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK /* NOWARN */ |
					       PIN_NOEVICT);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			mutex_lock(&ggtt->vm.mutex);
			err = drm_mm_insert_node_in_range
				(&ggtt->vm.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			mutex_unlock(&ggtt->vm.mutex);
			if (err) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (drm_mm_node_allocated(&cache->node)) {
		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, page),
				     offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
							 offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/*
		 * Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt to the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	i915_vma_lock(vma);

	if (obj->cache_dirty & ~obj->cache_coherent)
		i915_gem_clflush_object(obj, 0);
	obj->write_domain = 0;

	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

	i915_vma_unlock(vma);

	return err;
}
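/*
 * When the target is still busy on the GPU we avoid stalling by writing the
 * relocations from the GPU itself: __reloc_gpu_alloc() builds a one-page
 * batch from the engine pool, into which relocate_entry() packs
 * MI_STORE_DWORD_IMM commands, terminated and submitted by reloc_gpu_flush().
 */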
static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
			     struct i915_vma *vma,
			     unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	struct intel_engine_pool_node *pool;
	struct i915_request *rq;
	struct i915_vma *batch;
	u32 *cmd;
	int err;

	pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	cmd = i915_gem_object_pin_map(pool->obj,
				      cache->has_llc ?
				      I915_MAP_FORCE_WB :
				      I915_MAP_FORCE_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_pool;
	}

	batch = i915_vma_instance(pool->obj, vma->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_unmap;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
	if (err)
		goto err_unmap;

	rq = intel_context_create_request(cache->ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = intel_engine_pool_mark_active(pool, rq);
	if (err)
		goto err_request;

	err = reloc_move_to_gpu(rq, vma);
	if (err)
		goto err_request;

	err = eb->engine->emit_bb_start(rq,
					batch->node.start, PAGE_SIZE,
					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
	if (err)
		goto skip_request;

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	rq->batch = batch;
	i915_vma_unpin(batch);

	cache->rq = rq;
	cache->rq_cmd = cmd;
	cache->rq_size = 0;

	/* Return with batch mapping (cmd) still pinned */
	goto out_pool;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(batch);
err_unmap:
	i915_gem_object_unpin_map(pool->obj);
out_pool:
	intel_engine_pool_put(pool);
	return err;
}

static u32 *reloc_gpu(struct i915_execbuffer *eb,
		      struct i915_vma *vma,
		      unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	u32 *cmd;

	if (cache->rq_size > PAGE_SIZE / sizeof(u32) - (len + 1))
		reloc_gpu_flush(cache);

	if (unlikely(!cache->rq)) {
		int err;

		/* If we need to copy for the cmdparser, we will stall anyway */
		if (eb_use_cmdparser(eb))
			return ERR_PTR(-EWOULDBLOCK);

		if (!intel_engine_can_store_dword(eb->engine))
			return ERR_PTR(-ENODEV);

		if (!cache->ce) {
			struct intel_context *ce;

			/*
			 * The CS pre-parser can pre-fetch commands across
			 * memory sync points and starting gen12 it is able to
			 * pre-fetch across BB_START and BB_END boundaries
			 * (within the same context). We therefore use a
			 * separate context gen12+ to guarantee that the reloc
			 * writes land before the parser gets to the target
			 * memory location.
			 */
			if (cache->gen >= 12)
				ce = intel_context_create(eb->context->gem_context,
							  eb->engine);
			else
				ce = intel_context_get(eb->context);
			if (IS_ERR(ce))
				return ERR_CAST(ce);

			cache->ce = ce;
		}

		err = __reloc_gpu_alloc(eb, vma, len);
		if (unlikely(err))
			return ERR_PTR(err);
	}

	cmd = cache->rq_cmd + cache->rq_size;
	cache->rq_size += len;

	return cmd;
}

static u64
relocate_entry(struct i915_vma *vma,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct i915_execbuffer *eb,
	       const struct i915_vma *target)
{
	u64 offset = reloc->offset;
	u64 target_offset = relocation_target(reloc, target);
	bool wide = eb->reloc_cache.use_64bit_reloc;
	void *vaddr;

	if (!eb->reloc_cache.vaddr &&
	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
	     !dma_resv_test_signaled_rcu(vma->resv, true))) {
		const unsigned int gen = eb->reloc_cache.gen;
		unsigned int len;
		u32 *batch;
		u64 addr;

		if (wide)
			len = offset & 7 ? 8 : 5;
		else if (gen >= 4)
			len = 4;
		else
			len = 3;

		batch = reloc_gpu(eb, vma, len);
		if (IS_ERR(batch))
			goto repeat;

		addr = gen8_canonical_addr(vma->node.start + offset);
		if (wide) {
			if (offset & 7) {
				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);

				addr = gen8_canonical_addr(addr + 4);

				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = upper_32_bits(target_offset);
			} else {
				*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);
				*batch++ = upper_32_bits(target_offset);
			}
		} else if (gen >= 6) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else if (gen >= 4) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else {
			*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*batch++ = addr;
			*batch++ = target_offset;
		}

		goto out;
	}

repeat:
	vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			eb->reloc_cache.vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

out:
	return target->node.start | UPDATE;
}
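/*
 * Apply a single relocation entry: validate the target's GPU domains, skip
 * the write entirely if the presumed offset is already correct, and otherwise
 * patch the 4/8-byte pointer via the CPU, GTT or GPU path chosen in
 * relocate_entry().
 */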
static u64
eb_relocate_entry(struct i915_execbuffer *eb,
		  struct i915_vma *vma,
		  const struct drm_i915_gem_relocation_entry *reloc)
{
	struct i915_vma *target;
	int err;

	/* we already hold a reference to all valid objects */
	target = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(!target))
		return -ENOENT;

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	if (reloc->write_domain) {
		*target->exec_flags |= EXEC_OBJECT_WRITE;

		/*
		 * Sandybridge PPGTT errata: We need a global gtt mapping
		 * for MI and pipe_control writes because the gpu doesn't
		 * properly redirect them through the ppgtt for non_secure
		 * batchbuffers.
		 */
		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
		    IS_GEN(eb->i915, 6)) {
			err = i915_vma_bind(target, target->obj->cache_level,
					    PIN_GLOBAL, NULL);
			if (WARN_ONCE(err,
				      "Unexpected failure to bind target VMA!"))
				return err;
		}
	}

	/*
	 * If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (!DBG_FORCE_RELOC &&
	    gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "target %d offset %d size %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset,
			  (int)vma->size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "target %d offset %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset);
		return -EINVAL;
	}

	/*
	 * If we write into the object, we need to force the synchronisation
	 * barrier, either with an asynchronous clflush or if we executed the
	 * patching using the GPU (though that should be serialised by the
	 * timeline). To be completely sure, and since we are required to
	 * do relocations we are already stalling, disable the user's opt
	 * out of our synchronisation.
	 */
	*vma->exec_flags &= ~EXEC_OBJECT_ASYNC;

	/* and update the user's relocation entry */
	return relocate_entry(vma, reloc, eb, target);
}
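/*
 * Relocations are processed in stack-sized chunks copied from user memory
 * with pagefaults disabled; any fault aborts this fast path and punts the
 * execbuf to eb_relocate_slow().
 */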
static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *urelocs;
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int remain;

	urelocs = u64_to_user_ptr(entry->relocs_ptr);
	remain = entry->relocation_count;
	if (unlikely(remain > N_RELOC(ULONG_MAX)))
		return -EINVAL;

	/*
	 * We must check that the entire relocation array is safe
	 * to read. However, if the array is not writable the user loses
	 * the updated relocation values.
	 */
	if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs))))
		return -EFAULT;

	do {
		struct drm_i915_gem_relocation_entry *r = stack;
		unsigned int count =
			min_t(unsigned int, remain, ARRAY_SIZE(stack));
		unsigned int copied;

		/*
		 * This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. In such a case,
		 * the page fault handler would call i915_gem_fault() and we
		 * would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
		pagefault_enable();
		if (unlikely(copied)) {
			remain = -EFAULT;
			goto out;
		}

		remain -= count;
		do {
			u64 offset = eb_relocate_entry(eb, vma, r);

			if (likely(offset == 0)) {
			} else if ((s64)offset < 0) {
				remain = (int)offset;
				goto out;
			} else {
				/*
				 * Note that reporting an error now
				 * leaves everything in an inconsistent
				 * state as we have *already* changed
				 * the relocation value inside the
				 * object. As we have not changed the
				 * reloc.presumed_offset or will not
				 * change the execobject.offset, on the
				 * next call we may not rewrite the value
				 * inside the object, leaving it
				 * dangling and causing a GPU hang. Unless
				 * userspace dynamically rebuilds the
				 * relocations on each execbuf rather than
				 * presume a static tree.
				 *
				 * We did previously check if the relocations
				 * were writable (access_ok), an error now
				 * would be a strange race with mprotect,
				 * having already demonstrated that we
				 * can read from this userspace address.
				 */
				offset = gen8_canonical_addr(offset & ~UPDATE);
				if (unlikely(__put_user(offset, &urelocs[r - stack].presumed_offset))) {
					remain = -EFAULT;
					goto out;
				}
			}
		} while (r++, --count);
		urelocs += ARRAY_SIZE(stack);
	} while (remain);
out:
	reloc_cache_reset(&eb->reloc_cache);
	return remain;
}
static int
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
{
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	struct drm_i915_gem_relocation_entry *relocs =
		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
	unsigned int i;
	int err;

	for (i = 0; i < entry->relocation_count; i++) {
		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);

		if ((s64)offset < 0) {
			err = (int)offset;
			goto err;
		}
	}
	err = 0;
err:
	reloc_cache_reset(&eb->reloc_cache);
	return err;
}

static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
{
	const char __user *addr, *end;
	unsigned long size;
	char __maybe_unused c;

	size = entry->relocation_count;
	if (size == 0)
		return 0;

	if (size > N_RELOC(ULONG_MAX))
		return -EINVAL;

	addr = u64_to_user_ptr(entry->relocs_ptr);
	size *= sizeof(struct drm_i915_gem_relocation_entry);
	if (!access_ok(addr, size))
		return -EFAULT;

	end = addr + size;
	for (; addr < end; addr += PAGE_SIZE) {
		int err = __get_user(c, addr);
		if (err)
			return err;
	}
	return __get_user(c, end - 1);
}

static int eb_copy_relocations(const struct i915_execbuffer *eb)
{
	struct drm_i915_gem_relocation_entry *relocs;
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		const unsigned int nreloc = eb->exec[i].relocation_count;
		struct drm_i915_gem_relocation_entry __user *urelocs;
		unsigned long size;
		unsigned long copied;

		if (nreloc == 0)
			continue;

		err = check_relocations(&eb->exec[i]);
		if (err)
			goto err;

		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
		size = nreloc * sizeof(*relocs);

		relocs = kvmalloc_array(size, 1, GFP_KERNEL);
		if (!relocs) {
			err = -ENOMEM;
			goto err;
		}

		/* copy_from_user is limited to < 4GiB */
		copied = 0;
		do {
			unsigned int len =
				min_t(u64, BIT_ULL(31), size - copied);

			if (__copy_from_user((char *)relocs + copied,
					     (char __user *)urelocs + copied,
					     len))
				goto end;

			copied += len;
		} while (copied < size);

		/*
		 * As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		if (!user_access_begin(urelocs, size))
			goto end;

		for (copied = 0; copied < nreloc; copied++)
			unsafe_put_user(-1,
					&urelocs[copied].presumed_offset,
					end_user);
		user_access_end();

		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
	}

	return 0;

end_user:
	user_access_end();
end:
	kvfree(relocs);
	err = -EFAULT;
err:
	while (i--) {
		relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
		if (eb->exec[i].relocation_count)
			kvfree(relocs);
	}
	return err;
}

static int eb_prefault_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	if (unlikely(i915_modparams.prefault_disable))
		return 0;

	for (i = 0; i < count; i++) {
		int err;

		err = check_relocations(&eb->exec[i]);
		if (err)
			return err;
	}

	return 0;
}
static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
{
	struct drm_device *dev = &eb->i915->drm;
	bool have_copy = false;
	struct i915_vma *vma;
	int err = 0;

repeat:
	if (signal_pending(current)) {
		err = -ERESTARTSYS;
		goto out;
	}

	/* We may process another execbuffer during the unlock... */
	eb_reset_vmas(eb);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * We take 3 passes through the slowpath.
	 *
	 * 1 - we try to just prefault all the user relocation entries and
	 * then attempt to reuse the atomic pagefault disabled fast path again.
	 *
	 * 2 - we copy the user entries to a local buffer here outside of the
	 * lock and allow ourselves to wait upon any rendering before
	 * performing the relocations.
	 *
	 * 3 - we already have a local copy of the relocation entries, but
	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
	 */
	if (!err) {
		err = eb_prefault_relocations(eb);
	} else if (!have_copy) {
		err = eb_copy_relocations(eb);
		have_copy = err == 0;
	} else {
		cond_resched();
		err = 0;
	}
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* A frequent cause for EAGAIN are currently unavailable client pages */
	flush_workqueue(eb->i915->mm.userptr_wq);

	err = i915_mutex_lock_interruptible(dev);
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* reacquire the objects */
	err = eb_lookup_vmas(eb);
	if (err)
		goto err;

	GEM_BUG_ON(!eb->batch);

	list_for_each_entry(vma, &eb->relocs, reloc_link) {
		if (!have_copy) {
			pagefault_disable();
			err = eb_relocate_vma(eb, vma);
			pagefault_enable();
			if (err)
				goto repeat;
		} else {
			err = eb_relocate_vma_slow(eb, vma);
			if (err)
				goto err;
		}
	}

	/*
	 * Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	if (err == -EAGAIN)
		goto repeat;

out:
	if (have_copy) {
		const unsigned int count = eb->buffer_count;
		unsigned int i;

		for (i = 0; i < count; i++) {
			const struct drm_i915_gem_exec_object2 *entry =
				&eb->exec[i];
			struct drm_i915_gem_relocation_entry *relocs;

			if (!entry->relocation_count)
				continue;

			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
			kvfree(relocs);
		}
	}

	return err;
}
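/*
 * Fast path: look up and reserve all vmas, then apply the relocations in
 * atomic (pagefault-disabled) context, falling back to eb_relocate_slow()
 * if anything needs to fault or wait.
 */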
static int eb_relocate(struct i915_execbuffer *eb)
{
	if (eb_lookup_vmas(eb))
		goto slow;

	/* The objects are in their final locations, apply the relocations. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &eb->relocs, reloc_link) {
			if (eb_relocate_vma(eb, vma))
				goto slow;
		}
	}

	return 0;

slow:
	return eb_relocate_slow(eb);
}
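/*
 * Serialisation phase: lock every object (ww_mutex with deadlock back-off),
 * flush stale CPU caches, add the request's dependencies and mark each vma
 * as active, honouring EXEC_OBJECT_ASYNC, EXEC_OBJECT_WRITE and
 * EXEC_OBJECT_CAPTURE along the way.
 */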

static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	struct ww_acquire_ctx acquire;
	unsigned int i;
	int err = 0;

	ww_acquire_init(&acquire, &reservation_ww_class);

	for (i = 0; i < count; i++) {
		struct i915_vma *vma = eb->vma[i];

		err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
		if (!err)
			continue;

		GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */

		if (err == -EDEADLK) {
			GEM_BUG_ON(i == 0);
			do {
				int j = i - 1;

				ww_mutex_unlock(&eb->vma[j]->resv->lock);

				swap(eb->flags[i], eb->flags[j]);
				swap(eb->vma[i], eb->vma[j]);
				eb->vma[i]->exec_flags = &eb->flags[i];
			} while (--i);
			GEM_BUG_ON(vma != eb->vma[0]);
			vma->exec_flags = &eb->flags[0];

			err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
							       &acquire);
		}
		if (err)
			break;
	}
	ww_acquire_done(&acquire);

	while (i--) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];
		struct drm_i915_gem_object *obj = vma->obj;

		assert_vma_held(vma);

		if (flags & EXEC_OBJECT_CAPTURE) {
			struct i915_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (capture) {
				capture->next = eb->request->capture_list;
				capture->vma = vma;
				eb->request->capture_list = capture;
			}
		}

		/*
		 * If the GPU is not _reading_ through the CPU cache, we need
		 * to make sure that any writes (both previous GPU writes from
		 * before a change in snooping levels and normal CPU writes)
		 * caught in that cache are flushed to main memory.
		 *
		 * We want to say
		 *	obj->cache_dirty &&
		 *	!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		 * but gcc's optimiser doesn't handle that as well and emits
		 * two jumps instead of one. Maybe one day...
		 */
		if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
			if (i915_gem_clflush_object(obj, 0))
				flags &= ~EXEC_OBJECT_ASYNC;
		}
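
		/*
		 * Worked example of the bit trick above (assuming the current
		 * flag layout, in which I915_BO_CACHE_COHERENT_FOR_READ is
		 * BIT(0)): a dirty object that is only coherent for writes
		 * has cache_dirty == 1 and cache_coherent == BIT(1), so
		 * 1 & ~BIT(1) is non-zero and we flush; once the object is
		 * coherent for reads, BIT(0) is set and the expression is 0.
		 */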

		if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
			err = i915_request_await_object
				(eb->request, obj, flags & EXEC_OBJECT_WRITE);
		}

		if (err == 0)
			err = i915_vma_move_to_active(vma, eb->request, flags);

		i915_vma_unlock(vma);

		__eb_unreserve_vma(vma, flags);
		vma->exec_flags = NULL;

		if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
			i915_vma_put(vma);
	}
	ww_acquire_fini(&acquire);

	if (unlikely(err))
		goto err_skip;

	eb->exec = NULL;

	/* Unconditionally flush any chipset caches (for streaming writes). */
	intel_gt_chipset_flush(eb->engine->gt);
	return 0;

err_skip:
	i915_request_skip(eb->request, err);
	return err;
}

static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
		if (exec->num_cliprects || exec->cliprects_ptr)
			return false;
	}

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
	u32 *cs;
	int i;

	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(rq, 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(4);
	for (i = 0; i < 4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
	struct intel_engine_pool_node *pool;
	struct i915_vma *vma;
	int err;

	pool = intel_engine_get_pool(eb->engine, eb->batch_len);
	if (IS_ERR(pool))
		return ERR_CAST(pool);

	err = intel_engine_cmd_parser(eb->engine,
				      eb->batch->obj,
				      pool->obj,
				      eb->batch_start_offset,
				      eb->batch_len,
				      is_master);
	if (err) {
		if (err == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(err);
		goto err;
	}

	vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto err;

	eb->vma[eb->buffer_count] = i915_vma_get(vma);
	eb->flags[eb->buffer_count] =
		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
	vma->exec_flags = &eb->flags[eb->buffer_count];
	eb->buffer_count++;

	vma->private = pool;
	return vma;

err:
	intel_engine_pool_put(pool);
	return vma;
}

static void
add_to_client(struct i915_request *rq, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	rq->file_priv = file_priv;

	spin_lock(&file_priv->mm.lock);
	list_add_tail(&rq->client_link, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);
}

static int
eb_submit(struct i915_execbuffer *eb)
{
	int err;

	err = eb_move_to_gpu(eb);
	if (err)
		return err;

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		err = i915_reset_gen7_sol_offsets(eb->request);
		if (err)
			return err;
	}

	/*
	 * Once we have completed waiting for other engines (using HW
	 * semaphores), we can signal that this request/batch is ready to run.
	 * This allows us to determine if the batch is still waiting on the
	 * GPU or actually running by checking the breadcrumb.
	 */
	if (eb->engine->emit_init_breadcrumb) {
		err = eb->engine->emit_init_breadcrumb(eb->request);
		if (err)
			return err;
	}

	err = eb->engine->emit_bb_start(eb->request,
					eb->batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->batch_flags);
	if (err)
		return err;

	return 0;
}

static int num_vcs_engines(const struct drm_i915_private *i915)
{
	return hweight64(INTEL_INFO(i915)->engine_mask &
			 GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
}

/*
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine =
			get_random_int() % num_vcs_engines(dev_priv);

	return file_priv->bsd_engine;
}

static const enum intel_engine_id user_ring_map[] = {
	[I915_EXEC_DEFAULT] = RCS0,
	[I915_EXEC_RENDER] = RCS0,
	[I915_EXEC_BLT] = BCS0,
	[I915_EXEC_BSD] = VCS0,
	[I915_EXEC_VEBOX] = VECS0
};

static struct i915_request *eb_throttle(struct intel_context *ce)
{
	struct intel_ring *ring = ce->ring;
	struct intel_timeline *tl = ce->timeline;
	struct i915_request *rq;

	/*
	 * Completely unscientific finger-in-the-air estimates for suitable
	 * maximum user request size (to avoid blocking) and then backoff.
	 */
	if (intel_ring_update_space(ring) >= PAGE_SIZE)
		return NULL;

	/*
	 * Find a request such that, after waiting upon it, there will be at
	 * least half the ring available. The hysteresis allows us to compete
	 * for the shared ring and should mean that we sleep less often prior
	 * to claiming our resources, but not so long that the ring completely
	 * drains before we can submit our next request.
	 */
	list_for_each_entry(rq, &tl->requests, link) {
		if (rq->ring != ring)
			continue;

		if (__intel_ring_space(rq->postfix,
				       ring->emit, ring->size) > ring->size / 2)
			break;
	}
	if (&rq->link == &tl->requests)
		return NULL; /* weird, we will check again later for real */

	return i915_request_get(rq);
}
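
/*
 * Worked example for the hysteresis above (illustrative numbers only):
 * with a 16 KiB ring that has less than PAGE_SIZE free, we walk the
 * timeline for the first request whose completion would leave more than
 * 16384 / 2 bytes of space, e.g. one for which
 *
 *	__intel_ring_space(rq->postfix, ring->emit, ring->size) == 10240
 *
 * Waiting for that single request therefore frees ~10 KiB before we
 * attempt to emit our own commands, rather than waiting for the ring
 * to drain completely.
 */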

static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
{
	struct intel_timeline *tl;
	struct i915_request *rq;
	int err;

	/*
	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	err = intel_gt_terminally_wedged(ce->engine->gt);
	if (err)
		return err;

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	err = intel_context_pin(ce);
	if (err)
		return err;

	/*
	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process, and require the engine to be kept awake between accesses.
	 * Upon dispatch, we acquire another prolonged wakeref that we hold
	 * until the timeline is idle, which in turn releases the wakeref
	 * taken on the engine, and the parent device.
	 */
	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl)) {
		err = PTR_ERR(tl);
		goto err_unpin;
	}

	intel_context_enter(ce);
	rq = eb_throttle(ce);

	intel_context_timeline_unlock(tl);

	if (rq) {
		if (i915_request_wait(rq,
				      I915_WAIT_INTERRUPTIBLE,
				      MAX_SCHEDULE_TIMEOUT) < 0) {
			i915_request_put(rq);
			err = -EINTR;
			goto err_exit;
		}

		i915_request_put(rq);
	}

	eb->engine = ce->engine;
	eb->context = ce;
	return 0;

err_exit:
	mutex_lock(&tl->mutex);
	intel_context_exit(ce);
	intel_context_timeline_unlock(tl);
err_unpin:
	intel_context_unpin(ce);
	return err;
}

static void eb_unpin_engine(struct i915_execbuffer *eb)
{
	struct intel_context *ce = eb->context;
	struct intel_timeline *tl = ce->timeline;

	mutex_lock(&tl->mutex);
	intel_context_exit(ce);
	mutex_unlock(&tl->mutex);

	intel_context_unpin(ce);
}

static unsigned int
eb_select_legacy_ring(struct i915_execbuffer *eb,
		      struct drm_file *file,
		      struct drm_i915_gem_execbuffer2 *args)
{
	struct drm_i915_private *i915 = eb->i915;
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;

	if (user_ring_id != I915_EXEC_BSD &&
	    (args->flags & I915_EXEC_BSD_MASK)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return -1;
	}

	if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(i915, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return -1;
		}

		return _VCS(bsd_idx);
	}

	if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return -1;
	}

	return user_ring_map[user_ring_id];
}
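
/*
 * Hypothetical userspace sketch of the legacy decode above: explicitly
 * requesting the second video engine. I915_EXEC_BSD_RING2 is
 * (2 << I915_EXEC_BSD_SHIFT), so eb_select_legacy_ring() shifts it down
 * to 2, decrements it to 1 and returns _VCS(1), i.e. VCS1.
 */
#if 0
	struct drm_i915_gem_execbuffer2 execbuf = {
		.flags = I915_EXEC_BSD | I915_EXEC_BSD_RING2,
	};
#endif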

static int
eb_pin_engine(struct i915_execbuffer *eb,
	      struct drm_file *file,
	      struct drm_i915_gem_execbuffer2 *args)
{
	struct intel_context *ce;
	unsigned int idx;
	int err;

	if (i915_gem_context_user_engines(eb->gem_context))
		idx = args->flags & I915_EXEC_RING_MASK;
	else
		idx = eb_select_legacy_ring(eb, file, args);

	ce = i915_gem_context_get_engine(eb->gem_context, idx);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = __eb_pin_engine(eb, ce);
	intel_context_put(ce);

	return err;
}

static void
__free_fence_array(struct drm_syncobj **fences, unsigned int n)
{
	while (n--)
		drm_syncobj_put(ptr_mask_bits(fences[n], 2));
	kvfree(fences);
}

static struct drm_syncobj **
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_file *file)
{
	const unsigned long nfences = args->num_cliprects;
	struct drm_i915_gem_exec_fence __user *user;
	struct drm_syncobj **fences;
	unsigned long n;
	int err;

	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
		return NULL;

	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
	if (nfences > min_t(unsigned long,
			    ULONG_MAX / sizeof(*user),
			    SIZE_MAX / sizeof(*fences)))
		return ERR_PTR(-EINVAL);

	user = u64_to_user_ptr(args->cliprects_ptr);
	if (!access_ok(user, nfences * sizeof(*user)))
		return ERR_PTR(-EFAULT);

	fences = kvmalloc_array(nfences, sizeof(*fences),
				__GFP_NOWARN | GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	for (n = 0; n < nfences; n++) {
		struct drm_i915_gem_exec_fence fence;
		struct drm_syncobj *syncobj;

		if (__copy_from_user(&fence, user++, sizeof(fence))) {
			err = -EFAULT;
			goto err;
		}

		if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
			err = -EINVAL;
			goto err;
		}

		syncobj = drm_syncobj_find(file, fence.handle);
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
			err = -ENOENT;
			goto err;
		}

		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);

		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
	}

	return fences;

err:
	__free_fence_array(fences, n);
	return ERR_PTR(err);
}

static void
put_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_syncobj **fences)
{
	if (fences)
		__free_fence_array(fences, args->num_cliprects);
}

static int
await_fence_array(struct i915_execbuffer *eb,
		  struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	unsigned int n;
	int err;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		struct dma_fence *fence;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_WAIT))
			continue;

		fence = drm_syncobj_fence_get(syncobj);
		if (!fence)
			return -EINVAL;

		err = i915_request_await_dma_fence(eb->request, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

static void
signal_fence_array(struct i915_execbuffer *eb,
		   struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	struct dma_fence * const fence = &eb->request->fence;
	unsigned int n;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_SIGNAL))
			continue;

		drm_syncobj_replace_fence(syncobj, fence);
	}
}
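
/*
 * Hypothetical userspace sketch of I915_EXEC_FENCE_ARRAY as consumed by
 * get_fence_array(), await_fence_array() and signal_fence_array() above:
 * the fence array is passed through the (otherwise unused) cliprects
 * fields.
 */
#if 0
	struct drm_i915_gem_exec_fence fences[2] = {
		{ .handle = wait_syncobj,   .flags = I915_EXEC_FENCE_WAIT },
		{ .handle = signal_syncobj, .flags = I915_EXEC_FENCE_SIGNAL },
	};
	struct drm_i915_gem_execbuffer2 execbuf = {
		.flags = I915_EXEC_FENCE_ARRAY,
		.cliprects_ptr = (uintptr_t)fences,
		.num_cliprects = 2,
	};
#endif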

static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct drm_syncobj **fences)
{
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct dma_fence *exec_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int err;

	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	eb.i915 = to_i915(dev);
	eb.file = file;
	eb.args = args;
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
		args->flags |= __EXEC_HAS_RELOC;

	eb.exec = exec;
	eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
	eb.vma[0] = NULL;
	eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);

	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.buffer_count = args->buffer_count;
	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;

	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.batch_flags |= I915_DISPATCH_PINNED;

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_SUBMIT) {
		if (in_fence) {
			err = -EINVAL;
			goto err_in_fence;
		}

		exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!exec_fence) {
			err = -EINVAL;
			goto err_in_fence;
		}
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			err = out_fence_fd;
			goto err_exec_fence;
		}
	}
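
	/*
	 * Sketch of the fd plumbing above from a hypothetical userspace
	 * caller (the _WR ioctl variant writes rsvd2 back):
	 *
	 *	execbuf.flags |= I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
	 *	execbuf.rsvd2 = in_fence_fd;
	 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &execbuf);
	 *	out_fence_fd = execbuf.rsvd2 >> 32;
	 */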

	err = eb_create(&eb);
	if (err)
		goto err_out_fence;

	GEM_BUG_ON(!eb.lut_size);

	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_destroy;

	err = eb_pin_engine(&eb, file, args);
	if (unlikely(err))
		goto err_context;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto err_engine;

	err = eb_relocate(&eb);
	if (err) {
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
		goto err_vma;
	}

	if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		err = -EINVAL;
		goto err_vma;
	}
	if (eb.batch_start_offset > eb.batch->size ||
	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (eb_use_cmdparser(&eb)) {
		struct i915_vma *vma;

		vma = eb_parse(&eb, drm_is_current_master(file));
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			eb.batch_flags |= I915_DISPATCH_SECURE;
			eb.batch_start_offset = 0;
			eb.batch = vma;
		}
	}

	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;

	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again.
	 */
	if (eb.batch_flags & I915_DISPATCH_SECURE) {
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch
		 * here outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		eb.batch = vma;
	}

	/* All GPU relocation batches must be submitted prior to the user rq */
	GEM_BUG_ON(eb.reloc_cache.rq);

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_request_create(eb.context);
	if (IS_ERR(eb.request)) {
		err = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		err = i915_request_await_dma_fence(eb.request, in_fence);
		if (err < 0)
			goto err_request;
	}

	if (exec_fence) {
		err = i915_request_await_execution(eb.request, exec_fence,
						   eb.engine->bond_execute);
		if (err < 0)
			goto err_request;
	}

	if (fences) {
		err = await_fence_array(&eb, fences);
		if (err)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			err = -ENOMEM;
			goto err_request;
		}
	}

	/*
	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = eb.batch;
	if (eb.batch->private)
		intel_engine_pool_mark_active(eb.batch->private, eb.request);

	trace_i915_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb);
err_request:
	add_to_client(eb.request, file);
	i915_request_add(eb.request);

	if (fences)
		signal_fence_array(&eb, fences);

	if (out_fence) {
		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
	if (eb.batch->private)
		intel_engine_pool_put(eb.batch->private);
err_vma:
	if (eb.exec)
		eb_release_vmas(&eb);
	mutex_unlock(&dev->struct_mutex);
err_engine:
	eb_unpin_engine(&eb);
err_context:
	i915_gem_context_put(eb.gem_context);
err_destroy:
	eb_destroy(&eb);
err_out_fence:
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_exec_fence:
	dma_fence_put(exec_fence);
err_in_fence:
	dma_fence_put(in_fence);
	return err;
}

static size_t eb_element_size(void)
{
	return (sizeof(struct drm_i915_gem_exec_object2) +
		sizeof(struct i915_vma *) +
		sizeof(unsigned int));
}

static bool check_buffer_count(size_t count)
{
	const size_t sz = eb_element_size();

	/*
	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
	 * array size (see eb_create()). Otherwise, we can accept an array as
	 * large as can be addressed (though use large arrays at your peril)!
	 */

	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}
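
/*
 * Worked example for the final term above: both execbuffer ioctls later
 * call kvmalloc_array(count + 1, eb_element_size(), ...) to reserve the
 * extra command-parser slot, so we need (count + 1) * sz <= SIZE_MAX,
 * i.e. count <= SIZE_MAX / sz - 1.
 */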

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	const size_t count = args->buffer_count;
	unsigned int i;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf with %zd buffers\n", count);
		return -EINVAL;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	if (!i915_gem_check_execbuffer(&exec2))
		return -EINVAL;

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(count, sizeof(*exec_list),
				   __GFP_NOWARN | GFP_KERNEL);
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}
	err = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * count);
	if (err) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, err);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
	if (exec2.flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset &
						    PIN_OFFSET_MASK);
			exec2_list[i].offset &= PIN_OFFSET_MASK;
			if (__copy_to_user(&user_exec_list[i].offset,
					   &exec2_list[i].offset,
					   sizeof(user_exec_list[i].offset)))
				break;
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return err;
}
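
/*
 * Worked example for gen8_canonical_addr() as used in the copy-back
 * loops: a "canonical" address replicates bit 47 through bits 63:48,
 * so a GTT offset of 0x0000800000000000 sign-extends to
 * 0xffff800000000000, while any offset below bit 47 is unchanged.
 */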

int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	struct drm_syncobj **fences = NULL;
	const size_t count = args->buffer_count;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	/* Allocate an extra slot for use by the command parser */
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
			  count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * count)) {
		DRM_DEBUG("copy %zd exec entries failed\n", count);
		kvfree(exec2_list);
		return -EFAULT;
	}

	if (args->flags & I915_EXEC_FENCE_ARRAY) {
		fences = get_fence_array(args, file);
		if (IS_ERR(fences)) {
			kvfree(exec2_list);
			return PTR_ERR(fences);
		}
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		/*
		 * Note: count * sizeof(*user_exec_list) does not overflow,
		 * because we checked 'count' in check_buffer_count().
		 *
		 * And this range already got effectively checked earlier
		 * when we did the "copy_from_user()" above.
		 */
		if (!user_access_begin(user_exec_list,
				       count * sizeof(*user_exec_list)))
			goto end;

		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset &
						    PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
end_user:
		user_access_end();
end:;
	}

	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	put_fence_array(args, fences);
	kvfree(exec2_list);
	return err;
}
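
/*
 * A minimal, hypothetical userspace sketch (not driver code) tying the
 * two entry points above together: submitting a single batch through
 * DRM_IOCTL_I915_GEM_EXECBUFFER2. The batch object is assumed to already
 * contain valid commands terminated by MI_BATCH_BUFFER_END.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_submit(int drm_fd, uint32_t batch_handle,
			  uint32_t batch_len)
{
	struct drm_i915_gem_exec_object2 obj = {
		.handle = batch_handle,
	};
	struct drm_i915_gem_execbuffer2 execbuf = {
		.buffers_ptr = (uintptr_t)&obj,
		.buffer_count = 1,	/* the batch is the last entry */
		.batch_len = batch_len,
		.flags = I915_EXEC_RENDER,
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}
#endif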