/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008,2010 Intel Corporation
 */

#include <linux/intel-iommu.h>
#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_syncobj.h>

#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

struct eb_vma {
	struct i915_vma *vma;
	unsigned int flags;

	/** This vma's place in the execbuf reservation list */
	struct drm_i915_gem_exec_object2 *exec;
	struct list_head bind_link;
	struct list_head reloc_link;

	struct hlist_node node;
	u32 handle;
};

struct eb_vma_array {
	struct kref kref;
	struct eb_vma vma[];
};

enum {
	FORCE_CPU_RELOC = 1,
	FORCE_GTT_RELOC,
	FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};

#define __EXEC_OBJECT_HAS_PIN		BIT(31)
#define __EXEC_OBJECT_HAS_FENCE		BIT(30)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(29)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(28)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 28) /* all of the above */

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_INTERNAL_FLAGS	(~0u << 31)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | \
	 I915_EXEC_CONSTANTS_MASK | \
	 I915_EXEC_RESOURCE_STREAMER)

/* Catch emission of unexpected errors for CI! */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#undef EINVAL
#define EINVAL ({ \
	DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
	22; \
})
#endif

/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try and avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
 * At the level of talking to the hardware, submitting a batchbuffer for the
 * GPU to execute is to add content to a buffer from which the HW
 * command streamer is reading.
 *
 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
 *    Execlists, this command is not placed on the same buffer as the
 *    remaining items.
 *
 * 2. Add a command to invalidate caches to the buffer.
 *
 * 3. Add a batchbuffer start command to the buffer; the start command is
 *    essentially a token together with the GPU address of the batchbuffer
 *    to be executed.
 *
 * 4. Add a pipeline flush to the buffer.
 *
 * 5. Add a memory write command to the buffer to record when the GPU
 *    is done executing the batchbuffer. The memory write writes the
 *    global sequence number of the request, ``i915_request::global_seqno``;
 *    the i915 driver uses the current value in the register to determine
 *    if the GPU has completed the batchbuffer.
 *
 * 6. Add a user interrupt command to the buffer. This command instructs
 *    the GPU to issue an interrupt when the command, pipeline flush and
 *    memory write are completed.
 *
 * 7. Inform the hardware of the additional commands added to the buffer
 *    (by updating the tail pointer).
 *
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object
 * 3. Relocation - Update any addresses to point to the final locations
 * 4. Serialisation - Order the request with respect to its dependencies
 * 5. Construction - Construct a request to execute the batchbuffer
 * 6. Submission (at some point in the future execution)
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 *      The addresses written in the objects must match the corresponding
 *      reloc.presumed_offset which in turn must match the corresponding
 *      execobject.offset.
 *
 *      Any render targets written to in the batch must be flagged with
 *      EXEC_OBJECT_WRITE.
 *
 *      To avoid stalling, execobject.offset should match the current
 *      address of that object within the active context.
 *
 * The reservation is done in multiple phases. First we try to keep any
 * object already bound in its current location - so long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf one last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. Such objects are
 * simpler to place (the location is known a priori): all we have to do is make
 * sure the space is available.
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoid the pagefaults) which requires that we never wait. If
 * we detect that we may wait, or if we need to fault, then we have to fallback
 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
 * bells yet?) Dropping the mutex means that we lose all the state we have
 * built up so far for the execbuf and we must reset any global data. However,
 * we do leave the objects pinned in their final locations - which is a
 * potential issue for concurrent execbufs. Once we have left the mutex, we can
 * allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using NORELOC, we expect userspace to provide
 * this information instead. We also check whether we can skip the relocation
 * by comparing the expected value inside the relocation entry with the target's
 * final address. If they differ, we have to map the current object and rewrite
 * the 4 or 8 byte pointer within.
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules, any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before,
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but the simplest is to wait on the CPU until
 * all dependencies are resolved.
 *
 * After all of that, it is just a matter of closing the request and handing it
 * to the hardware (well, leaving it in a queue to be executed). However, we
 * also offer the ability for batchbuffers to be run with elevated privileges
 * so that they can access otherwise hidden registers. (Used to adjust L3 cache
 * etc.) Before any batch is given extra privileges we first must check that it
 * contains no nefarious instructions: we check that each instruction is from
 * our whitelist and all registers are also from an allowed list. We first
 * copy the user's batchbuffer to a shadow (so that the user doesn't have
 * access to it, either by the CPU or GPU as we scan it) and then parse each
 * instruction. If everything is ok, we set a flag telling the hardware to run
 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
 */

struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
	struct eb_vma *vma;

	struct intel_engine_cs *engine; /** engine to queue the request to */
	struct intel_context *context; /* logical state for the request */
	struct i915_gem_context *gem_context; /** caller's context */

	struct i915_request *request; /** our request to build */
	struct eb_vma *batch; /** identity of the batch obj/vma */
	struct i915_vma *trampoline; /** trampoline used for chaining */

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned long vaddr; /** Current kmap address */
		unsigned long page; /** Currently mapped page index */
		unsigned int gen; /** Cached value of INTEL_GEN */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;

		struct i915_request *rq;
		u32 *rq_cmd;
		unsigned int rq_size;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */
	u32 context_flags; /** Set of execobj.flags to insert from the ctx */

	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_len; /** Length of batch within object */
	u32 batch_flags; /** Flags composed for emit_bb_start() */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
287 */ 288 int lut_size; 289 struct hlist_head *buckets; /** ht for relocation handles */ 290 struct eb_vma_array *array; 291 }; 292 293 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) 294 { 295 return intel_engine_requires_cmd_parser(eb->engine) || 296 (intel_engine_using_cmd_parser(eb->engine) && 297 eb->args->batch_len); 298 } 299 300 static struct eb_vma_array *eb_vma_array_create(unsigned int count) 301 { 302 struct eb_vma_array *arr; 303 304 arr = kvmalloc(struct_size(arr, vma, count), GFP_KERNEL | __GFP_NOWARN); 305 if (!arr) 306 return NULL; 307 308 kref_init(&arr->kref); 309 arr->vma[0].vma = NULL; 310 311 return arr; 312 } 313 314 static inline void eb_unreserve_vma(struct eb_vma *ev) 315 { 316 struct i915_vma *vma = ev->vma; 317 318 if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE)) 319 __i915_vma_unpin_fence(vma); 320 321 if (ev->flags & __EXEC_OBJECT_HAS_PIN) 322 __i915_vma_unpin(vma); 323 324 ev->flags &= ~(__EXEC_OBJECT_HAS_PIN | 325 __EXEC_OBJECT_HAS_FENCE); 326 } 327 328 static void eb_vma_array_destroy(struct kref *kref) 329 { 330 struct eb_vma_array *arr = container_of(kref, typeof(*arr), kref); 331 struct eb_vma *ev = arr->vma; 332 333 while (ev->vma) { 334 eb_unreserve_vma(ev); 335 i915_vma_put(ev->vma); 336 ev++; 337 } 338 339 kvfree(arr); 340 } 341 342 static void eb_vma_array_put(struct eb_vma_array *arr) 343 { 344 kref_put(&arr->kref, eb_vma_array_destroy); 345 } 346 347 static int eb_create(struct i915_execbuffer *eb) 348 { 349 /* Allocate an extra slot for use by the command parser + sentinel */ 350 eb->array = eb_vma_array_create(eb->buffer_count + 2); 351 if (!eb->array) 352 return -ENOMEM; 353 354 eb->vma = eb->array->vma; 355 356 if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) { 357 unsigned int size = 1 + ilog2(eb->buffer_count); 358 359 /* 360 * Without a 1:1 association between relocation handles and 361 * the execobject[] index, we instead create a hashtable. 362 * We size it dynamically based on available memory, starting 363 * first with 1:1 assocative hash and scaling back until 364 * the allocation succeeds. 365 * 366 * Later on we use a positive lut_size to indicate we are 367 * using this hashtable, and a negative value to indicate a 368 * direct lookup. 369 */ 370 do { 371 gfp_t flags; 372 373 /* While we can still reduce the allocation size, don't 374 * raise a warning and allow the allocation to fail. 375 * On the last pass though, we want to try as hard 376 * as possible to perform the allocation and warn 377 * if it fails. 
378 */ 379 flags = GFP_KERNEL; 380 if (size > 1) 381 flags |= __GFP_NORETRY | __GFP_NOWARN; 382 383 eb->buckets = kzalloc(sizeof(struct hlist_head) << size, 384 flags); 385 if (eb->buckets) 386 break; 387 } while (--size); 388 389 if (unlikely(!size)) { 390 eb_vma_array_put(eb->array); 391 return -ENOMEM; 392 } 393 394 eb->lut_size = size; 395 } else { 396 eb->lut_size = -eb->buffer_count; 397 } 398 399 return 0; 400 } 401 402 static bool 403 eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry, 404 const struct i915_vma *vma, 405 unsigned int flags) 406 { 407 if (vma->node.size < entry->pad_to_size) 408 return true; 409 410 if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment)) 411 return true; 412 413 if (flags & EXEC_OBJECT_PINNED && 414 vma->node.start != entry->offset) 415 return true; 416 417 if (flags & __EXEC_OBJECT_NEEDS_BIAS && 418 vma->node.start < BATCH_OFFSET_BIAS) 419 return true; 420 421 if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) && 422 (vma->node.start + vma->node.size - 1) >> 32) 423 return true; 424 425 if (flags & __EXEC_OBJECT_NEEDS_MAP && 426 !i915_vma_is_map_and_fenceable(vma)) 427 return true; 428 429 return false; 430 } 431 432 static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry, 433 unsigned int exec_flags) 434 { 435 u64 pin_flags = 0; 436 437 if (exec_flags & EXEC_OBJECT_NEEDS_GTT) 438 pin_flags |= PIN_GLOBAL; 439 440 /* 441 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset, 442 * limit address to the first 4GBs for unflagged objects. 443 */ 444 if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) 445 pin_flags |= PIN_ZONE_4G; 446 447 if (exec_flags & __EXEC_OBJECT_NEEDS_MAP) 448 pin_flags |= PIN_MAPPABLE; 449 450 if (exec_flags & EXEC_OBJECT_PINNED) 451 pin_flags |= entry->offset | PIN_OFFSET_FIXED; 452 else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) 453 pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; 454 455 return pin_flags; 456 } 457 458 static inline bool 459 eb_pin_vma(struct i915_execbuffer *eb, 460 const struct drm_i915_gem_exec_object2 *entry, 461 struct eb_vma *ev) 462 { 463 struct i915_vma *vma = ev->vma; 464 u64 pin_flags; 465 466 if (vma->node.size) 467 pin_flags = vma->node.start; 468 else 469 pin_flags = entry->offset & PIN_OFFSET_MASK; 470 471 pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED; 472 if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT)) 473 pin_flags |= PIN_GLOBAL; 474 475 /* Attempt to reuse the current location if available */ 476 if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) { 477 if (entry->flags & EXEC_OBJECT_PINNED) 478 return false; 479 480 /* Failing that pick any _free_ space if suitable */ 481 if (unlikely(i915_vma_pin(vma, 482 entry->pad_to_size, 483 entry->alignment, 484 eb_pin_flags(entry, ev->flags) | 485 PIN_USER | PIN_NOEVICT))) 486 return false; 487 } 488 489 if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) { 490 if (unlikely(i915_vma_pin_fence(vma))) { 491 i915_vma_unpin(vma); 492 return false; 493 } 494 495 if (vma->fence) 496 ev->flags |= __EXEC_OBJECT_HAS_FENCE; 497 } 498 499 ev->flags |= __EXEC_OBJECT_HAS_PIN; 500 return !eb_vma_misplaced(entry, vma, ev->flags); 501 } 502 503 static int 504 eb_validate_vma(struct i915_execbuffer *eb, 505 struct drm_i915_gem_exec_object2 *entry, 506 struct i915_vma *vma) 507 { 508 if (unlikely(entry->flags & eb->invalid_flags)) 509 return -EINVAL; 510 511 if (unlikely(entry->alignment && 512 !is_power_of_2_u64(entry->alignment))) 513 return -EINVAL; 514 515 /* 516 * Offset can be used as input 
(EXEC_OBJECT_PINNED), reject 517 * any non-page-aligned or non-canonical addresses. 518 */ 519 if (unlikely(entry->flags & EXEC_OBJECT_PINNED && 520 entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK))) 521 return -EINVAL; 522 523 /* pad_to_size was once a reserved field, so sanitize it */ 524 if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) { 525 if (unlikely(offset_in_page(entry->pad_to_size))) 526 return -EINVAL; 527 } else { 528 entry->pad_to_size = 0; 529 } 530 /* 531 * From drm_mm perspective address space is continuous, 532 * so from this point we're always using non-canonical 533 * form internally. 534 */ 535 entry->offset = gen8_noncanonical_addr(entry->offset); 536 537 if (!eb->reloc_cache.has_fence) { 538 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; 539 } else { 540 if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE || 541 eb->reloc_cache.needs_unfenced) && 542 i915_gem_object_is_tiled(vma->obj)) 543 entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP; 544 } 545 546 if (!(entry->flags & EXEC_OBJECT_PINNED)) 547 entry->flags |= eb->context_flags; 548 549 return 0; 550 } 551 552 static void 553 eb_add_vma(struct i915_execbuffer *eb, 554 unsigned int i, unsigned batch_idx, 555 struct i915_vma *vma) 556 { 557 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; 558 struct eb_vma *ev = &eb->vma[i]; 559 560 GEM_BUG_ON(i915_vma_is_closed(vma)); 561 562 ev->vma = vma; 563 ev->exec = entry; 564 ev->flags = entry->flags; 565 566 if (eb->lut_size > 0) { 567 ev->handle = entry->handle; 568 hlist_add_head(&ev->node, 569 &eb->buckets[hash_32(entry->handle, 570 eb->lut_size)]); 571 } 572 573 if (entry->relocation_count) 574 list_add_tail(&ev->reloc_link, &eb->relocs); 575 576 /* 577 * SNA is doing fancy tricks with compressing batch buffers, which leads 578 * to negative relocation deltas. Usually that works out ok since the 579 * relocate address is still positive, except when the batch is placed 580 * very low in the GTT. Ensure this doesn't happen. 581 * 582 * Note that actual hangs have only been observed on gen7, but for 583 * paranoia do it everywhere. 
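 * (Hence, below, a batch that has relocations and is not explicitly pinned
 * is given __EXEC_OBJECT_NEEDS_BIAS, which eb_pin_flags() turns into
 * PIN_OFFSET_BIAS with BATCH_OFFSET_BIAS (256KiB), and eb_vma_misplaced()
 * rejects any placement below that mark.)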
584 */ 585 if (i == batch_idx) { 586 if (entry->relocation_count && 587 !(ev->flags & EXEC_OBJECT_PINNED)) 588 ev->flags |= __EXEC_OBJECT_NEEDS_BIAS; 589 if (eb->reloc_cache.has_fence) 590 ev->flags |= EXEC_OBJECT_NEEDS_FENCE; 591 592 eb->batch = ev; 593 } 594 595 if (eb_pin_vma(eb, entry, ev)) { 596 if (entry->offset != vma->node.start) { 597 entry->offset = vma->node.start | UPDATE; 598 eb->args->flags |= __EXEC_HAS_RELOC; 599 } 600 } else { 601 eb_unreserve_vma(ev); 602 list_add_tail(&ev->bind_link, &eb->unbound); 603 } 604 } 605 606 static inline int use_cpu_reloc(const struct reloc_cache *cache, 607 const struct drm_i915_gem_object *obj) 608 { 609 if (!i915_gem_object_has_struct_page(obj)) 610 return false; 611 612 if (DBG_FORCE_RELOC == FORCE_CPU_RELOC) 613 return true; 614 615 if (DBG_FORCE_RELOC == FORCE_GTT_RELOC) 616 return false; 617 618 return (cache->has_llc || 619 obj->cache_dirty || 620 obj->cache_level != I915_CACHE_NONE); 621 } 622 623 static int eb_reserve_vma(const struct i915_execbuffer *eb, 624 struct eb_vma *ev, 625 u64 pin_flags) 626 { 627 struct drm_i915_gem_exec_object2 *entry = ev->exec; 628 struct i915_vma *vma = ev->vma; 629 int err; 630 631 if (drm_mm_node_allocated(&vma->node) && 632 eb_vma_misplaced(entry, vma, ev->flags)) { 633 err = i915_vma_unbind(vma); 634 if (err) 635 return err; 636 } 637 638 err = i915_vma_pin(vma, 639 entry->pad_to_size, entry->alignment, 640 eb_pin_flags(entry, ev->flags) | pin_flags); 641 if (err) 642 return err; 643 644 if (entry->offset != vma->node.start) { 645 entry->offset = vma->node.start | UPDATE; 646 eb->args->flags |= __EXEC_HAS_RELOC; 647 } 648 649 if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) { 650 err = i915_vma_pin_fence(vma); 651 if (unlikely(err)) { 652 i915_vma_unpin(vma); 653 return err; 654 } 655 656 if (vma->fence) 657 ev->flags |= __EXEC_OBJECT_HAS_FENCE; 658 } 659 660 ev->flags |= __EXEC_OBJECT_HAS_PIN; 661 GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags)); 662 663 return 0; 664 } 665 666 static int eb_reserve(struct i915_execbuffer *eb) 667 { 668 const unsigned int count = eb->buffer_count; 669 unsigned int pin_flags = PIN_USER | PIN_NONBLOCK; 670 struct list_head last; 671 struct eb_vma *ev; 672 unsigned int i, pass; 673 int err = 0; 674 675 /* 676 * Attempt to pin all of the buffers into the GTT. 677 * This is done in 3 phases: 678 * 679 * 1a. Unbind all objects that do not match the GTT constraints for 680 * the execbuffer (fenceable, mappable, alignment etc). 681 * 1b. Increment pin count for already bound objects. 682 * 2. Bind new objects. 683 * 3. Decrement pin count. 684 * 685 * This avoid unnecessary unbinding of later objects in order to make 686 * room for the earlier objects *unless* we need to defragment. 
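 * (The loop below implements this as retry passes: pass 0 retries with the
 * objects resorted into priority order, pass 1 evicts the entire VM via
 * i915_gem_evict_vm() and retries once more, and any further pass gives up
 * with -ENOSPC.)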
687 */ 688 689 if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex)) 690 return -EINTR; 691 692 pass = 0; 693 do { 694 list_for_each_entry(ev, &eb->unbound, bind_link) { 695 err = eb_reserve_vma(eb, ev, pin_flags); 696 if (err) 697 break; 698 } 699 if (!(err == -ENOSPC || err == -EAGAIN)) 700 break; 701 702 /* Resort *all* the objects into priority order */ 703 INIT_LIST_HEAD(&eb->unbound); 704 INIT_LIST_HEAD(&last); 705 for (i = 0; i < count; i++) { 706 unsigned int flags; 707 708 ev = &eb->vma[i]; 709 flags = ev->flags; 710 if (flags & EXEC_OBJECT_PINNED && 711 flags & __EXEC_OBJECT_HAS_PIN) 712 continue; 713 714 eb_unreserve_vma(ev); 715 716 if (flags & EXEC_OBJECT_PINNED) 717 /* Pinned must have their slot */ 718 list_add(&ev->bind_link, &eb->unbound); 719 else if (flags & __EXEC_OBJECT_NEEDS_MAP) 720 /* Map require the lowest 256MiB (aperture) */ 721 list_add_tail(&ev->bind_link, &eb->unbound); 722 else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) 723 /* Prioritise 4GiB region for restricted bo */ 724 list_add(&ev->bind_link, &last); 725 else 726 list_add_tail(&ev->bind_link, &last); 727 } 728 list_splice_tail(&last, &eb->unbound); 729 730 if (err == -EAGAIN) { 731 mutex_unlock(&eb->i915->drm.struct_mutex); 732 flush_workqueue(eb->i915->mm.userptr_wq); 733 mutex_lock(&eb->i915->drm.struct_mutex); 734 continue; 735 } 736 737 switch (pass++) { 738 case 0: 739 break; 740 741 case 1: 742 /* Too fragmented, unbind everything and retry */ 743 mutex_lock(&eb->context->vm->mutex); 744 err = i915_gem_evict_vm(eb->context->vm); 745 mutex_unlock(&eb->context->vm->mutex); 746 if (err) 747 goto unlock; 748 break; 749 750 default: 751 err = -ENOSPC; 752 goto unlock; 753 } 754 755 pin_flags = PIN_USER; 756 } while (1); 757 758 unlock: 759 mutex_unlock(&eb->i915->drm.struct_mutex); 760 return err; 761 } 762 763 static unsigned int eb_batch_index(const struct i915_execbuffer *eb) 764 { 765 if (eb->args->flags & I915_EXEC_BATCH_FIRST) 766 return 0; 767 else 768 return eb->buffer_count - 1; 769 } 770 771 static int eb_select_context(struct i915_execbuffer *eb) 772 { 773 struct i915_gem_context *ctx; 774 775 ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1); 776 if (unlikely(!ctx)) 777 return -ENOENT; 778 779 eb->gem_context = ctx; 780 if (rcu_access_pointer(ctx->vm)) 781 eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT; 782 783 eb->context_flags = 0; 784 if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags)) 785 eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS; 786 787 return 0; 788 } 789 790 static int __eb_add_lut(struct i915_execbuffer *eb, 791 u32 handle, struct i915_vma *vma) 792 { 793 struct i915_gem_context *ctx = eb->gem_context; 794 struct i915_lut_handle *lut; 795 int err; 796 797 lut = i915_lut_handle_alloc(); 798 if (unlikely(!lut)) 799 return -ENOMEM; 800 801 i915_vma_get(vma); 802 if (!atomic_fetch_inc(&vma->open_count)) 803 i915_vma_reopen(vma); 804 lut->handle = handle; 805 lut->ctx = ctx; 806 807 /* Check that the context hasn't been closed in the meantime */ 808 err = -EINTR; 809 if (!mutex_lock_interruptible(&ctx->mutex)) { 810 err = -ENOENT; 811 if (likely(!i915_gem_context_is_closed(ctx))) 812 err = radix_tree_insert(&ctx->handles_vma, handle, vma); 813 if (err == 0) { /* And nor has this handle */ 814 struct drm_i915_gem_object *obj = vma->obj; 815 816 i915_gem_object_lock(obj); 817 if (idr_find(&eb->file->object_idr, handle) == obj) { 818 list_add(&lut->obj_link, &obj->lut_list); 819 } else { 820 radix_tree_delete(&ctx->handles_vma, handle); 821 err = -ENOENT; 
822 } 823 i915_gem_object_unlock(obj); 824 } 825 mutex_unlock(&ctx->mutex); 826 } 827 if (unlikely(err)) 828 goto err; 829 830 return 0; 831 832 err: 833 i915_vma_close(vma); 834 i915_vma_put(vma); 835 i915_lut_handle_free(lut); 836 return err; 837 } 838 839 static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle) 840 { 841 do { 842 struct drm_i915_gem_object *obj; 843 struct i915_vma *vma; 844 int err; 845 846 rcu_read_lock(); 847 vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle); 848 if (likely(vma)) 849 vma = i915_vma_tryget(vma); 850 rcu_read_unlock(); 851 if (likely(vma)) 852 return vma; 853 854 obj = i915_gem_object_lookup(eb->file, handle); 855 if (unlikely(!obj)) 856 return ERR_PTR(-ENOENT); 857 858 vma = i915_vma_instance(obj, eb->context->vm, NULL); 859 if (IS_ERR(vma)) { 860 i915_gem_object_put(obj); 861 return vma; 862 } 863 864 err = __eb_add_lut(eb, handle, vma); 865 if (likely(!err)) 866 return vma; 867 868 i915_gem_object_put(obj); 869 if (err != -EEXIST) 870 return ERR_PTR(err); 871 } while (1); 872 } 873 874 static int eb_lookup_vmas(struct i915_execbuffer *eb) 875 { 876 unsigned int batch = eb_batch_index(eb); 877 unsigned int i; 878 int err = 0; 879 880 INIT_LIST_HEAD(&eb->relocs); 881 INIT_LIST_HEAD(&eb->unbound); 882 883 for (i = 0; i < eb->buffer_count; i++) { 884 struct i915_vma *vma; 885 886 vma = eb_lookup_vma(eb, eb->exec[i].handle); 887 if (IS_ERR(vma)) { 888 err = PTR_ERR(vma); 889 break; 890 } 891 892 err = eb_validate_vma(eb, &eb->exec[i], vma); 893 if (unlikely(err)) { 894 i915_vma_put(vma); 895 break; 896 } 897 898 eb_add_vma(eb, i, batch, vma); 899 } 900 901 eb->vma[i].vma = NULL; 902 return err; 903 } 904 905 static struct eb_vma * 906 eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle) 907 { 908 if (eb->lut_size < 0) { 909 if (handle >= -eb->lut_size) 910 return NULL; 911 return &eb->vma[handle]; 912 } else { 913 struct hlist_head *head; 914 struct eb_vma *ev; 915 916 head = &eb->buckets[hash_32(handle, eb->lut_size)]; 917 hlist_for_each_entry(ev, head, node) { 918 if (ev->handle == handle) 919 return ev; 920 } 921 return NULL; 922 } 923 } 924 925 static void eb_destroy(const struct i915_execbuffer *eb) 926 { 927 GEM_BUG_ON(eb->reloc_cache.rq); 928 929 if (eb->array) 930 eb_vma_array_put(eb->array); 931 932 if (eb->lut_size > 0) 933 kfree(eb->buckets); 934 } 935 936 static inline u64 937 relocation_target(const struct drm_i915_gem_relocation_entry *reloc, 938 const struct i915_vma *target) 939 { 940 return gen8_canonical_addr((int)reloc->delta + target->node.start); 941 } 942 943 static void reloc_cache_init(struct reloc_cache *cache, 944 struct drm_i915_private *i915) 945 { 946 cache->page = -1; 947 cache->vaddr = 0; 948 /* Must be a variable in the struct to allow GCC to unroll. 
*/ 949 cache->gen = INTEL_GEN(i915); 950 cache->has_llc = HAS_LLC(i915); 951 cache->use_64bit_reloc = HAS_64BIT_RELOC(i915); 952 cache->has_fence = cache->gen < 4; 953 cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment; 954 cache->node.flags = 0; 955 cache->rq = NULL; 956 cache->rq_size = 0; 957 } 958 959 static inline void *unmask_page(unsigned long p) 960 { 961 return (void *)(uintptr_t)(p & PAGE_MASK); 962 } 963 964 static inline unsigned int unmask_flags(unsigned long p) 965 { 966 return p & ~PAGE_MASK; 967 } 968 969 #define KMAP 0x4 /* after CLFLUSH_FLAGS */ 970 971 static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache) 972 { 973 struct drm_i915_private *i915 = 974 container_of(cache, struct i915_execbuffer, reloc_cache)->i915; 975 return &i915->ggtt; 976 } 977 978 static void reloc_gpu_flush(struct reloc_cache *cache) 979 { 980 struct drm_i915_gem_object *obj = cache->rq->batch->obj; 981 982 GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32)); 983 cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END; 984 985 __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1)); 986 i915_gem_object_unpin_map(obj); 987 988 intel_gt_chipset_flush(cache->rq->engine->gt); 989 990 i915_request_add(cache->rq); 991 cache->rq = NULL; 992 } 993 994 static void reloc_cache_reset(struct reloc_cache *cache) 995 { 996 void *vaddr; 997 998 if (cache->rq) 999 reloc_gpu_flush(cache); 1000 1001 if (!cache->vaddr) 1002 return; 1003 1004 vaddr = unmask_page(cache->vaddr); 1005 if (cache->vaddr & KMAP) { 1006 if (cache->vaddr & CLFLUSH_AFTER) 1007 mb(); 1008 1009 kunmap_atomic(vaddr); 1010 i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm); 1011 } else { 1012 struct i915_ggtt *ggtt = cache_to_ggtt(cache); 1013 1014 intel_gt_flush_ggtt_writes(ggtt->vm.gt); 1015 io_mapping_unmap_atomic((void __iomem *)vaddr); 1016 1017 if (drm_mm_node_allocated(&cache->node)) { 1018 ggtt->vm.clear_range(&ggtt->vm, 1019 cache->node.start, 1020 cache->node.size); 1021 mutex_lock(&ggtt->vm.mutex); 1022 drm_mm_remove_node(&cache->node); 1023 mutex_unlock(&ggtt->vm.mutex); 1024 } else { 1025 i915_vma_unpin((struct i915_vma *)cache->node.mm); 1026 } 1027 } 1028 1029 cache->vaddr = 0; 1030 cache->page = -1; 1031 } 1032 1033 static void *reloc_kmap(struct drm_i915_gem_object *obj, 1034 struct reloc_cache *cache, 1035 unsigned long page) 1036 { 1037 void *vaddr; 1038 1039 if (cache->vaddr) { 1040 kunmap_atomic(unmask_page(cache->vaddr)); 1041 } else { 1042 unsigned int flushes; 1043 int err; 1044 1045 err = i915_gem_object_prepare_write(obj, &flushes); 1046 if (err) 1047 return ERR_PTR(err); 1048 1049 BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS); 1050 BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK); 1051 1052 cache->vaddr = flushes | KMAP; 1053 cache->node.mm = (void *)obj; 1054 if (flushes) 1055 mb(); 1056 } 1057 1058 vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page)); 1059 cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr; 1060 cache->page = page; 1061 1062 return vaddr; 1063 } 1064 1065 static void *reloc_iomap(struct drm_i915_gem_object *obj, 1066 struct reloc_cache *cache, 1067 unsigned long page) 1068 { 1069 struct i915_ggtt *ggtt = cache_to_ggtt(cache); 1070 unsigned long offset; 1071 void *vaddr; 1072 1073 if (cache->vaddr) { 1074 intel_gt_flush_ggtt_writes(ggtt->vm.gt); 1075 io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr)); 1076 } else { 1077 struct i915_vma *vma; 1078 int err; 1079 1080 if 
(i915_gem_object_is_tiled(obj)) 1081 return ERR_PTR(-EINVAL); 1082 1083 if (use_cpu_reloc(cache, obj)) 1084 return NULL; 1085 1086 i915_gem_object_lock(obj); 1087 err = i915_gem_object_set_to_gtt_domain(obj, true); 1088 i915_gem_object_unlock(obj); 1089 if (err) 1090 return ERR_PTR(err); 1091 1092 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 1093 PIN_MAPPABLE | 1094 PIN_NONBLOCK /* NOWARN */ | 1095 PIN_NOEVICT); 1096 if (IS_ERR(vma)) { 1097 memset(&cache->node, 0, sizeof(cache->node)); 1098 mutex_lock(&ggtt->vm.mutex); 1099 err = drm_mm_insert_node_in_range 1100 (&ggtt->vm.mm, &cache->node, 1101 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, 1102 0, ggtt->mappable_end, 1103 DRM_MM_INSERT_LOW); 1104 mutex_unlock(&ggtt->vm.mutex); 1105 if (err) /* no inactive aperture space, use cpu reloc */ 1106 return NULL; 1107 } else { 1108 cache->node.start = vma->node.start; 1109 cache->node.mm = (void *)vma; 1110 } 1111 } 1112 1113 offset = cache->node.start; 1114 if (drm_mm_node_allocated(&cache->node)) { 1115 ggtt->vm.insert_page(&ggtt->vm, 1116 i915_gem_object_get_dma_address(obj, page), 1117 offset, I915_CACHE_NONE, 0); 1118 } else { 1119 offset += page << PAGE_SHIFT; 1120 } 1121 1122 vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap, 1123 offset); 1124 cache->page = page; 1125 cache->vaddr = (unsigned long)vaddr; 1126 1127 return vaddr; 1128 } 1129 1130 static void *reloc_vaddr(struct drm_i915_gem_object *obj, 1131 struct reloc_cache *cache, 1132 unsigned long page) 1133 { 1134 void *vaddr; 1135 1136 if (cache->page == page) { 1137 vaddr = unmask_page(cache->vaddr); 1138 } else { 1139 vaddr = NULL; 1140 if ((cache->vaddr & KMAP) == 0) 1141 vaddr = reloc_iomap(obj, cache, page); 1142 if (!vaddr) 1143 vaddr = reloc_kmap(obj, cache, page); 1144 } 1145 1146 return vaddr; 1147 } 1148 1149 static void clflush_write32(u32 *addr, u32 value, unsigned int flushes) 1150 { 1151 if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) { 1152 if (flushes & CLFLUSH_BEFORE) { 1153 clflushopt(addr); 1154 mb(); 1155 } 1156 1157 *addr = value; 1158 1159 /* 1160 * Writes to the same cacheline are serialised by the CPU 1161 * (including clflush). On the write path, we only require 1162 * that it hits memory in an orderly fashion and place 1163 * mb barriers at the start and end of the relocation phase 1164 * to ensure ordering of clflush wrt to the system. 1165 */ 1166 if (flushes & CLFLUSH_AFTER) 1167 clflushopt(addr); 1168 } else 1169 *addr = value; 1170 } 1171 1172 static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma) 1173 { 1174 struct drm_i915_gem_object *obj = vma->obj; 1175 int err; 1176 1177 i915_vma_lock(vma); 1178 1179 if (obj->cache_dirty & ~obj->cache_coherent) 1180 i915_gem_clflush_object(obj, 0); 1181 obj->write_domain = 0; 1182 1183 err = i915_request_await_object(rq, vma->obj, true); 1184 if (err == 0) 1185 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 1186 1187 i915_vma_unlock(vma); 1188 1189 return err; 1190 } 1191 1192 static int __reloc_gpu_alloc(struct i915_execbuffer *eb, 1193 struct i915_vma *vma, 1194 unsigned int len) 1195 { 1196 struct reloc_cache *cache = &eb->reloc_cache; 1197 struct intel_gt_buffer_pool_node *pool; 1198 struct i915_request *rq; 1199 struct i915_vma *batch; 1200 u32 *cmd; 1201 int err; 1202 1203 pool = intel_gt_get_buffer_pool(eb->engine->gt, PAGE_SIZE); 1204 if (IS_ERR(pool)) 1205 return PTR_ERR(pool); 1206 1207 cmd = i915_gem_object_pin_map(pool->obj, 1208 cache->has_llc ? 
1209 I915_MAP_FORCE_WB : 1210 I915_MAP_FORCE_WC); 1211 if (IS_ERR(cmd)) { 1212 err = PTR_ERR(cmd); 1213 goto out_pool; 1214 } 1215 1216 batch = i915_vma_instance(pool->obj, vma->vm, NULL); 1217 if (IS_ERR(batch)) { 1218 err = PTR_ERR(batch); 1219 goto err_unmap; 1220 } 1221 1222 err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK); 1223 if (err) 1224 goto err_unmap; 1225 1226 rq = i915_request_create(eb->context); 1227 if (IS_ERR(rq)) { 1228 err = PTR_ERR(rq); 1229 goto err_unpin; 1230 } 1231 1232 err = intel_gt_buffer_pool_mark_active(pool, rq); 1233 if (err) 1234 goto err_request; 1235 1236 err = reloc_move_to_gpu(rq, vma); 1237 if (err) 1238 goto err_request; 1239 1240 err = eb->engine->emit_bb_start(rq, 1241 batch->node.start, PAGE_SIZE, 1242 cache->gen > 5 ? 0 : I915_DISPATCH_SECURE); 1243 if (err) 1244 goto skip_request; 1245 1246 i915_vma_lock(batch); 1247 err = i915_request_await_object(rq, batch->obj, false); 1248 if (err == 0) 1249 err = i915_vma_move_to_active(batch, rq, 0); 1250 i915_vma_unlock(batch); 1251 if (err) 1252 goto skip_request; 1253 1254 rq->batch = batch; 1255 i915_vma_unpin(batch); 1256 1257 cache->rq = rq; 1258 cache->rq_cmd = cmd; 1259 cache->rq_size = 0; 1260 1261 /* Return with batch mapping (cmd) still pinned */ 1262 goto out_pool; 1263 1264 skip_request: 1265 i915_request_set_error_once(rq, err); 1266 err_request: 1267 i915_request_add(rq); 1268 err_unpin: 1269 i915_vma_unpin(batch); 1270 err_unmap: 1271 i915_gem_object_unpin_map(pool->obj); 1272 out_pool: 1273 intel_gt_buffer_pool_put(pool); 1274 return err; 1275 } 1276 1277 static u32 *reloc_gpu(struct i915_execbuffer *eb, 1278 struct i915_vma *vma, 1279 unsigned int len) 1280 { 1281 struct reloc_cache *cache = &eb->reloc_cache; 1282 u32 *cmd; 1283 1284 if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1)) 1285 reloc_gpu_flush(cache); 1286 1287 if (unlikely(!cache->rq)) { 1288 int err; 1289 1290 if (!intel_engine_can_store_dword(eb->engine)) 1291 return ERR_PTR(-ENODEV); 1292 1293 err = __reloc_gpu_alloc(eb, vma, len); 1294 if (unlikely(err)) 1295 return ERR_PTR(err); 1296 } 1297 1298 cmd = cache->rq_cmd + cache->rq_size; 1299 cache->rq_size += len; 1300 1301 return cmd; 1302 } 1303 1304 static inline bool use_reloc_gpu(struct i915_vma *vma) 1305 { 1306 if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) 1307 return true; 1308 1309 if (DBG_FORCE_RELOC) 1310 return false; 1311 1312 return !dma_resv_test_signaled_rcu(vma->resv, true); 1313 } 1314 1315 static u64 1316 relocate_entry(struct i915_vma *vma, 1317 const struct drm_i915_gem_relocation_entry *reloc, 1318 struct i915_execbuffer *eb, 1319 const struct i915_vma *target) 1320 { 1321 u64 offset = reloc->offset; 1322 u64 target_offset = relocation_target(reloc, target); 1323 bool wide = eb->reloc_cache.use_64bit_reloc; 1324 void *vaddr; 1325 1326 if (!eb->reloc_cache.vaddr && use_reloc_gpu(vma)) { 1327 const unsigned int gen = eb->reloc_cache.gen; 1328 unsigned int len; 1329 u32 *batch; 1330 u64 addr; 1331 1332 if (wide) 1333 len = offset & 7 ? 
8 : 5; 1334 else if (gen >= 4) 1335 len = 4; 1336 else 1337 len = 3; 1338 1339 batch = reloc_gpu(eb, vma, len); 1340 if (IS_ERR(batch)) 1341 goto repeat; 1342 1343 addr = gen8_canonical_addr(vma->node.start + offset); 1344 if (wide) { 1345 if (offset & 7) { 1346 *batch++ = MI_STORE_DWORD_IMM_GEN4; 1347 *batch++ = lower_32_bits(addr); 1348 *batch++ = upper_32_bits(addr); 1349 *batch++ = lower_32_bits(target_offset); 1350 1351 addr = gen8_canonical_addr(addr + 4); 1352 1353 *batch++ = MI_STORE_DWORD_IMM_GEN4; 1354 *batch++ = lower_32_bits(addr); 1355 *batch++ = upper_32_bits(addr); 1356 *batch++ = upper_32_bits(target_offset); 1357 } else { 1358 *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1; 1359 *batch++ = lower_32_bits(addr); 1360 *batch++ = upper_32_bits(addr); 1361 *batch++ = lower_32_bits(target_offset); 1362 *batch++ = upper_32_bits(target_offset); 1363 } 1364 } else if (gen >= 6) { 1365 *batch++ = MI_STORE_DWORD_IMM_GEN4; 1366 *batch++ = 0; 1367 *batch++ = addr; 1368 *batch++ = target_offset; 1369 } else if (gen >= 4) { 1370 *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 1371 *batch++ = 0; 1372 *batch++ = addr; 1373 *batch++ = target_offset; 1374 } else { 1375 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; 1376 *batch++ = addr; 1377 *batch++ = target_offset; 1378 } 1379 1380 goto out; 1381 } 1382 1383 repeat: 1384 vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT); 1385 if (IS_ERR(vaddr)) 1386 return PTR_ERR(vaddr); 1387 1388 clflush_write32(vaddr + offset_in_page(offset), 1389 lower_32_bits(target_offset), 1390 eb->reloc_cache.vaddr); 1391 1392 if (wide) { 1393 offset += sizeof(u32); 1394 target_offset >>= 32; 1395 wide = false; 1396 goto repeat; 1397 } 1398 1399 out: 1400 return target->node.start | UPDATE; 1401 } 1402 1403 static u64 1404 eb_relocate_entry(struct i915_execbuffer *eb, 1405 struct eb_vma *ev, 1406 const struct drm_i915_gem_relocation_entry *reloc) 1407 { 1408 struct drm_i915_private *i915 = eb->i915; 1409 struct eb_vma *target; 1410 int err; 1411 1412 /* we've already hold a reference to all valid objects */ 1413 target = eb_get_vma(eb, reloc->target_handle); 1414 if (unlikely(!target)) 1415 return -ENOENT; 1416 1417 /* Validate that the target is in a valid r/w GPU domain */ 1418 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { 1419 drm_dbg(&i915->drm, "reloc with multiple write domains: " 1420 "target %d offset %d " 1421 "read %08x write %08x", 1422 reloc->target_handle, 1423 (int) reloc->offset, 1424 reloc->read_domains, 1425 reloc->write_domain); 1426 return -EINVAL; 1427 } 1428 if (unlikely((reloc->write_domain | reloc->read_domains) 1429 & ~I915_GEM_GPU_DOMAINS)) { 1430 drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: " 1431 "target %d offset %d " 1432 "read %08x write %08x", 1433 reloc->target_handle, 1434 (int) reloc->offset, 1435 reloc->read_domains, 1436 reloc->write_domain); 1437 return -EINVAL; 1438 } 1439 1440 if (reloc->write_domain) { 1441 target->flags |= EXEC_OBJECT_WRITE; 1442 1443 /* 1444 * Sandybridge PPGTT errata: We need a global gtt mapping 1445 * for MI and pipe_control writes because the gpu doesn't 1446 * properly redirect them through the ppgtt for non_secure 1447 * batchbuffers. 
1448 */ 1449 if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && 1450 IS_GEN(eb->i915, 6)) { 1451 err = i915_vma_bind(target->vma, 1452 target->vma->obj->cache_level, 1453 PIN_GLOBAL, NULL); 1454 if (WARN_ONCE(err, 1455 "Unexpected failure to bind target VMA!")) 1456 return err; 1457 } 1458 } 1459 1460 /* 1461 * If the relocation already has the right value in it, no 1462 * more work needs to be done. 1463 */ 1464 if (!DBG_FORCE_RELOC && 1465 gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset) 1466 return 0; 1467 1468 /* Check that the relocation address is valid... */ 1469 if (unlikely(reloc->offset > 1470 ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) { 1471 drm_dbg(&i915->drm, "Relocation beyond object bounds: " 1472 "target %d offset %d size %d.\n", 1473 reloc->target_handle, 1474 (int)reloc->offset, 1475 (int)ev->vma->size); 1476 return -EINVAL; 1477 } 1478 if (unlikely(reloc->offset & 3)) { 1479 drm_dbg(&i915->drm, "Relocation not 4-byte aligned: " 1480 "target %d offset %d.\n", 1481 reloc->target_handle, 1482 (int)reloc->offset); 1483 return -EINVAL; 1484 } 1485 1486 /* 1487 * If we write into the object, we need to force the synchronisation 1488 * barrier, either with an asynchronous clflush or if we executed the 1489 * patching using the GPU (though that should be serialised by the 1490 * timeline). To be completely sure, and since we are required to 1491 * do relocations we are already stalling, disable the user's opt 1492 * out of our synchronisation. 1493 */ 1494 ev->flags &= ~EXEC_OBJECT_ASYNC; 1495 1496 /* and update the user's relocation entry */ 1497 return relocate_entry(ev->vma, reloc, eb, target->vma); 1498 } 1499 1500 static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev) 1501 { 1502 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) 1503 struct drm_i915_gem_relocation_entry stack[N_RELOC(512)]; 1504 const struct drm_i915_gem_exec_object2 *entry = ev->exec; 1505 struct drm_i915_gem_relocation_entry __user *urelocs = 1506 u64_to_user_ptr(entry->relocs_ptr); 1507 unsigned long remain = entry->relocation_count; 1508 1509 if (unlikely(remain > N_RELOC(ULONG_MAX))) 1510 return -EINVAL; 1511 1512 /* 1513 * We must check that the entire relocation array is safe 1514 * to read. However, if the array is not writable the user loses 1515 * the updated relocation values. 1516 */ 1517 if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs)))) 1518 return -EFAULT; 1519 1520 do { 1521 struct drm_i915_gem_relocation_entry *r = stack; 1522 unsigned int count = 1523 min_t(unsigned long, remain, ARRAY_SIZE(stack)); 1524 unsigned int copied; 1525 1526 /* 1527 * This is the fast path and we cannot handle a pagefault 1528 * whilst holding the struct mutex lest the user pass in the 1529 * relocations contained within a mmaped bo. For in such a case 1530 * we, the page fault handler would call i915_gem_fault() and 1531 * we would try to acquire the struct mutex again. Obviously 1532 * this is bad and so lockdep complains vehemently. 
1533 */ 1534 copied = __copy_from_user(r, urelocs, count * sizeof(r[0])); 1535 if (unlikely(copied)) { 1536 remain = -EFAULT; 1537 goto out; 1538 } 1539 1540 remain -= count; 1541 do { 1542 u64 offset = eb_relocate_entry(eb, ev, r); 1543 1544 if (likely(offset == 0)) { 1545 } else if ((s64)offset < 0) { 1546 remain = (int)offset; 1547 goto out; 1548 } else { 1549 /* 1550 * Note that reporting an error now 1551 * leaves everything in an inconsistent 1552 * state as we have *already* changed 1553 * the relocation value inside the 1554 * object. As we have not changed the 1555 * reloc.presumed_offset or will not 1556 * change the execobject.offset, on the 1557 * call we may not rewrite the value 1558 * inside the object, leaving it 1559 * dangling and causing a GPU hang. Unless 1560 * userspace dynamically rebuilds the 1561 * relocations on each execbuf rather than 1562 * presume a static tree. 1563 * 1564 * We did previously check if the relocations 1565 * were writable (access_ok), an error now 1566 * would be a strange race with mprotect, 1567 * having already demonstrated that we 1568 * can read from this userspace address. 1569 */ 1570 offset = gen8_canonical_addr(offset & ~UPDATE); 1571 __put_user(offset, 1572 &urelocs[r - stack].presumed_offset); 1573 } 1574 } while (r++, --count); 1575 urelocs += ARRAY_SIZE(stack); 1576 } while (remain); 1577 out: 1578 reloc_cache_reset(&eb->reloc_cache); 1579 return remain; 1580 } 1581 1582 static int eb_relocate(struct i915_execbuffer *eb) 1583 { 1584 int err; 1585 1586 err = eb_lookup_vmas(eb); 1587 if (err) 1588 return err; 1589 1590 if (!list_empty(&eb->unbound)) { 1591 err = eb_reserve(eb); 1592 if (err) 1593 return err; 1594 } 1595 1596 /* The objects are in their final locations, apply the relocations. 
*/ 1597 if (eb->args->flags & __EXEC_HAS_RELOC) { 1598 struct eb_vma *ev; 1599 1600 list_for_each_entry(ev, &eb->relocs, reloc_link) { 1601 err = eb_relocate_vma(eb, ev); 1602 if (err) 1603 return err; 1604 } 1605 } 1606 1607 return 0; 1608 } 1609 1610 static int eb_move_to_gpu(struct i915_execbuffer *eb) 1611 { 1612 const unsigned int count = eb->buffer_count; 1613 struct ww_acquire_ctx acquire; 1614 unsigned int i; 1615 int err = 0; 1616 1617 ww_acquire_init(&acquire, &reservation_ww_class); 1618 1619 for (i = 0; i < count; i++) { 1620 struct eb_vma *ev = &eb->vma[i]; 1621 struct i915_vma *vma = ev->vma; 1622 1623 err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire); 1624 if (err == -EDEADLK) { 1625 GEM_BUG_ON(i == 0); 1626 do { 1627 int j = i - 1; 1628 1629 ww_mutex_unlock(&eb->vma[j].vma->resv->lock); 1630 1631 swap(eb->vma[i], eb->vma[j]); 1632 } while (--i); 1633 1634 err = ww_mutex_lock_slow_interruptible(&vma->resv->lock, 1635 &acquire); 1636 } 1637 if (err) 1638 break; 1639 } 1640 ww_acquire_done(&acquire); 1641 1642 while (i--) { 1643 struct eb_vma *ev = &eb->vma[i]; 1644 struct i915_vma *vma = ev->vma; 1645 unsigned int flags = ev->flags; 1646 struct drm_i915_gem_object *obj = vma->obj; 1647 1648 assert_vma_held(vma); 1649 1650 if (flags & EXEC_OBJECT_CAPTURE) { 1651 struct i915_capture_list *capture; 1652 1653 capture = kmalloc(sizeof(*capture), GFP_KERNEL); 1654 if (capture) { 1655 capture->next = eb->request->capture_list; 1656 capture->vma = vma; 1657 eb->request->capture_list = capture; 1658 } 1659 } 1660 1661 /* 1662 * If the GPU is not _reading_ through the CPU cache, we need 1663 * to make sure that any writes (both previous GPU writes from 1664 * before a change in snooping levels and normal CPU writes) 1665 * caught in that cache are flushed to main memory. 1666 * 1667 * We want to say 1668 * obj->cache_dirty && 1669 * !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ) 1670 * but gcc's optimiser doesn't handle that as well and emits 1671 * two jumps instead of one. Maybe one day... 1672 */ 1673 if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) { 1674 if (i915_gem_clflush_object(obj, 0)) 1675 flags &= ~EXEC_OBJECT_ASYNC; 1676 } 1677 1678 if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) { 1679 err = i915_request_await_object 1680 (eb->request, obj, flags & EXEC_OBJECT_WRITE); 1681 } 1682 1683 if (err == 0) 1684 err = i915_vma_move_to_active(vma, eb->request, flags); 1685 1686 i915_vma_unlock(vma); 1687 eb_unreserve_vma(ev); 1688 } 1689 ww_acquire_fini(&acquire); 1690 1691 eb_vma_array_put(fetch_and_zero(&eb->array)); 1692 1693 if (unlikely(err)) 1694 goto err_skip; 1695 1696 /* Unconditionally flush any chipset caches (for streaming writes). 
*/ 1697 intel_gt_chipset_flush(eb->engine->gt); 1698 return 0; 1699 1700 err_skip: 1701 i915_request_set_error_once(eb->request, err); 1702 return err; 1703 } 1704 1705 static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) 1706 { 1707 if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS) 1708 return -EINVAL; 1709 1710 /* Kernel clipping was a DRI1 misfeature */ 1711 if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) { 1712 if (exec->num_cliprects || exec->cliprects_ptr) 1713 return -EINVAL; 1714 } 1715 1716 if (exec->DR4 == 0xffffffff) { 1717 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n"); 1718 exec->DR4 = 0; 1719 } 1720 if (exec->DR1 || exec->DR4) 1721 return -EINVAL; 1722 1723 if ((exec->batch_start_offset | exec->batch_len) & 0x7) 1724 return -EINVAL; 1725 1726 return 0; 1727 } 1728 1729 static int i915_reset_gen7_sol_offsets(struct i915_request *rq) 1730 { 1731 u32 *cs; 1732 int i; 1733 1734 if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) { 1735 drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n"); 1736 return -EINVAL; 1737 } 1738 1739 cs = intel_ring_begin(rq, 4 * 2 + 2); 1740 if (IS_ERR(cs)) 1741 return PTR_ERR(cs); 1742 1743 *cs++ = MI_LOAD_REGISTER_IMM(4); 1744 for (i = 0; i < 4; i++) { 1745 *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i)); 1746 *cs++ = 0; 1747 } 1748 *cs++ = MI_NOOP; 1749 intel_ring_advance(rq, cs); 1750 1751 return 0; 1752 } 1753 1754 static struct i915_vma * 1755 shadow_batch_pin(struct drm_i915_gem_object *obj, 1756 struct i915_address_space *vm, 1757 unsigned int flags) 1758 { 1759 struct i915_vma *vma; 1760 int err; 1761 1762 vma = i915_vma_instance(obj, vm, NULL); 1763 if (IS_ERR(vma)) 1764 return vma; 1765 1766 err = i915_vma_pin(vma, 0, 0, flags); 1767 if (err) 1768 return ERR_PTR(err); 1769 1770 return vma; 1771 } 1772 1773 struct eb_parse_work { 1774 struct dma_fence_work base; 1775 struct intel_engine_cs *engine; 1776 struct i915_vma *batch; 1777 struct i915_vma *shadow; 1778 struct i915_vma *trampoline; 1779 unsigned int batch_offset; 1780 unsigned int batch_length; 1781 }; 1782 1783 static int __eb_parse(struct dma_fence_work *work) 1784 { 1785 struct eb_parse_work *pw = container_of(work, typeof(*pw), base); 1786 1787 return intel_engine_cmd_parser(pw->engine, 1788 pw->batch, 1789 pw->batch_offset, 1790 pw->batch_length, 1791 pw->shadow, 1792 pw->trampoline); 1793 } 1794 1795 static void __eb_parse_release(struct dma_fence_work *work) 1796 { 1797 struct eb_parse_work *pw = container_of(work, typeof(*pw), base); 1798 1799 if (pw->trampoline) 1800 i915_active_release(&pw->trampoline->active); 1801 i915_active_release(&pw->shadow->active); 1802 i915_active_release(&pw->batch->active); 1803 } 1804 1805 static const struct dma_fence_work_ops eb_parse_ops = { 1806 .name = "eb_parse", 1807 .work = __eb_parse, 1808 .release = __eb_parse_release, 1809 }; 1810 1811 static int eb_parse_pipeline(struct i915_execbuffer *eb, 1812 struct i915_vma *shadow, 1813 struct i915_vma *trampoline) 1814 { 1815 struct eb_parse_work *pw; 1816 int err; 1817 1818 pw = kzalloc(sizeof(*pw), GFP_KERNEL); 1819 if (!pw) 1820 return -ENOMEM; 1821 1822 err = i915_active_acquire(&eb->batch->vma->active); 1823 if (err) 1824 goto err_free; 1825 1826 err = i915_active_acquire(&shadow->active); 1827 if (err) 1828 goto err_batch; 1829 1830 if (trampoline) { 1831 err = i915_active_acquire(&trampoline->active); 1832 if (err) 1833 goto err_shadow; 1834 } 1835 1836 dma_fence_work_init(&pw->base, &eb_parse_ops); 1837 1838 pw->engine = eb->engine; 1839 pw->batch = 
eb->batch->vma; 1840 pw->batch_offset = eb->batch_start_offset; 1841 pw->batch_length = eb->batch_len; 1842 pw->shadow = shadow; 1843 pw->trampoline = trampoline; 1844 1845 err = dma_resv_lock_interruptible(pw->batch->resv, NULL); 1846 if (err) 1847 goto err_trampoline; 1848 1849 err = dma_resv_reserve_shared(pw->batch->resv, 1); 1850 if (err) 1851 goto err_batch_unlock; 1852 1853 /* Wait for all writes (and relocs) into the batch to complete */ 1854 err = i915_sw_fence_await_reservation(&pw->base.chain, 1855 pw->batch->resv, NULL, false, 1856 0, I915_FENCE_GFP); 1857 if (err < 0) 1858 goto err_batch_unlock; 1859 1860 /* Keep the batch alive and unwritten as we parse */ 1861 dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma); 1862 1863 dma_resv_unlock(pw->batch->resv); 1864 1865 /* Force execution to wait for completion of the parser */ 1866 dma_resv_lock(shadow->resv, NULL); 1867 dma_resv_add_excl_fence(shadow->resv, &pw->base.dma); 1868 dma_resv_unlock(shadow->resv); 1869 1870 dma_fence_work_commit_imm(&pw->base); 1871 return 0; 1872 1873 err_batch_unlock: 1874 dma_resv_unlock(pw->batch->resv); 1875 err_trampoline: 1876 if (trampoline) 1877 i915_active_release(&trampoline->active); 1878 err_shadow: 1879 i915_active_release(&shadow->active); 1880 err_batch: 1881 i915_active_release(&eb->batch->vma->active); 1882 err_free: 1883 kfree(pw); 1884 return err; 1885 } 1886 1887 static int eb_parse(struct i915_execbuffer *eb) 1888 { 1889 struct drm_i915_private *i915 = eb->i915; 1890 struct intel_gt_buffer_pool_node *pool; 1891 struct i915_vma *shadow, *trampoline; 1892 unsigned int len; 1893 int err; 1894 1895 if (!eb_use_cmdparser(eb)) 1896 return 0; 1897 1898 len = eb->batch_len; 1899 if (!CMDPARSER_USES_GGTT(eb->i915)) { 1900 /* 1901 * ppGTT backed shadow buffers must be mapped RO, to prevent 1902 * post-scan tampering 1903 */ 1904 if (!eb->context->vm->has_read_only) { 1905 drm_dbg(&i915->drm, 1906 "Cannot prevent post-scan tampering without RO capable vm\n"); 1907 return -EINVAL; 1908 } 1909 } else { 1910 len += I915_CMD_PARSER_TRAMPOLINE_SIZE; 1911 } 1912 1913 pool = intel_gt_get_buffer_pool(eb->engine->gt, len); 1914 if (IS_ERR(pool)) 1915 return PTR_ERR(pool); 1916 1917 shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER); 1918 if (IS_ERR(shadow)) { 1919 err = PTR_ERR(shadow); 1920 goto err; 1921 } 1922 i915_gem_object_set_readonly(shadow->obj); 1923 1924 trampoline = NULL; 1925 if (CMDPARSER_USES_GGTT(eb->i915)) { 1926 trampoline = shadow; 1927 1928 shadow = shadow_batch_pin(pool->obj, 1929 &eb->engine->gt->ggtt->vm, 1930 PIN_GLOBAL); 1931 if (IS_ERR(shadow)) { 1932 err = PTR_ERR(shadow); 1933 shadow = trampoline; 1934 goto err_shadow; 1935 } 1936 1937 eb->batch_flags |= I915_DISPATCH_SECURE; 1938 } 1939 1940 err = eb_parse_pipeline(eb, shadow, trampoline); 1941 if (err) 1942 goto err_trampoline; 1943 1944 eb->vma[eb->buffer_count].vma = i915_vma_get(shadow); 1945 eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN; 1946 eb->batch = &eb->vma[eb->buffer_count++]; 1947 eb->vma[eb->buffer_count].vma = NULL; 1948 1949 eb->trampoline = trampoline; 1950 eb->batch_start_offset = 0; 1951 1952 shadow->private = pool; 1953 return 0; 1954 1955 err_trampoline: 1956 if (trampoline) 1957 i915_vma_unpin(trampoline); 1958 err_shadow: 1959 i915_vma_unpin(shadow); 1960 err: 1961 intel_gt_buffer_pool_put(pool); 1962 return err; 1963 } 1964 1965 static void 1966 add_to_client(struct i915_request *rq, struct drm_file *file) 1967 { 1968 struct drm_i915_file_private *file_priv = 
file->driver_priv; 1969 1970 rq->file_priv = file_priv; 1971 1972 spin_lock(&file_priv->mm.lock); 1973 list_add_tail(&rq->client_link, &file_priv->mm.request_list); 1974 spin_unlock(&file_priv->mm.lock); 1975 } 1976 1977 static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) 1978 { 1979 int err; 1980 1981 err = eb_move_to_gpu(eb); 1982 if (err) 1983 return err; 1984 1985 if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) { 1986 err = i915_reset_gen7_sol_offsets(eb->request); 1987 if (err) 1988 return err; 1989 } 1990 1991 /* 1992 * After we have completed waiting for other engines (using HW semaphores), 1993 * we can signal that this request/batch is ready to run. This 1994 * allows us to determine if the batch is still waiting on the GPU 1995 * or actually running by checking the breadcrumb. 1996 */ 1997 if (eb->engine->emit_init_breadcrumb) { 1998 err = eb->engine->emit_init_breadcrumb(eb->request); 1999 if (err) 2000 return err; 2001 } 2002 2003 err = eb->engine->emit_bb_start(eb->request, 2004 batch->node.start + 2005 eb->batch_start_offset, 2006 eb->batch_len, 2007 eb->batch_flags); 2008 if (err) 2009 return err; 2010 2011 if (eb->trampoline) { 2012 GEM_BUG_ON(eb->batch_start_offset); 2013 err = eb->engine->emit_bb_start(eb->request, 2014 eb->trampoline->node.start + 2015 eb->batch_len, 2016 0, 0); 2017 if (err) 2018 return err; 2019 } 2020 2021 if (intel_context_nopreempt(eb->context)) 2022 __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags); 2023 2024 return 0; 2025 } 2026 2027 static int num_vcs_engines(const struct drm_i915_private *i915) 2028 { 2029 return hweight64(INTEL_INFO(i915)->engine_mask & 2030 GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0)); 2031 } 2032 2033 /* 2034 * Find one BSD ring to dispatch the corresponding BSD command. 2035 * The engine index is returned. 2036 */ 2037 static unsigned int 2038 gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv, 2039 struct drm_file *file) 2040 { 2041 struct drm_i915_file_private *file_priv = file->driver_priv; 2042 2043 /* Check whether the file_priv has already selected one ring. */ 2044 if ((int)file_priv->bsd_engine < 0) 2045 file_priv->bsd_engine = 2046 get_random_int() % num_vcs_engines(dev_priv); 2047 2048 return file_priv->bsd_engine; 2049 } 2050 2051 static const enum intel_engine_id user_ring_map[] = { 2052 [I915_EXEC_DEFAULT] = RCS0, 2053 [I915_EXEC_RENDER] = RCS0, 2054 [I915_EXEC_BLT] = BCS0, 2055 [I915_EXEC_BSD] = VCS0, 2056 [I915_EXEC_VEBOX] = VECS0 2057 }; 2058 2059 static struct i915_request *eb_throttle(struct intel_context *ce) 2060 { 2061 struct intel_ring *ring = ce->ring; 2062 struct intel_timeline *tl = ce->timeline; 2063 struct i915_request *rq; 2064 2065 /* 2066 * Completely unscientific finger-in-the-air estimates for suitable 2067 * maximum user request size (to avoid blocking) and then backoff. 2068 */ 2069 if (intel_ring_update_space(ring) >= PAGE_SIZE) 2070 return NULL; 2071 2072 /* 2073 * Find a request such that, after waiting upon it, there will be at 2074 * least half the ring available. The hysteresis allows us to compete for the 2075 * shared ring and should mean that we sleep less often prior to 2076 * claiming our resources, but not so long that the ring completely 2077 * drains before we can submit our next request. 
2078 */ 2079 list_for_each_entry(rq, &tl->requests, link) { 2080 if (rq->ring != ring) 2081 continue; 2082 2083 if (__intel_ring_space(rq->postfix, 2084 ring->emit, ring->size) > ring->size / 2) 2085 break; 2086 } 2087 if (&rq->link == &tl->requests) 2088 return NULL; /* weird, we will check again later for real */ 2089 2090 return i915_request_get(rq); 2091 } 2092 2093 static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce) 2094 { 2095 struct intel_timeline *tl; 2096 struct i915_request *rq; 2097 int err; 2098 2099 /* 2100 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report 2101 * EIO if the GPU is already wedged. 2102 */ 2103 err = intel_gt_terminally_wedged(ce->engine->gt); 2104 if (err) 2105 return err; 2106 2107 if (unlikely(intel_context_is_banned(ce))) 2108 return -EIO; 2109 2110 /* 2111 * Pinning the contexts may generate requests in order to acquire 2112 * GGTT space, so do this first before we reserve a seqno for 2113 * ourselves. 2114 */ 2115 err = intel_context_pin(ce); 2116 if (err) 2117 return err; 2118 2119 /* 2120 * Take a local wakeref for preparing to dispatch the execbuf as 2121 * we expect to access the hardware fairly frequently in the 2122 * process, and require the engine to be kept awake between accesses. 2123 * Upon dispatch, we acquire another prolonged wakeref that we hold 2124 * until the timeline is idle, which in turn releases the wakeref 2125 * taken on the engine, and the parent device. 2126 */ 2127 tl = intel_context_timeline_lock(ce); 2128 if (IS_ERR(tl)) { 2129 err = PTR_ERR(tl); 2130 goto err_unpin; 2131 } 2132 2133 intel_context_enter(ce); 2134 rq = eb_throttle(ce); 2135 2136 intel_context_timeline_unlock(tl); 2137 2138 if (rq) { 2139 bool nonblock = eb->file->filp->f_flags & O_NONBLOCK; 2140 long timeout; 2141 2142 timeout = MAX_SCHEDULE_TIMEOUT; 2143 if (nonblock) 2144 timeout = 0; 2145 2146 timeout = i915_request_wait(rq, 2147 I915_WAIT_INTERRUPTIBLE, 2148 timeout); 2149 i915_request_put(rq); 2150 2151 if (timeout < 0) { 2152 err = nonblock ? 
-EWOULDBLOCK : timeout; 2153 goto err_exit; 2154 } 2155 } 2156 2157 eb->engine = ce->engine; 2158 eb->context = ce; 2159 return 0; 2160 2161 err_exit: 2162 mutex_lock(&tl->mutex); 2163 intel_context_exit(ce); 2164 intel_context_timeline_unlock(tl); 2165 err_unpin: 2166 intel_context_unpin(ce); 2167 return err; 2168 } 2169 2170 static void eb_unpin_engine(struct i915_execbuffer *eb) 2171 { 2172 struct intel_context *ce = eb->context; 2173 struct intel_timeline *tl = ce->timeline; 2174 2175 mutex_lock(&tl->mutex); 2176 intel_context_exit(ce); 2177 mutex_unlock(&tl->mutex); 2178 2179 intel_context_unpin(ce); 2180 } 2181 2182 static unsigned int 2183 eb_select_legacy_ring(struct i915_execbuffer *eb, 2184 struct drm_file *file, 2185 struct drm_i915_gem_execbuffer2 *args) 2186 { 2187 struct drm_i915_private *i915 = eb->i915; 2188 unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK; 2189 2190 if (user_ring_id != I915_EXEC_BSD && 2191 (args->flags & I915_EXEC_BSD_MASK)) { 2192 drm_dbg(&i915->drm, 2193 "execbuf with non bsd ring but with invalid " 2194 "bsd dispatch flags: %d\n", (int)(args->flags)); 2195 return -1; 2196 } 2197 2198 if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) { 2199 unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK; 2200 2201 if (bsd_idx == I915_EXEC_BSD_DEFAULT) { 2202 bsd_idx = gen8_dispatch_bsd_engine(i915, file); 2203 } else if (bsd_idx >= I915_EXEC_BSD_RING1 && 2204 bsd_idx <= I915_EXEC_BSD_RING2) { 2205 bsd_idx >>= I915_EXEC_BSD_SHIFT; 2206 bsd_idx--; 2207 } else { 2208 drm_dbg(&i915->drm, 2209 "execbuf with unknown bsd ring: %u\n", 2210 bsd_idx); 2211 return -1; 2212 } 2213 2214 return _VCS(bsd_idx); 2215 } 2216 2217 if (user_ring_id >= ARRAY_SIZE(user_ring_map)) { 2218 drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n", 2219 user_ring_id); 2220 return -1; 2221 } 2222 2223 return user_ring_map[user_ring_id]; 2224 } 2225 2226 static int 2227 eb_pin_engine(struct i915_execbuffer *eb, 2228 struct drm_file *file, 2229 struct drm_i915_gem_execbuffer2 *args) 2230 { 2231 struct intel_context *ce; 2232 unsigned int idx; 2233 int err; 2234 2235 if (i915_gem_context_user_engines(eb->gem_context)) 2236 idx = args->flags & I915_EXEC_RING_MASK; 2237 else 2238 idx = eb_select_legacy_ring(eb, file, args); 2239 2240 ce = i915_gem_context_get_engine(eb->gem_context, idx); 2241 if (IS_ERR(ce)) 2242 return PTR_ERR(ce); 2243 2244 err = __eb_pin_engine(eb, ce); 2245 intel_context_put(ce); 2246 2247 return err; 2248 } 2249 2250 static void 2251 __free_fence_array(struct drm_syncobj **fences, unsigned int n) 2252 { 2253 while (n--) 2254 drm_syncobj_put(ptr_mask_bits(fences[n], 2)); 2255 kvfree(fences); 2256 } 2257 2258 static struct drm_syncobj ** 2259 get_fence_array(struct drm_i915_gem_execbuffer2 *args, 2260 struct drm_file *file) 2261 { 2262 const unsigned long nfences = args->num_cliprects; 2263 struct drm_i915_gem_exec_fence __user *user; 2264 struct drm_syncobj **fences; 2265 unsigned long n; 2266 int err; 2267 2268 if (!(args->flags & I915_EXEC_FENCE_ARRAY)) 2269 return NULL; 2270 2271 /* Check multiplication overflow for access_ok() and kvmalloc_array() */ 2272 BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long)); 2273 if (nfences > min_t(unsigned long, 2274 ULONG_MAX / sizeof(*user), 2275 SIZE_MAX / sizeof(*fences))) 2276 return ERR_PTR(-EINVAL); 2277 2278 user = u64_to_user_ptr(args->cliprects_ptr); 2279 if (!access_ok(user, nfences * sizeof(*user))) 2280 return ERR_PTR(-EFAULT); 2281 2282 fences = kvmalloc_array(nfences, sizeof(*fences), 2283 
__GFP_NOWARN | GFP_KERNEL); 2284 if (!fences) 2285 return ERR_PTR(-ENOMEM); 2286 2287 for (n = 0; n < nfences; n++) { 2288 struct drm_i915_gem_exec_fence fence; 2289 struct drm_syncobj *syncobj; 2290 2291 if (__copy_from_user(&fence, user++, sizeof(fence))) { 2292 err = -EFAULT; 2293 goto err; 2294 } 2295 2296 if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) { 2297 err = -EINVAL; 2298 goto err; 2299 } 2300 2301 syncobj = drm_syncobj_find(file, fence.handle); 2302 if (!syncobj) { 2303 DRM_DEBUG("Invalid syncobj handle provided\n"); 2304 err = -ENOENT; 2305 goto err; 2306 } 2307 2308 BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) & 2309 ~__I915_EXEC_FENCE_UNKNOWN_FLAGS); 2310 2311 fences[n] = ptr_pack_bits(syncobj, fence.flags, 2); 2312 } 2313 2314 return fences; 2315 2316 err: 2317 __free_fence_array(fences, n); 2318 return ERR_PTR(err); 2319 } 2320 2321 static void 2322 put_fence_array(struct drm_i915_gem_execbuffer2 *args, 2323 struct drm_syncobj **fences) 2324 { 2325 if (fences) 2326 __free_fence_array(fences, args->num_cliprects); 2327 } 2328 2329 static int 2330 await_fence_array(struct i915_execbuffer *eb, 2331 struct drm_syncobj **fences) 2332 { 2333 const unsigned int nfences = eb->args->num_cliprects; 2334 unsigned int n; 2335 int err; 2336 2337 for (n = 0; n < nfences; n++) { 2338 struct drm_syncobj *syncobj; 2339 struct dma_fence *fence; 2340 unsigned int flags; 2341 2342 syncobj = ptr_unpack_bits(fences[n], &flags, 2); 2343 if (!(flags & I915_EXEC_FENCE_WAIT)) 2344 continue; 2345 2346 fence = drm_syncobj_fence_get(syncobj); 2347 if (!fence) 2348 return -EINVAL; 2349 2350 err = i915_request_await_dma_fence(eb->request, fence); 2351 dma_fence_put(fence); 2352 if (err < 0) 2353 return err; 2354 } 2355 2356 return 0; 2357 } 2358 2359 static void 2360 signal_fence_array(struct i915_execbuffer *eb, 2361 struct drm_syncobj **fences) 2362 { 2363 const unsigned int nfences = eb->args->num_cliprects; 2364 struct dma_fence * const fence = &eb->request->fence; 2365 unsigned int n; 2366 2367 for (n = 0; n < nfences; n++) { 2368 struct drm_syncobj *syncobj; 2369 unsigned int flags; 2370 2371 syncobj = ptr_unpack_bits(fences[n], &flags, 2); 2372 if (!(flags & I915_EXEC_FENCE_SIGNAL)) 2373 continue; 2374 2375 drm_syncobj_replace_fence(syncobj, fence); 2376 } 2377 } 2378 2379 static void retire_requests(struct intel_timeline *tl, struct i915_request *end) 2380 { 2381 struct i915_request *rq, *rn; 2382 2383 list_for_each_entry_safe(rq, rn, &tl->requests, link) 2384 if (rq == end || !i915_request_retire(rq)) 2385 break; 2386 } 2387 2388 static void eb_request_add(struct i915_execbuffer *eb) 2389 { 2390 struct i915_request *rq = eb->request; 2391 struct intel_timeline * const tl = i915_request_timeline(rq); 2392 struct i915_sched_attr attr = {}; 2393 struct i915_request *prev; 2394 2395 lockdep_assert_held(&tl->mutex); 2396 lockdep_unpin_lock(&tl->mutex, rq->cookie); 2397 2398 trace_i915_request_add(rq); 2399 2400 prev = __i915_request_commit(rq); 2401 2402 /* Check that the context wasn't destroyed before submission */ 2403 if (likely(!intel_context_is_closed(eb->context))) { 2404 attr = eb->gem_context->sched; 2405 2406 /* 2407 * Boost actual workloads past semaphores! 2408 * 2409 * With semaphores we spin on one engine waiting for another, 2410 * simply to reduce the latency of starting our work when 2411 * the signaler completes. 
However, if there is any other 2412 * work that we could be doing on this engine instead, that 2413 * is better utilisation and will reduce the overall duration 2414 * of the current work. To avoid PI boosting a semaphore 2415 * far in the distance past over useful work, we keep a history 2416 * of any semaphore use along our dependency chain. 2417 */ 2418 if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN)) 2419 attr.priority |= I915_PRIORITY_NOSEMAPHORE; 2420 2421 /* 2422 * Boost priorities to new clients (new request flows). 2423 * 2424 * Allow interactive/synchronous clients to jump ahead of 2425 * the bulk clients. (FQ_CODEL) 2426 */ 2427 if (list_empty(&rq->sched.signalers_list)) 2428 attr.priority |= I915_PRIORITY_WAIT; 2429 } else { 2430 /* Serialise with context_close via the add_to_timeline */ 2431 i915_request_set_error_once(rq, -ENOENT); 2432 __i915_request_skip(rq); 2433 } 2434 2435 __i915_request_queue(rq, &attr); 2436 2437 /* Try to clean up the client's timeline after submitting the request */ 2438 if (prev) 2439 retire_requests(tl, prev); 2440 2441 mutex_unlock(&tl->mutex); 2442 } 2443 2444 static int 2445 i915_gem_do_execbuffer(struct drm_device *dev, 2446 struct drm_file *file, 2447 struct drm_i915_gem_execbuffer2 *args, 2448 struct drm_i915_gem_exec_object2 *exec, 2449 struct drm_syncobj **fences) 2450 { 2451 struct drm_i915_private *i915 = to_i915(dev); 2452 struct i915_execbuffer eb; 2453 struct dma_fence *in_fence = NULL; 2454 struct dma_fence *exec_fence = NULL; 2455 struct sync_file *out_fence = NULL; 2456 struct i915_vma *batch; 2457 int out_fence_fd = -1; 2458 int err; 2459 2460 BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS); 2461 BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & 2462 ~__EXEC_OBJECT_UNKNOWN_FLAGS); 2463 2464 eb.i915 = i915; 2465 eb.file = file; 2466 eb.args = args; 2467 if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC)) 2468 args->flags |= __EXEC_HAS_RELOC; 2469 2470 eb.exec = exec; 2471 2472 eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; 2473 reloc_cache_init(&eb.reloc_cache, eb.i915); 2474 2475 eb.buffer_count = args->buffer_count; 2476 eb.batch_start_offset = args->batch_start_offset; 2477 eb.batch_len = args->batch_len; 2478 eb.trampoline = NULL; 2479 2480 eb.batch_flags = 0; 2481 if (args->flags & I915_EXEC_SECURE) { 2482 if (INTEL_GEN(i915) >= 11) 2483 return -ENODEV; 2484 2485 /* Return -EPERM to trigger fallback code on old binaries. 
*/ 2486 if (!HAS_SECURE_BATCHES(i915)) 2487 return -EPERM; 2488 2489 if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN)) 2490 return -EPERM; 2491 2492 eb.batch_flags |= I915_DISPATCH_SECURE; 2493 } 2494 if (args->flags & I915_EXEC_IS_PINNED) 2495 eb.batch_flags |= I915_DISPATCH_PINNED; 2496 2497 if (args->flags & I915_EXEC_FENCE_IN) { 2498 in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2)); 2499 if (!in_fence) 2500 return -EINVAL; 2501 } 2502 2503 if (args->flags & I915_EXEC_FENCE_SUBMIT) { 2504 if (in_fence) { 2505 err = -EINVAL; 2506 goto err_in_fence; 2507 } 2508 2509 exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2)); 2510 if (!exec_fence) { 2511 err = -EINVAL; 2512 goto err_in_fence; 2513 } 2514 } 2515 2516 if (args->flags & I915_EXEC_FENCE_OUT) { 2517 out_fence_fd = get_unused_fd_flags(O_CLOEXEC); 2518 if (out_fence_fd < 0) { 2519 err = out_fence_fd; 2520 goto err_exec_fence; 2521 } 2522 } 2523 2524 err = eb_create(&eb); 2525 if (err) 2526 goto err_out_fence; 2527 2528 GEM_BUG_ON(!eb.lut_size); 2529 2530 err = eb_select_context(&eb); 2531 if (unlikely(err)) 2532 goto err_destroy; 2533 2534 err = eb_pin_engine(&eb, file, args); 2535 if (unlikely(err)) 2536 goto err_context; 2537 2538 err = eb_relocate(&eb); 2539 if (err) { 2540 /* 2541 * If the user expects the execobject.offset and 2542 * reloc.presumed_offset to be an exact match, 2543 * as for using NO_RELOC, then we cannot update 2544 * the execobject.offset until we have completed 2545 * relocation. 2546 */ 2547 args->flags &= ~__EXEC_HAS_RELOC; 2548 goto err_vma; 2549 } 2550 2551 if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) { 2552 drm_dbg(&i915->drm, 2553 "Attempting to use self-modifying batch buffer\n"); 2554 err = -EINVAL; 2555 goto err_vma; 2556 } 2557 2558 if (range_overflows_t(u64, 2559 eb.batch_start_offset, eb.batch_len, 2560 eb.batch->vma->size)) { 2561 drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n"); 2562 err = -EINVAL; 2563 goto err_vma; 2564 } 2565 2566 if (eb.batch_len == 0) 2567 eb.batch_len = eb.batch->vma->size - eb.batch_start_offset; 2568 2569 err = eb_parse(&eb); 2570 if (err) 2571 goto err_vma; 2572 2573 /* 2574 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure 2575 * batch" bit. Hence we need to pin secure batches into the global gtt. 2576 * hsw should have this fixed, but bdw mucks it up again. */ 2577 batch = eb.batch->vma; 2578 if (eb.batch_flags & I915_DISPATCH_SECURE) { 2579 struct i915_vma *vma; 2580 2581 /* 2582 * So on first glance it looks freaky that we pin the batch here 2583 * outside of the reservation loop. But: 2584 * - The batch is already pinned into the relevant ppgtt, so we 2585 * already have the backing storage fully allocated. 2586 * - No other BO uses the global gtt (well contexts, but meh), 2587 * so we don't really have issues with multiple objects not 2588 * fitting due to fragmentation. 2589 * So this is actually safe. 2590 */ 2591 vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0); 2592 if (IS_ERR(vma)) { 2593 err = PTR_ERR(vma); 2594 goto err_parse; 2595 } 2596 2597 batch = vma; 2598 } 2599 2600 /* All GPU relocation batches must be submitted prior to the user rq */ 2601 GEM_BUG_ON(eb.reloc_cache.rq); 2602 2603 /* Allocate a request for this batch buffer nice and early. 
*/ 2604 eb.request = i915_request_create(eb.context); 2605 if (IS_ERR(eb.request)) { 2606 err = PTR_ERR(eb.request); 2607 goto err_batch_unpin; 2608 } 2609 2610 if (in_fence) { 2611 err = i915_request_await_dma_fence(eb.request, in_fence); 2612 if (err < 0) 2613 goto err_request; 2614 } 2615 2616 if (exec_fence) { 2617 err = i915_request_await_execution(eb.request, exec_fence, 2618 eb.engine->bond_execute); 2619 if (err < 0) 2620 goto err_request; 2621 } 2622 2623 if (fences) { 2624 err = await_fence_array(&eb, fences); 2625 if (err) 2626 goto err_request; 2627 } 2628 2629 if (out_fence_fd != -1) { 2630 out_fence = sync_file_create(&eb.request->fence); 2631 if (!out_fence) { 2632 err = -ENOMEM; 2633 goto err_request; 2634 } 2635 } 2636 2637 /* 2638 * Whilst this request exists, batch_obj will be on the 2639 * active_list, and so will hold the active reference. Only when this 2640 * request is retired will the batch_obj be moved onto the 2641 * inactive_list and lose its active reference. Hence we do not need 2642 * to explicitly hold another reference here. 2643 */ 2644 eb.request->batch = batch; 2645 if (batch->private) 2646 intel_gt_buffer_pool_mark_active(batch->private, eb.request); 2647 2648 trace_i915_request_queue(eb.request, eb.batch_flags); 2649 err = eb_submit(&eb, batch); 2650 err_request: 2651 add_to_client(eb.request, file); 2652 i915_request_get(eb.request); 2653 eb_request_add(&eb); 2654 2655 if (fences) 2656 signal_fence_array(&eb, fences); 2657 2658 if (out_fence) { 2659 if (err == 0) { 2660 fd_install(out_fence_fd, out_fence->file); 2661 args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */ 2662 args->rsvd2 |= (u64)out_fence_fd << 32; 2663 out_fence_fd = -1; 2664 } else { 2665 fput(out_fence->file); 2666 } 2667 } 2668 i915_request_put(eb.request); 2669 2670 err_batch_unpin: 2671 if (eb.batch_flags & I915_DISPATCH_SECURE) 2672 i915_vma_unpin(batch); 2673 err_parse: 2674 if (batch->private) 2675 intel_gt_buffer_pool_put(batch->private); 2676 err_vma: 2677 if (eb.trampoline) 2678 i915_vma_unpin(eb.trampoline); 2679 eb_unpin_engine(&eb); 2680 err_context: 2681 i915_gem_context_put(eb.gem_context); 2682 err_destroy: 2683 eb_destroy(&eb); 2684 err_out_fence: 2685 if (out_fence_fd != -1) 2686 put_unused_fd(out_fence_fd); 2687 err_exec_fence: 2688 dma_fence_put(exec_fence); 2689 err_in_fence: 2690 dma_fence_put(in_fence); 2691 return err; 2692 } 2693 2694 static size_t eb_element_size(void) 2695 { 2696 return sizeof(struct drm_i915_gem_exec_object2); 2697 } 2698 2699 static bool check_buffer_count(size_t count) 2700 { 2701 const size_t sz = eb_element_size(); 2702 2703 /* 2704 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup 2705 * array size (see eb_create()). Otherwise, we can accept an array as 2706 * large as can be addressed (though use large arrays at your peril)! 2707 */ 2708 2709 return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1); 2710 } 2711 2712 /* 2713 * Legacy execbuffer just creates an exec2 list from the original exec object 2714 * list array and passes it to the real function. 
2715 */ 2716 int 2717 i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, 2718 struct drm_file *file) 2719 { 2720 struct drm_i915_private *i915 = to_i915(dev); 2721 struct drm_i915_gem_execbuffer *args = data; 2722 struct drm_i915_gem_execbuffer2 exec2; 2723 struct drm_i915_gem_exec_object *exec_list = NULL; 2724 struct drm_i915_gem_exec_object2 *exec2_list = NULL; 2725 const size_t count = args->buffer_count; 2726 unsigned int i; 2727 int err; 2728 2729 if (!check_buffer_count(count)) { 2730 drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count); 2731 return -EINVAL; 2732 } 2733 2734 exec2.buffers_ptr = args->buffers_ptr; 2735 exec2.buffer_count = args->buffer_count; 2736 exec2.batch_start_offset = args->batch_start_offset; 2737 exec2.batch_len = args->batch_len; 2738 exec2.DR1 = args->DR1; 2739 exec2.DR4 = args->DR4; 2740 exec2.num_cliprects = args->num_cliprects; 2741 exec2.cliprects_ptr = args->cliprects_ptr; 2742 exec2.flags = I915_EXEC_RENDER; 2743 i915_execbuffer2_set_context_id(exec2, 0); 2744 2745 err = i915_gem_check_execbuffer(&exec2); 2746 if (err) 2747 return err; 2748 2749 /* Copy in the exec list from userland */ 2750 exec_list = kvmalloc_array(count, sizeof(*exec_list), 2751 __GFP_NOWARN | GFP_KERNEL); 2752 exec2_list = kvmalloc_array(count, eb_element_size(), 2753 __GFP_NOWARN | GFP_KERNEL); 2754 if (exec_list == NULL || exec2_list == NULL) { 2755 drm_dbg(&i915->drm, 2756 "Failed to allocate exec list for %d buffers\n", 2757 args->buffer_count); 2758 kvfree(exec_list); 2759 kvfree(exec2_list); 2760 return -ENOMEM; 2761 } 2762 err = copy_from_user(exec_list, 2763 u64_to_user_ptr(args->buffers_ptr), 2764 sizeof(*exec_list) * count); 2765 if (err) { 2766 drm_dbg(&i915->drm, "copy %d exec entries failed %d\n", 2767 args->buffer_count, err); 2768 kvfree(exec_list); 2769 kvfree(exec2_list); 2770 return -EFAULT; 2771 } 2772 2773 for (i = 0; i < args->buffer_count; i++) { 2774 exec2_list[i].handle = exec_list[i].handle; 2775 exec2_list[i].relocation_count = exec_list[i].relocation_count; 2776 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; 2777 exec2_list[i].alignment = exec_list[i].alignment; 2778 exec2_list[i].offset = exec_list[i].offset; 2779 if (INTEL_GEN(to_i915(dev)) < 4) 2780 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; 2781 else 2782 exec2_list[i].flags = 0; 2783 } 2784 2785 err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL); 2786 if (exec2.flags & __EXEC_HAS_RELOC) { 2787 struct drm_i915_gem_exec_object __user *user_exec_list = 2788 u64_to_user_ptr(args->buffers_ptr); 2789 2790 /* Copy the new buffer offsets back to the user's exec list. 
*/ 2791 for (i = 0; i < args->buffer_count; i++) { 2792 if (!(exec2_list[i].offset & UPDATE)) 2793 continue; 2794 2795 exec2_list[i].offset = 2796 gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK); 2797 exec2_list[i].offset &= PIN_OFFSET_MASK; 2798 if (__copy_to_user(&user_exec_list[i].offset, 2799 &exec2_list[i].offset, 2800 sizeof(user_exec_list[i].offset))) 2801 break; 2802 } 2803 } 2804 2805 kvfree(exec_list); 2806 kvfree(exec2_list); 2807 return err; 2808 } 2809 2810 int 2811 i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, 2812 struct drm_file *file) 2813 { 2814 struct drm_i915_private *i915 = to_i915(dev); 2815 struct drm_i915_gem_execbuffer2 *args = data; 2816 struct drm_i915_gem_exec_object2 *exec2_list; 2817 struct drm_syncobj **fences = NULL; 2818 const size_t count = args->buffer_count; 2819 int err; 2820 2821 if (!check_buffer_count(count)) { 2822 drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count); 2823 return -EINVAL; 2824 } 2825 2826 err = i915_gem_check_execbuffer(args); 2827 if (err) 2828 return err; 2829 2830 exec2_list = kvmalloc_array(count, eb_element_size(), 2831 __GFP_NOWARN | GFP_KERNEL); 2832 if (exec2_list == NULL) { 2833 drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n", 2834 count); 2835 return -ENOMEM; 2836 } 2837 if (copy_from_user(exec2_list, 2838 u64_to_user_ptr(args->buffers_ptr), 2839 sizeof(*exec2_list) * count)) { 2840 drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count); 2841 kvfree(exec2_list); 2842 return -EFAULT; 2843 } 2844 2845 if (args->flags & I915_EXEC_FENCE_ARRAY) { 2846 fences = get_fence_array(args, file); 2847 if (IS_ERR(fences)) { 2848 kvfree(exec2_list); 2849 return PTR_ERR(fences); 2850 } 2851 } 2852 2853 err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences); 2854 2855 /* 2856 * Now that we have begun execution of the batchbuffer, we ignore 2857 * any new error after this point. Also given that we have already 2858 * updated the associated relocations, we try to write out the current 2859 * object locations irrespective of any error. 2860 */ 2861 if (args->flags & __EXEC_HAS_RELOC) { 2862 struct drm_i915_gem_exec_object2 __user *user_exec_list = 2863 u64_to_user_ptr(args->buffers_ptr); 2864 unsigned int i; 2865 2866 /* Copy the new buffer offsets back to the user's exec list. */ 2867 /* 2868 * Note: count * sizeof(*user_exec_list) does not overflow, 2869 * because we checked 'count' in check_buffer_count(). 2870 * 2871 * And this range already got effectively checked earlier 2872 * when we did the "copy_from_user()" above. 2873 */ 2874 if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list))) 2875 goto end; 2876 2877 for (i = 0; i < args->buffer_count; i++) { 2878 if (!(exec2_list[i].offset & UPDATE)) 2879 continue; 2880 2881 exec2_list[i].offset = 2882 gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK); 2883 unsafe_put_user(exec2_list[i].offset, 2884 &user_exec_list[i].offset, 2885 end_user); 2886 } 2887 end_user: 2888 user_access_end(); 2889 end:; 2890 } 2891 2892 args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS; 2893 put_fence_array(args, fences); 2894 kvfree(exec2_list); 2895 return err; 2896 } 2897
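
/*
 * Below is a minimal, illustrative userspace sketch (not part of the driver,
 * hence guarded by #if 0) of how the execbuf2 uapi consumed above is filled
 * in: the fields validated by i915_gem_check_execbuffer(), the fence array
 * parsed by get_fence_array() (which reuses the legacy num_cliprects /
 * cliprects_ptr fields when I915_EXEC_FENCE_ARRAY is set), and a batch_len
 * of 0 meaning "run to the end of the object" as handled in
 * i915_gem_do_execbuffer(). The helper name submit_batch() and the DRM fd,
 * GEM handle and syncobj handle are placeholders assumed to have been
 * created elsewhere. Had I915_EXEC_FENCE_OUT been requested as well, the
 * resulting sync_file fd would be returned in the upper 32 bits of rsvd2,
 * as set up at the end of i915_gem_do_execbuffer() above.
 */
#if 0	/* example only -- not part of the driver */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int submit_batch(int drm_fd, uint32_t batch_handle, uint32_t syncobj_handle)
{
	/*
	 * The batch is normally the last entry in the object array; here it
	 * is the only one. With I915_EXEC_NO_RELOC, .offset is expected to
	 * match reloc.presumed_offset for this object.
	 */
	struct drm_i915_gem_exec_object2 obj = {
		.handle = batch_handle,
		.offset = 0,
	};
	struct drm_i915_gem_exec_fence fence = {
		.handle = syncobj_handle,
		/* syncobj receives the request fence (signals on completion) */
		.flags = I915_EXEC_FENCE_SIGNAL,
	};
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&obj;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;	/* must be 8-byte aligned */
	execbuf.batch_len = 0;		/* 0: run to the end of the object */
	execbuf.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC |
			I915_EXEC_FENCE_ARRAY;
	/* With FENCE_ARRAY, the old cliprect fields carry the fence array */
	execbuf.cliprects_ptr = (uintptr_t)&fence;
	execbuf.num_cliprects = 1;
	i915_execbuffer2_set_context_id(execbuf, 0); /* default context */

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}
#endif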