1 /* 2 * Copyright © 2008-2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * 26 */ 27 28 #include <drm/drmP.h> 29 #include <drm/drm_vma_manager.h> 30 #include <drm/i915_drm.h> 31 #include "i915_drv.h" 32 #include "i915_gem_clflush.h" 33 #include "i915_vgpu.h" 34 #include "i915_trace.h" 35 #include "intel_drv.h" 36 #include "intel_frontbuffer.h" 37 #include "intel_mocs.h" 38 #include <linux/dma-fence-array.h> 39 #include <linux/kthread.h> 40 #include <linux/reservation.h> 41 #include <linux/shmem_fs.h> 42 #include <linux/slab.h> 43 #include <linux/stop_machine.h> 44 #include <linux/swap.h> 45 #include <linux/pci.h> 46 #include <linux/dma-buf.h> 47 48 static void i915_gem_flush_free_objects(struct drm_i915_private *i915); 49 50 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) 51 { 52 if (obj->cache_dirty) 53 return false; 54 55 if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) 56 return true; 57 58 return obj->pin_display; 59 } 60 61 static int 62 insert_mappable_node(struct i915_ggtt *ggtt, 63 struct drm_mm_node *node, u32 size) 64 { 65 memset(node, 0, sizeof(*node)); 66 return drm_mm_insert_node_in_range(&ggtt->base.mm, node, 67 size, 0, I915_COLOR_UNEVICTABLE, 68 0, ggtt->mappable_end, 69 DRM_MM_INSERT_LOW); 70 } 71 72 static void 73 remove_mappable_node(struct drm_mm_node *node) 74 { 75 drm_mm_remove_node(node); 76 } 77 78 /* some bookkeeping */ 79 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 80 u64 size) 81 { 82 spin_lock(&dev_priv->mm.object_stat_lock); 83 dev_priv->mm.object_count++; 84 dev_priv->mm.object_memory += size; 85 spin_unlock(&dev_priv->mm.object_stat_lock); 86 } 87 88 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, 89 u64 size) 90 { 91 spin_lock(&dev_priv->mm.object_stat_lock); 92 dev_priv->mm.object_count--; 93 dev_priv->mm.object_memory -= size; 94 spin_unlock(&dev_priv->mm.object_stat_lock); 95 } 96 97 static int 98 i915_gem_wait_for_error(struct i915_gpu_error *error) 99 { 100 int ret; 101 102 might_sleep(); 103 104 /* 105 * Only wait 10 seconds for the gpu reset to complete to avoid hanging 106 * userspace. If it takes that long something really bad is going on and 107 * we should simply try to bail out and fail as gracefully as possible. 108 */ 109 ret = wait_event_interruptible_timeout(error->reset_queue, 110 !i915_reset_backoff(error), 111 I915_RESET_TIMEOUT); 112 if (ret == 0) { 113 DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); 114 return -EIO; 115 } else if (ret < 0) { 116 return ret; 117 } else { 118 return 0; 119 } 120 } 121 122 int i915_mutex_lock_interruptible(struct drm_device *dev) 123 { 124 struct drm_i915_private *dev_priv = to_i915(dev); 125 int ret; 126 127 ret = i915_gem_wait_for_error(&dev_priv->gpu_error); 128 if (ret) 129 return ret; 130 131 ret = mutex_lock_interruptible(&dev->struct_mutex); 132 if (ret) 133 return ret; 134 135 return 0; 136 } 137 138 int 139 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 140 struct drm_file *file) 141 { 142 struct drm_i915_private *dev_priv = to_i915(dev); 143 struct i915_ggtt *ggtt = &dev_priv->ggtt; 144 struct drm_i915_gem_get_aperture *args = data; 145 struct i915_vma *vma; 146 u64 pinned; 147 148 pinned = ggtt->base.reserved; 149 mutex_lock(&dev->struct_mutex); 150 list_for_each_entry(vma, &ggtt->base.active_list, vm_link) 151 if (i915_vma_is_pinned(vma)) 152 pinned += vma->node.size; 153 list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link) 154 if (i915_vma_is_pinned(vma)) 155 pinned += vma->node.size; 156 mutex_unlock(&dev->struct_mutex); 157 158 args->aper_size = ggtt->base.total; 159 args->aper_available_size = args->aper_size - pinned; 160 161 return 0; 162 } 163 164 static struct sg_table * 165 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) 166 { 167 struct address_space *mapping = obj->base.filp->f_mapping; 168 drm_dma_handle_t *phys; 169 struct sg_table *st; 170 struct scatterlist *sg; 171 char *vaddr; 172 int i; 173 174 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) 175 return ERR_PTR(-EINVAL); 176 177 /* Always aligning to the object size, allows a single allocation 178 * to handle all possible callers, and given typical object sizes, 179 * the alignment of the buddy allocation will naturally match. 180 */ 181 phys = drm_pci_alloc(obj->base.dev, 182 obj->base.size, 183 roundup_pow_of_two(obj->base.size)); 184 if (!phys) 185 return ERR_PTR(-ENOMEM); 186 187 vaddr = phys->vaddr; 188 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { 189 struct page *page; 190 char *src; 191 192 page = shmem_read_mapping_page(mapping, i); 193 if (IS_ERR(page)) { 194 st = ERR_CAST(page); 195 goto err_phys; 196 } 197 198 src = kmap_atomic(page); 199 memcpy(vaddr, src, PAGE_SIZE); 200 drm_clflush_virt_range(vaddr, PAGE_SIZE); 201 kunmap_atomic(src); 202 203 put_page(page); 204 vaddr += PAGE_SIZE; 205 } 206 207 i915_gem_chipset_flush(to_i915(obj->base.dev)); 208 209 st = kmalloc(sizeof(*st), GFP_KERNEL); 210 if (!st) { 211 st = ERR_PTR(-ENOMEM); 212 goto err_phys; 213 } 214 215 if (sg_alloc_table(st, 1, GFP_KERNEL)) { 216 kfree(st); 217 st = ERR_PTR(-ENOMEM); 218 goto err_phys; 219 } 220 221 sg = st->sgl; 222 sg->offset = 0; 223 sg->length = obj->base.size; 224 225 sg_dma_address(sg) = phys->busaddr; 226 sg_dma_len(sg) = obj->base.size; 227 228 obj->phys_handle = phys; 229 return st; 230 231 err_phys: 232 drm_pci_free(obj->base.dev, phys); 233 return st; 234 } 235 236 static void __start_cpu_write(struct drm_i915_gem_object *obj) 237 { 238 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 239 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 240 if (cpu_write_needs_clflush(obj)) 241 obj->cache_dirty = true; 242 } 243 244 static void 245 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, 246 struct sg_table *pages, 247 bool needs_clflush) 248 { 249 GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); 250 251 if (obj->mm.madv == I915_MADV_DONTNEED) 252 obj->mm.dirty = false; 253 254 if (needs_clflush && 255 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 && 256 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) 257 drm_clflush_sg(pages); 258 259 __start_cpu_write(obj); 260 } 261 262 static void 263 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, 264 struct sg_table *pages) 265 { 266 __i915_gem_object_release_shmem(obj, pages, false); 267 268 if (obj->mm.dirty) { 269 struct address_space *mapping = obj->base.filp->f_mapping; 270 char *vaddr = obj->phys_handle->vaddr; 271 int i; 272 273 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { 274 struct page *page; 275 char *dst; 276 277 page = shmem_read_mapping_page(mapping, i); 278 if (IS_ERR(page)) 279 continue; 280 281 dst = kmap_atomic(page); 282 drm_clflush_virt_range(vaddr, PAGE_SIZE); 283 memcpy(dst, vaddr, PAGE_SIZE); 284 kunmap_atomic(dst); 285 286 set_page_dirty(page); 287 if (obj->mm.madv == I915_MADV_WILLNEED) 288 mark_page_accessed(page); 289 put_page(page); 290 vaddr += PAGE_SIZE; 291 } 292 obj->mm.dirty = false; 293 } 294 295 sg_free_table(pages); 296 kfree(pages); 297 298 drm_pci_free(obj->base.dev, obj->phys_handle); 299 } 300 301 static void 302 i915_gem_object_release_phys(struct drm_i915_gem_object *obj) 303 { 304 i915_gem_object_unpin_pages(obj); 305 } 306 307 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { 308 .get_pages = i915_gem_object_get_pages_phys, 309 .put_pages = i915_gem_object_put_pages_phys, 310 .release = i915_gem_object_release_phys, 311 }; 312 313 static const struct drm_i915_gem_object_ops i915_gem_object_ops; 314 315 int i915_gem_object_unbind(struct drm_i915_gem_object *obj) 316 { 317 struct i915_vma *vma; 318 LIST_HEAD(still_in_list); 319 int ret; 320 321 lockdep_assert_held(&obj->base.dev->struct_mutex); 322 323 /* Closed vma are removed from the obj->vma_list - but they may 324 * still have an active binding on the object. To remove those we 325 * must wait for all rendering to complete to the object (as unbinding 326 * must anyway), and retire the requests. 327 */ 328 ret = i915_gem_object_wait(obj, 329 I915_WAIT_INTERRUPTIBLE | 330 I915_WAIT_LOCKED | 331 I915_WAIT_ALL, 332 MAX_SCHEDULE_TIMEOUT, 333 NULL); 334 if (ret) 335 return ret; 336 337 i915_gem_retire_requests(to_i915(obj->base.dev)); 338 339 while ((vma = list_first_entry_or_null(&obj->vma_list, 340 struct i915_vma, 341 obj_link))) { 342 list_move_tail(&vma->obj_link, &still_in_list); 343 ret = i915_vma_unbind(vma); 344 if (ret) 345 break; 346 } 347 list_splice(&still_in_list, &obj->vma_list); 348 349 return ret; 350 } 351 352 static long 353 i915_gem_object_wait_fence(struct dma_fence *fence, 354 unsigned int flags, 355 long timeout, 356 struct intel_rps_client *rps) 357 { 358 struct drm_i915_gem_request *rq; 359 360 BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1); 361 362 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 363 return timeout; 364 365 if (!dma_fence_is_i915(fence)) 366 return dma_fence_wait_timeout(fence, 367 flags & I915_WAIT_INTERRUPTIBLE, 368 timeout); 369 370 rq = to_request(fence); 371 if (i915_gem_request_completed(rq)) 372 goto out; 373 374 /* This client is about to stall waiting for the GPU. In many cases 375 * this is undesirable and limits the throughput of the system, as 376 * many clients cannot continue processing user input/output whilst 377 * blocked. RPS autotuning may take tens of milliseconds to respond 378 * to the GPU load and thus incurs additional latency for the client. 379 * We can circumvent that by promoting the GPU frequency to maximum 380 * before we wait. This makes the GPU throttle up much more quickly 381 * (good for benchmarks and user experience, e.g. window animations), 382 * but at a cost of spending more power processing the workload 383 * (bad for battery). Not all clients even want their results 384 * immediately and for them we should just let the GPU select its own 385 * frequency to maximise efficiency. To prevent a single client from 386 * forcing the clocks too high for the whole system, we only allow 387 * each client to waitboost once in a busy period. 388 */ 389 if (rps) { 390 if (INTEL_GEN(rq->i915) >= 6) 391 gen6_rps_boost(rq, rps); 392 else 393 rps = NULL; 394 } 395 396 timeout = i915_wait_request(rq, flags, timeout); 397 398 out: 399 if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq)) 400 i915_gem_request_retire_upto(rq); 401 402 return timeout; 403 } 404 405 static long 406 i915_gem_object_wait_reservation(struct reservation_object *resv, 407 unsigned int flags, 408 long timeout, 409 struct intel_rps_client *rps) 410 { 411 unsigned int seq = __read_seqcount_begin(&resv->seq); 412 struct dma_fence *excl; 413 bool prune_fences = false; 414 415 if (flags & I915_WAIT_ALL) { 416 struct dma_fence **shared; 417 unsigned int count, i; 418 int ret; 419 420 ret = reservation_object_get_fences_rcu(resv, 421 &excl, &count, &shared); 422 if (ret) 423 return ret; 424 425 for (i = 0; i < count; i++) { 426 timeout = i915_gem_object_wait_fence(shared[i], 427 flags, timeout, 428 rps); 429 if (timeout < 0) 430 break; 431 432 dma_fence_put(shared[i]); 433 } 434 435 for (; i < count; i++) 436 dma_fence_put(shared[i]); 437 kfree(shared); 438 439 prune_fences = count && timeout >= 0; 440 } else { 441 excl = reservation_object_get_excl_rcu(resv); 442 } 443 444 if (excl && timeout >= 0) { 445 timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps); 446 prune_fences = timeout >= 0; 447 } 448 449 dma_fence_put(excl); 450 451 /* Oportunistically prune the fences iff we know they have *all* been 452 * signaled and that the reservation object has not been changed (i.e. 453 * no new fences have been added). 454 */ 455 if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) { 456 if (reservation_object_trylock(resv)) { 457 if (!__read_seqcount_retry(&resv->seq, seq)) 458 reservation_object_add_excl_fence(resv, NULL); 459 reservation_object_unlock(resv); 460 } 461 } 462 463 return timeout; 464 } 465 466 static void __fence_set_priority(struct dma_fence *fence, int prio) 467 { 468 struct drm_i915_gem_request *rq; 469 struct intel_engine_cs *engine; 470 471 if (!dma_fence_is_i915(fence)) 472 return; 473 474 rq = to_request(fence); 475 engine = rq->engine; 476 if (!engine->schedule) 477 return; 478 479 engine->schedule(rq, prio); 480 } 481 482 static void fence_set_priority(struct dma_fence *fence, int prio) 483 { 484 /* Recurse once into a fence-array */ 485 if (dma_fence_is_array(fence)) { 486 struct dma_fence_array *array = to_dma_fence_array(fence); 487 int i; 488 489 for (i = 0; i < array->num_fences; i++) 490 __fence_set_priority(array->fences[i], prio); 491 } else { 492 __fence_set_priority(fence, prio); 493 } 494 } 495 496 int 497 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, 498 unsigned int flags, 499 int prio) 500 { 501 struct dma_fence *excl; 502 503 if (flags & I915_WAIT_ALL) { 504 struct dma_fence **shared; 505 unsigned int count, i; 506 int ret; 507 508 ret = reservation_object_get_fences_rcu(obj->resv, 509 &excl, &count, &shared); 510 if (ret) 511 return ret; 512 513 for (i = 0; i < count; i++) { 514 fence_set_priority(shared[i], prio); 515 dma_fence_put(shared[i]); 516 } 517 518 kfree(shared); 519 } else { 520 excl = reservation_object_get_excl_rcu(obj->resv); 521 } 522 523 if (excl) { 524 fence_set_priority(excl, prio); 525 dma_fence_put(excl); 526 } 527 return 0; 528 } 529 530 /** 531 * Waits for rendering to the object to be completed 532 * @obj: i915 gem object 533 * @flags: how to wait (under a lock, for all rendering or just for writes etc) 534 * @timeout: how long to wait 535 * @rps: client (user process) to charge for any waitboosting 536 */ 537 int 538 i915_gem_object_wait(struct drm_i915_gem_object *obj, 539 unsigned int flags, 540 long timeout, 541 struct intel_rps_client *rps) 542 { 543 might_sleep(); 544 #if IS_ENABLED(CONFIG_LOCKDEP) 545 GEM_BUG_ON(debug_locks && 546 !!lockdep_is_held(&obj->base.dev->struct_mutex) != 547 !!(flags & I915_WAIT_LOCKED)); 548 #endif 549 GEM_BUG_ON(timeout < 0); 550 551 timeout = i915_gem_object_wait_reservation(obj->resv, 552 flags, timeout, 553 rps); 554 return timeout < 0 ? timeout : 0; 555 } 556 557 static struct intel_rps_client *to_rps_client(struct drm_file *file) 558 { 559 struct drm_i915_file_private *fpriv = file->driver_priv; 560 561 return &fpriv->rps; 562 } 563 564 static int 565 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, 566 struct drm_i915_gem_pwrite *args, 567 struct drm_file *file) 568 { 569 void *vaddr = obj->phys_handle->vaddr + args->offset; 570 char __user *user_data = u64_to_user_ptr(args->data_ptr); 571 572 /* We manually control the domain here and pretend that it 573 * remains coherent i.e. in the GTT domain, like shmem_pwrite. 574 */ 575 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 576 if (copy_from_user(vaddr, user_data, args->size)) 577 return -EFAULT; 578 579 drm_clflush_virt_range(vaddr, args->size); 580 i915_gem_chipset_flush(to_i915(obj->base.dev)); 581 582 intel_fb_obj_flush(obj, ORIGIN_CPU); 583 return 0; 584 } 585 586 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv) 587 { 588 return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL); 589 } 590 591 void i915_gem_object_free(struct drm_i915_gem_object *obj) 592 { 593 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 594 kmem_cache_free(dev_priv->objects, obj); 595 } 596 597 static int 598 i915_gem_create(struct drm_file *file, 599 struct drm_i915_private *dev_priv, 600 uint64_t size, 601 uint32_t *handle_p) 602 { 603 struct drm_i915_gem_object *obj; 604 int ret; 605 u32 handle; 606 607 size = roundup(size, PAGE_SIZE); 608 if (size == 0) 609 return -EINVAL; 610 611 /* Allocate the new object */ 612 obj = i915_gem_object_create(dev_priv, size); 613 if (IS_ERR(obj)) 614 return PTR_ERR(obj); 615 616 ret = drm_gem_handle_create(file, &obj->base, &handle); 617 /* drop reference from allocate - handle holds it now */ 618 i915_gem_object_put(obj); 619 if (ret) 620 return ret; 621 622 *handle_p = handle; 623 return 0; 624 } 625 626 int 627 i915_gem_dumb_create(struct drm_file *file, 628 struct drm_device *dev, 629 struct drm_mode_create_dumb *args) 630 { 631 /* have to work out size/pitch and return them */ 632 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); 633 args->size = args->pitch * args->height; 634 return i915_gem_create(file, to_i915(dev), 635 args->size, &args->handle); 636 } 637 638 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) 639 { 640 return !(obj->cache_level == I915_CACHE_NONE || 641 obj->cache_level == I915_CACHE_WT); 642 } 643 644 /** 645 * Creates a new mm object and returns a handle to it. 646 * @dev: drm device pointer 647 * @data: ioctl data blob 648 * @file: drm file pointer 649 */ 650 int 651 i915_gem_create_ioctl(struct drm_device *dev, void *data, 652 struct drm_file *file) 653 { 654 struct drm_i915_private *dev_priv = to_i915(dev); 655 struct drm_i915_gem_create *args = data; 656 657 i915_gem_flush_free_objects(dev_priv); 658 659 return i915_gem_create(file, dev_priv, 660 args->size, &args->handle); 661 } 662 663 static inline enum fb_op_origin 664 fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain) 665 { 666 return (domain == I915_GEM_DOMAIN_GTT ? 667 obj->frontbuffer_ggtt_origin : ORIGIN_CPU); 668 } 669 670 static void 671 flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) 672 { 673 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 674 675 if (!(obj->base.write_domain & flush_domains)) 676 return; 677 678 /* No actual flushing is required for the GTT write domain. Writes 679 * to it "immediately" go to main memory as far as we know, so there's 680 * no chipset flush. It also doesn't land in render cache. 681 * 682 * However, we do have to enforce the order so that all writes through 683 * the GTT land before any writes to the device, such as updates to 684 * the GATT itself. 685 * 686 * We also have to wait a bit for the writes to land from the GTT. 687 * An uncached read (i.e. mmio) seems to be ideal for the round-trip 688 * timing. This issue has only been observed when switching quickly 689 * between GTT writes and CPU reads from inside the kernel on recent hw, 690 * and it appears to only affect discrete GTT blocks (i.e. on LLC 691 * system agents we cannot reproduce this behaviour). 692 */ 693 wmb(); 694 695 switch (obj->base.write_domain) { 696 case I915_GEM_DOMAIN_GTT: 697 if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) { 698 intel_runtime_pm_get(dev_priv); 699 spin_lock_irq(&dev_priv->uncore.lock); 700 POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base)); 701 spin_unlock_irq(&dev_priv->uncore.lock); 702 intel_runtime_pm_put(dev_priv); 703 } 704 705 intel_fb_obj_flush(obj, 706 fb_write_origin(obj, I915_GEM_DOMAIN_GTT)); 707 break; 708 709 case I915_GEM_DOMAIN_CPU: 710 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); 711 break; 712 713 case I915_GEM_DOMAIN_RENDER: 714 if (gpu_write_needs_clflush(obj)) 715 obj->cache_dirty = true; 716 break; 717 } 718 719 obj->base.write_domain = 0; 720 } 721 722 static inline int 723 __copy_to_user_swizzled(char __user *cpu_vaddr, 724 const char *gpu_vaddr, int gpu_offset, 725 int length) 726 { 727 int ret, cpu_offset = 0; 728 729 while (length > 0) { 730 int cacheline_end = ALIGN(gpu_offset + 1, 64); 731 int this_length = min(cacheline_end - gpu_offset, length); 732 int swizzled_gpu_offset = gpu_offset ^ 64; 733 734 ret = __copy_to_user(cpu_vaddr + cpu_offset, 735 gpu_vaddr + swizzled_gpu_offset, 736 this_length); 737 if (ret) 738 return ret + length; 739 740 cpu_offset += this_length; 741 gpu_offset += this_length; 742 length -= this_length; 743 } 744 745 return 0; 746 } 747 748 static inline int 749 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset, 750 const char __user *cpu_vaddr, 751 int length) 752 { 753 int ret, cpu_offset = 0; 754 755 while (length > 0) { 756 int cacheline_end = ALIGN(gpu_offset + 1, 64); 757 int this_length = min(cacheline_end - gpu_offset, length); 758 int swizzled_gpu_offset = gpu_offset ^ 64; 759 760 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset, 761 cpu_vaddr + cpu_offset, 762 this_length); 763 if (ret) 764 return ret + length; 765 766 cpu_offset += this_length; 767 gpu_offset += this_length; 768 length -= this_length; 769 } 770 771 return 0; 772 } 773 774 /* 775 * Pins the specified object's pages and synchronizes the object with 776 * GPU accesses. Sets needs_clflush to non-zero if the caller should 777 * flush the object from the CPU cache. 778 */ 779 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 780 unsigned int *needs_clflush) 781 { 782 int ret; 783 784 lockdep_assert_held(&obj->base.dev->struct_mutex); 785 786 *needs_clflush = 0; 787 if (!i915_gem_object_has_struct_page(obj)) 788 return -ENODEV; 789 790 ret = i915_gem_object_wait(obj, 791 I915_WAIT_INTERRUPTIBLE | 792 I915_WAIT_LOCKED, 793 MAX_SCHEDULE_TIMEOUT, 794 NULL); 795 if (ret) 796 return ret; 797 798 ret = i915_gem_object_pin_pages(obj); 799 if (ret) 800 return ret; 801 802 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ || 803 !static_cpu_has(X86_FEATURE_CLFLUSH)) { 804 ret = i915_gem_object_set_to_cpu_domain(obj, false); 805 if (ret) 806 goto err_unpin; 807 else 808 goto out; 809 } 810 811 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 812 813 /* If we're not in the cpu read domain, set ourself into the gtt 814 * read domain and manually flush cachelines (if required). This 815 * optimizes for the case when the gpu will dirty the data 816 * anyway again before the next pread happens. 817 */ 818 if (!obj->cache_dirty && 819 !(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 820 *needs_clflush = CLFLUSH_BEFORE; 821 822 out: 823 /* return with the pages pinned */ 824 return 0; 825 826 err_unpin: 827 i915_gem_object_unpin_pages(obj); 828 return ret; 829 } 830 831 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, 832 unsigned int *needs_clflush) 833 { 834 int ret; 835 836 lockdep_assert_held(&obj->base.dev->struct_mutex); 837 838 *needs_clflush = 0; 839 if (!i915_gem_object_has_struct_page(obj)) 840 return -ENODEV; 841 842 ret = i915_gem_object_wait(obj, 843 I915_WAIT_INTERRUPTIBLE | 844 I915_WAIT_LOCKED | 845 I915_WAIT_ALL, 846 MAX_SCHEDULE_TIMEOUT, 847 NULL); 848 if (ret) 849 return ret; 850 851 ret = i915_gem_object_pin_pages(obj); 852 if (ret) 853 return ret; 854 855 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE || 856 !static_cpu_has(X86_FEATURE_CLFLUSH)) { 857 ret = i915_gem_object_set_to_cpu_domain(obj, true); 858 if (ret) 859 goto err_unpin; 860 else 861 goto out; 862 } 863 864 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 865 866 /* If we're not in the cpu write domain, set ourself into the 867 * gtt write domain and manually flush cachelines (as required). 868 * This optimizes for the case when the gpu will use the data 869 * right away and we therefore have to clflush anyway. 870 */ 871 if (!obj->cache_dirty) { 872 *needs_clflush |= CLFLUSH_AFTER; 873 874 /* 875 * Same trick applies to invalidate partially written 876 * cachelines read before writing. 877 */ 878 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 879 *needs_clflush |= CLFLUSH_BEFORE; 880 } 881 882 out: 883 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 884 obj->mm.dirty = true; 885 /* return with the pages pinned */ 886 return 0; 887 888 err_unpin: 889 i915_gem_object_unpin_pages(obj); 890 return ret; 891 } 892 893 static void 894 shmem_clflush_swizzled_range(char *addr, unsigned long length, 895 bool swizzled) 896 { 897 if (unlikely(swizzled)) { 898 unsigned long start = (unsigned long) addr; 899 unsigned long end = (unsigned long) addr + length; 900 901 /* For swizzling simply ensure that we always flush both 902 * channels. Lame, but simple and it works. Swizzled 903 * pwrite/pread is far from a hotpath - current userspace 904 * doesn't use it at all. */ 905 start = round_down(start, 128); 906 end = round_up(end, 128); 907 908 drm_clflush_virt_range((void *)start, end - start); 909 } else { 910 drm_clflush_virt_range(addr, length); 911 } 912 913 } 914 915 /* Only difference to the fast-path function is that this can handle bit17 916 * and uses non-atomic copy and kmap functions. */ 917 static int 918 shmem_pread_slow(struct page *page, int offset, int length, 919 char __user *user_data, 920 bool page_do_bit17_swizzling, bool needs_clflush) 921 { 922 char *vaddr; 923 int ret; 924 925 vaddr = kmap(page); 926 if (needs_clflush) 927 shmem_clflush_swizzled_range(vaddr + offset, length, 928 page_do_bit17_swizzling); 929 930 if (page_do_bit17_swizzling) 931 ret = __copy_to_user_swizzled(user_data, vaddr, offset, length); 932 else 933 ret = __copy_to_user(user_data, vaddr + offset, length); 934 kunmap(page); 935 936 return ret ? - EFAULT : 0; 937 } 938 939 static int 940 shmem_pread(struct page *page, int offset, int length, char __user *user_data, 941 bool page_do_bit17_swizzling, bool needs_clflush) 942 { 943 int ret; 944 945 ret = -ENODEV; 946 if (!page_do_bit17_swizzling) { 947 char *vaddr = kmap_atomic(page); 948 949 if (needs_clflush) 950 drm_clflush_virt_range(vaddr + offset, length); 951 ret = __copy_to_user_inatomic(user_data, vaddr + offset, length); 952 kunmap_atomic(vaddr); 953 } 954 if (ret == 0) 955 return 0; 956 957 return shmem_pread_slow(page, offset, length, user_data, 958 page_do_bit17_swizzling, needs_clflush); 959 } 960 961 static int 962 i915_gem_shmem_pread(struct drm_i915_gem_object *obj, 963 struct drm_i915_gem_pread *args) 964 { 965 char __user *user_data; 966 u64 remain; 967 unsigned int obj_do_bit17_swizzling; 968 unsigned int needs_clflush; 969 unsigned int idx, offset; 970 int ret; 971 972 obj_do_bit17_swizzling = 0; 973 if (i915_gem_object_needs_bit17_swizzle(obj)) 974 obj_do_bit17_swizzling = BIT(17); 975 976 ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex); 977 if (ret) 978 return ret; 979 980 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush); 981 mutex_unlock(&obj->base.dev->struct_mutex); 982 if (ret) 983 return ret; 984 985 remain = args->size; 986 user_data = u64_to_user_ptr(args->data_ptr); 987 offset = offset_in_page(args->offset); 988 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { 989 struct page *page = i915_gem_object_get_page(obj, idx); 990 int length; 991 992 length = remain; 993 if (offset + length > PAGE_SIZE) 994 length = PAGE_SIZE - offset; 995 996 ret = shmem_pread(page, offset, length, user_data, 997 page_to_phys(page) & obj_do_bit17_swizzling, 998 needs_clflush); 999 if (ret) 1000 break; 1001 1002 remain -= length; 1003 user_data += length; 1004 offset = 0; 1005 } 1006 1007 i915_gem_obj_finish_shmem_access(obj); 1008 return ret; 1009 } 1010 1011 static inline bool 1012 gtt_user_read(struct io_mapping *mapping, 1013 loff_t base, int offset, 1014 char __user *user_data, int length) 1015 { 1016 void *vaddr; 1017 unsigned long unwritten; 1018 1019 /* We can use the cpu mem copy function because this is X86. */ 1020 vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base); 1021 unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length); 1022 io_mapping_unmap_atomic(vaddr); 1023 if (unwritten) { 1024 vaddr = (void __force *) 1025 io_mapping_map_wc(mapping, base, PAGE_SIZE); 1026 unwritten = copy_to_user(user_data, vaddr + offset, length); 1027 io_mapping_unmap(vaddr); 1028 } 1029 return unwritten; 1030 } 1031 1032 static int 1033 i915_gem_gtt_pread(struct drm_i915_gem_object *obj, 1034 const struct drm_i915_gem_pread *args) 1035 { 1036 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1037 struct i915_ggtt *ggtt = &i915->ggtt; 1038 struct drm_mm_node node; 1039 struct i915_vma *vma; 1040 void __user *user_data; 1041 u64 remain, offset; 1042 int ret; 1043 1044 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 1045 if (ret) 1046 return ret; 1047 1048 intel_runtime_pm_get(i915); 1049 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 1050 PIN_MAPPABLE | PIN_NONBLOCK); 1051 if (!IS_ERR(vma)) { 1052 node.start = i915_ggtt_offset(vma); 1053 node.allocated = false; 1054 ret = i915_vma_put_fence(vma); 1055 if (ret) { 1056 i915_vma_unpin(vma); 1057 vma = ERR_PTR(ret); 1058 } 1059 } 1060 if (IS_ERR(vma)) { 1061 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); 1062 if (ret) 1063 goto out_unlock; 1064 GEM_BUG_ON(!node.allocated); 1065 } 1066 1067 ret = i915_gem_object_set_to_gtt_domain(obj, false); 1068 if (ret) 1069 goto out_unpin; 1070 1071 mutex_unlock(&i915->drm.struct_mutex); 1072 1073 user_data = u64_to_user_ptr(args->data_ptr); 1074 remain = args->size; 1075 offset = args->offset; 1076 1077 while (remain > 0) { 1078 /* Operation in this page 1079 * 1080 * page_base = page offset within aperture 1081 * page_offset = offset within page 1082 * page_length = bytes to copy for this page 1083 */ 1084 u32 page_base = node.start; 1085 unsigned page_offset = offset_in_page(offset); 1086 unsigned page_length = PAGE_SIZE - page_offset; 1087 page_length = remain < page_length ? remain : page_length; 1088 if (node.allocated) { 1089 wmb(); 1090 ggtt->base.insert_page(&ggtt->base, 1091 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), 1092 node.start, I915_CACHE_NONE, 0); 1093 wmb(); 1094 } else { 1095 page_base += offset & PAGE_MASK; 1096 } 1097 1098 if (gtt_user_read(&ggtt->mappable, page_base, page_offset, 1099 user_data, page_length)) { 1100 ret = -EFAULT; 1101 break; 1102 } 1103 1104 remain -= page_length; 1105 user_data += page_length; 1106 offset += page_length; 1107 } 1108 1109 mutex_lock(&i915->drm.struct_mutex); 1110 out_unpin: 1111 if (node.allocated) { 1112 wmb(); 1113 ggtt->base.clear_range(&ggtt->base, 1114 node.start, node.size); 1115 remove_mappable_node(&node); 1116 } else { 1117 i915_vma_unpin(vma); 1118 } 1119 out_unlock: 1120 intel_runtime_pm_put(i915); 1121 mutex_unlock(&i915->drm.struct_mutex); 1122 1123 return ret; 1124 } 1125 1126 /** 1127 * Reads data from the object referenced by handle. 1128 * @dev: drm device pointer 1129 * @data: ioctl data blob 1130 * @file: drm file pointer 1131 * 1132 * On error, the contents of *data are undefined. 1133 */ 1134 int 1135 i915_gem_pread_ioctl(struct drm_device *dev, void *data, 1136 struct drm_file *file) 1137 { 1138 struct drm_i915_gem_pread *args = data; 1139 struct drm_i915_gem_object *obj; 1140 int ret; 1141 1142 if (args->size == 0) 1143 return 0; 1144 1145 if (!access_ok(VERIFY_WRITE, 1146 u64_to_user_ptr(args->data_ptr), 1147 args->size)) 1148 return -EFAULT; 1149 1150 obj = i915_gem_object_lookup(file, args->handle); 1151 if (!obj) 1152 return -ENOENT; 1153 1154 /* Bounds check source. */ 1155 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) { 1156 ret = -EINVAL; 1157 goto out; 1158 } 1159 1160 trace_i915_gem_object_pread(obj, args->offset, args->size); 1161 1162 ret = i915_gem_object_wait(obj, 1163 I915_WAIT_INTERRUPTIBLE, 1164 MAX_SCHEDULE_TIMEOUT, 1165 to_rps_client(file)); 1166 if (ret) 1167 goto out; 1168 1169 ret = i915_gem_object_pin_pages(obj); 1170 if (ret) 1171 goto out; 1172 1173 ret = i915_gem_shmem_pread(obj, args); 1174 if (ret == -EFAULT || ret == -ENODEV) 1175 ret = i915_gem_gtt_pread(obj, args); 1176 1177 i915_gem_object_unpin_pages(obj); 1178 out: 1179 i915_gem_object_put(obj); 1180 return ret; 1181 } 1182 1183 /* This is the fast write path which cannot handle 1184 * page faults in the source data 1185 */ 1186 1187 static inline bool 1188 ggtt_write(struct io_mapping *mapping, 1189 loff_t base, int offset, 1190 char __user *user_data, int length) 1191 { 1192 void *vaddr; 1193 unsigned long unwritten; 1194 1195 /* We can use the cpu mem copy function because this is X86. */ 1196 vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base); 1197 unwritten = __copy_from_user_inatomic_nocache(vaddr + offset, 1198 user_data, length); 1199 io_mapping_unmap_atomic(vaddr); 1200 if (unwritten) { 1201 vaddr = (void __force *) 1202 io_mapping_map_wc(mapping, base, PAGE_SIZE); 1203 unwritten = copy_from_user(vaddr + offset, user_data, length); 1204 io_mapping_unmap(vaddr); 1205 } 1206 1207 return unwritten; 1208 } 1209 1210 /** 1211 * This is the fast pwrite path, where we copy the data directly from the 1212 * user into the GTT, uncached. 1213 * @obj: i915 GEM object 1214 * @args: pwrite arguments structure 1215 */ 1216 static int 1217 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, 1218 const struct drm_i915_gem_pwrite *args) 1219 { 1220 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1221 struct i915_ggtt *ggtt = &i915->ggtt; 1222 struct drm_mm_node node; 1223 struct i915_vma *vma; 1224 u64 remain, offset; 1225 void __user *user_data; 1226 int ret; 1227 1228 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 1229 if (ret) 1230 return ret; 1231 1232 intel_runtime_pm_get(i915); 1233 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 1234 PIN_MAPPABLE | PIN_NONBLOCK); 1235 if (!IS_ERR(vma)) { 1236 node.start = i915_ggtt_offset(vma); 1237 node.allocated = false; 1238 ret = i915_vma_put_fence(vma); 1239 if (ret) { 1240 i915_vma_unpin(vma); 1241 vma = ERR_PTR(ret); 1242 } 1243 } 1244 if (IS_ERR(vma)) { 1245 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); 1246 if (ret) 1247 goto out_unlock; 1248 GEM_BUG_ON(!node.allocated); 1249 } 1250 1251 ret = i915_gem_object_set_to_gtt_domain(obj, true); 1252 if (ret) 1253 goto out_unpin; 1254 1255 mutex_unlock(&i915->drm.struct_mutex); 1256 1257 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 1258 1259 user_data = u64_to_user_ptr(args->data_ptr); 1260 offset = args->offset; 1261 remain = args->size; 1262 while (remain) { 1263 /* Operation in this page 1264 * 1265 * page_base = page offset within aperture 1266 * page_offset = offset within page 1267 * page_length = bytes to copy for this page 1268 */ 1269 u32 page_base = node.start; 1270 unsigned int page_offset = offset_in_page(offset); 1271 unsigned int page_length = PAGE_SIZE - page_offset; 1272 page_length = remain < page_length ? remain : page_length; 1273 if (node.allocated) { 1274 wmb(); /* flush the write before we modify the GGTT */ 1275 ggtt->base.insert_page(&ggtt->base, 1276 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), 1277 node.start, I915_CACHE_NONE, 0); 1278 wmb(); /* flush modifications to the GGTT (insert_page) */ 1279 } else { 1280 page_base += offset & PAGE_MASK; 1281 } 1282 /* If we get a fault while copying data, then (presumably) our 1283 * source page isn't available. Return the error and we'll 1284 * retry in the slow path. 1285 * If the object is non-shmem backed, we retry again with the 1286 * path that handles page fault. 1287 */ 1288 if (ggtt_write(&ggtt->mappable, page_base, page_offset, 1289 user_data, page_length)) { 1290 ret = -EFAULT; 1291 break; 1292 } 1293 1294 remain -= page_length; 1295 user_data += page_length; 1296 offset += page_length; 1297 } 1298 intel_fb_obj_flush(obj, ORIGIN_CPU); 1299 1300 mutex_lock(&i915->drm.struct_mutex); 1301 out_unpin: 1302 if (node.allocated) { 1303 wmb(); 1304 ggtt->base.clear_range(&ggtt->base, 1305 node.start, node.size); 1306 remove_mappable_node(&node); 1307 } else { 1308 i915_vma_unpin(vma); 1309 } 1310 out_unlock: 1311 intel_runtime_pm_put(i915); 1312 mutex_unlock(&i915->drm.struct_mutex); 1313 return ret; 1314 } 1315 1316 static int 1317 shmem_pwrite_slow(struct page *page, int offset, int length, 1318 char __user *user_data, 1319 bool page_do_bit17_swizzling, 1320 bool needs_clflush_before, 1321 bool needs_clflush_after) 1322 { 1323 char *vaddr; 1324 int ret; 1325 1326 vaddr = kmap(page); 1327 if (unlikely(needs_clflush_before || page_do_bit17_swizzling)) 1328 shmem_clflush_swizzled_range(vaddr + offset, length, 1329 page_do_bit17_swizzling); 1330 if (page_do_bit17_swizzling) 1331 ret = __copy_from_user_swizzled(vaddr, offset, user_data, 1332 length); 1333 else 1334 ret = __copy_from_user(vaddr + offset, user_data, length); 1335 if (needs_clflush_after) 1336 shmem_clflush_swizzled_range(vaddr + offset, length, 1337 page_do_bit17_swizzling); 1338 kunmap(page); 1339 1340 return ret ? -EFAULT : 0; 1341 } 1342 1343 /* Per-page copy function for the shmem pwrite fastpath. 1344 * Flushes invalid cachelines before writing to the target if 1345 * needs_clflush_before is set and flushes out any written cachelines after 1346 * writing if needs_clflush is set. 1347 */ 1348 static int 1349 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data, 1350 bool page_do_bit17_swizzling, 1351 bool needs_clflush_before, 1352 bool needs_clflush_after) 1353 { 1354 int ret; 1355 1356 ret = -ENODEV; 1357 if (!page_do_bit17_swizzling) { 1358 char *vaddr = kmap_atomic(page); 1359 1360 if (needs_clflush_before) 1361 drm_clflush_virt_range(vaddr + offset, len); 1362 ret = __copy_from_user_inatomic(vaddr + offset, user_data, len); 1363 if (needs_clflush_after) 1364 drm_clflush_virt_range(vaddr + offset, len); 1365 1366 kunmap_atomic(vaddr); 1367 } 1368 if (ret == 0) 1369 return ret; 1370 1371 return shmem_pwrite_slow(page, offset, len, user_data, 1372 page_do_bit17_swizzling, 1373 needs_clflush_before, 1374 needs_clflush_after); 1375 } 1376 1377 static int 1378 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, 1379 const struct drm_i915_gem_pwrite *args) 1380 { 1381 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1382 void __user *user_data; 1383 u64 remain; 1384 unsigned int obj_do_bit17_swizzling; 1385 unsigned int partial_cacheline_write; 1386 unsigned int needs_clflush; 1387 unsigned int offset, idx; 1388 int ret; 1389 1390 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 1391 if (ret) 1392 return ret; 1393 1394 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush); 1395 mutex_unlock(&i915->drm.struct_mutex); 1396 if (ret) 1397 return ret; 1398 1399 obj_do_bit17_swizzling = 0; 1400 if (i915_gem_object_needs_bit17_swizzle(obj)) 1401 obj_do_bit17_swizzling = BIT(17); 1402 1403 /* If we don't overwrite a cacheline completely we need to be 1404 * careful to have up-to-date data by first clflushing. Don't 1405 * overcomplicate things and flush the entire patch. 1406 */ 1407 partial_cacheline_write = 0; 1408 if (needs_clflush & CLFLUSH_BEFORE) 1409 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1; 1410 1411 user_data = u64_to_user_ptr(args->data_ptr); 1412 remain = args->size; 1413 offset = offset_in_page(args->offset); 1414 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { 1415 struct page *page = i915_gem_object_get_page(obj, idx); 1416 int length; 1417 1418 length = remain; 1419 if (offset + length > PAGE_SIZE) 1420 length = PAGE_SIZE - offset; 1421 1422 ret = shmem_pwrite(page, offset, length, user_data, 1423 page_to_phys(page) & obj_do_bit17_swizzling, 1424 (offset | length) & partial_cacheline_write, 1425 needs_clflush & CLFLUSH_AFTER); 1426 if (ret) 1427 break; 1428 1429 remain -= length; 1430 user_data += length; 1431 offset = 0; 1432 } 1433 1434 intel_fb_obj_flush(obj, ORIGIN_CPU); 1435 i915_gem_obj_finish_shmem_access(obj); 1436 return ret; 1437 } 1438 1439 /** 1440 * Writes data to the object referenced by handle. 1441 * @dev: drm device 1442 * @data: ioctl data blob 1443 * @file: drm file 1444 * 1445 * On error, the contents of the buffer that were to be modified are undefined. 1446 */ 1447 int 1448 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1449 struct drm_file *file) 1450 { 1451 struct drm_i915_gem_pwrite *args = data; 1452 struct drm_i915_gem_object *obj; 1453 int ret; 1454 1455 if (args->size == 0) 1456 return 0; 1457 1458 if (!access_ok(VERIFY_READ, 1459 u64_to_user_ptr(args->data_ptr), 1460 args->size)) 1461 return -EFAULT; 1462 1463 obj = i915_gem_object_lookup(file, args->handle); 1464 if (!obj) 1465 return -ENOENT; 1466 1467 /* Bounds check destination. */ 1468 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) { 1469 ret = -EINVAL; 1470 goto err; 1471 } 1472 1473 trace_i915_gem_object_pwrite(obj, args->offset, args->size); 1474 1475 ret = -ENODEV; 1476 if (obj->ops->pwrite) 1477 ret = obj->ops->pwrite(obj, args); 1478 if (ret != -ENODEV) 1479 goto err; 1480 1481 ret = i915_gem_object_wait(obj, 1482 I915_WAIT_INTERRUPTIBLE | 1483 I915_WAIT_ALL, 1484 MAX_SCHEDULE_TIMEOUT, 1485 to_rps_client(file)); 1486 if (ret) 1487 goto err; 1488 1489 ret = i915_gem_object_pin_pages(obj); 1490 if (ret) 1491 goto err; 1492 1493 ret = -EFAULT; 1494 /* We can only do the GTT pwrite on untiled buffers, as otherwise 1495 * it would end up going through the fenced access, and we'll get 1496 * different detiling behavior between reading and writing. 1497 * pread/pwrite currently are reading and writing from the CPU 1498 * perspective, requiring manual detiling by the client. 1499 */ 1500 if (!i915_gem_object_has_struct_page(obj) || 1501 cpu_write_needs_clflush(obj)) 1502 /* Note that the gtt paths might fail with non-page-backed user 1503 * pointers (e.g. gtt mappings when moving data between 1504 * textures). Fallback to the shmem path in that case. 1505 */ 1506 ret = i915_gem_gtt_pwrite_fast(obj, args); 1507 1508 if (ret == -EFAULT || ret == -ENOSPC) { 1509 if (obj->phys_handle) 1510 ret = i915_gem_phys_pwrite(obj, args, file); 1511 else 1512 ret = i915_gem_shmem_pwrite(obj, args); 1513 } 1514 1515 i915_gem_object_unpin_pages(obj); 1516 err: 1517 i915_gem_object_put(obj); 1518 return ret; 1519 } 1520 1521 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) 1522 { 1523 struct drm_i915_private *i915; 1524 struct list_head *list; 1525 struct i915_vma *vma; 1526 1527 list_for_each_entry(vma, &obj->vma_list, obj_link) { 1528 if (!i915_vma_is_ggtt(vma)) 1529 break; 1530 1531 if (i915_vma_is_active(vma)) 1532 continue; 1533 1534 if (!drm_mm_node_allocated(&vma->node)) 1535 continue; 1536 1537 list_move_tail(&vma->vm_link, &vma->vm->inactive_list); 1538 } 1539 1540 i915 = to_i915(obj->base.dev); 1541 list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list; 1542 list_move_tail(&obj->global_link, list); 1543 } 1544 1545 /** 1546 * Called when user space prepares to use an object with the CPU, either 1547 * through the mmap ioctl's mapping or a GTT mapping. 1548 * @dev: drm device 1549 * @data: ioctl data blob 1550 * @file: drm file 1551 */ 1552 int 1553 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 1554 struct drm_file *file) 1555 { 1556 struct drm_i915_gem_set_domain *args = data; 1557 struct drm_i915_gem_object *obj; 1558 uint32_t read_domains = args->read_domains; 1559 uint32_t write_domain = args->write_domain; 1560 int err; 1561 1562 /* Only handle setting domains to types used by the CPU. */ 1563 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS) 1564 return -EINVAL; 1565 1566 /* Having something in the write domain implies it's in the read 1567 * domain, and only that read domain. Enforce that in the request. 1568 */ 1569 if (write_domain != 0 && read_domains != write_domain) 1570 return -EINVAL; 1571 1572 obj = i915_gem_object_lookup(file, args->handle); 1573 if (!obj) 1574 return -ENOENT; 1575 1576 /* Try to flush the object off the GPU without holding the lock. 1577 * We will repeat the flush holding the lock in the normal manner 1578 * to catch cases where we are gazumped. 1579 */ 1580 err = i915_gem_object_wait(obj, 1581 I915_WAIT_INTERRUPTIBLE | 1582 (write_domain ? I915_WAIT_ALL : 0), 1583 MAX_SCHEDULE_TIMEOUT, 1584 to_rps_client(file)); 1585 if (err) 1586 goto out; 1587 1588 /* Flush and acquire obj->pages so that we are coherent through 1589 * direct access in memory with previous cached writes through 1590 * shmemfs and that our cache domain tracking remains valid. 1591 * For example, if the obj->filp was moved to swap without us 1592 * being notified and releasing the pages, we would mistakenly 1593 * continue to assume that the obj remained out of the CPU cached 1594 * domain. 1595 */ 1596 err = i915_gem_object_pin_pages(obj); 1597 if (err) 1598 goto out; 1599 1600 err = i915_mutex_lock_interruptible(dev); 1601 if (err) 1602 goto out_unpin; 1603 1604 if (read_domains & I915_GEM_DOMAIN_WC) 1605 err = i915_gem_object_set_to_wc_domain(obj, write_domain); 1606 else if (read_domains & I915_GEM_DOMAIN_GTT) 1607 err = i915_gem_object_set_to_gtt_domain(obj, write_domain); 1608 else 1609 err = i915_gem_object_set_to_cpu_domain(obj, write_domain); 1610 1611 /* And bump the LRU for this access */ 1612 i915_gem_object_bump_inactive_ggtt(obj); 1613 1614 mutex_unlock(&dev->struct_mutex); 1615 1616 if (write_domain != 0) 1617 intel_fb_obj_invalidate(obj, 1618 fb_write_origin(obj, write_domain)); 1619 1620 out_unpin: 1621 i915_gem_object_unpin_pages(obj); 1622 out: 1623 i915_gem_object_put(obj); 1624 return err; 1625 } 1626 1627 /** 1628 * Called when user space has done writes to this buffer 1629 * @dev: drm device 1630 * @data: ioctl data blob 1631 * @file: drm file 1632 */ 1633 int 1634 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 1635 struct drm_file *file) 1636 { 1637 struct drm_i915_gem_sw_finish *args = data; 1638 struct drm_i915_gem_object *obj; 1639 1640 obj = i915_gem_object_lookup(file, args->handle); 1641 if (!obj) 1642 return -ENOENT; 1643 1644 /* Pinned buffers may be scanout, so flush the cache */ 1645 i915_gem_object_flush_if_display(obj); 1646 i915_gem_object_put(obj); 1647 1648 return 0; 1649 } 1650 1651 /** 1652 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address 1653 * it is mapped to. 1654 * @dev: drm device 1655 * @data: ioctl data blob 1656 * @file: drm file 1657 * 1658 * While the mapping holds a reference on the contents of the object, it doesn't 1659 * imply a ref on the object itself. 1660 * 1661 * IMPORTANT: 1662 * 1663 * DRM driver writers who look a this function as an example for how to do GEM 1664 * mmap support, please don't implement mmap support like here. The modern way 1665 * to implement DRM mmap support is with an mmap offset ioctl (like 1666 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly. 1667 * That way debug tooling like valgrind will understand what's going on, hiding 1668 * the mmap call in a driver private ioctl will break that. The i915 driver only 1669 * does cpu mmaps this way because we didn't know better. 1670 */ 1671 int 1672 i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 1673 struct drm_file *file) 1674 { 1675 struct drm_i915_gem_mmap *args = data; 1676 struct drm_i915_gem_object *obj; 1677 unsigned long addr; 1678 1679 if (args->flags & ~(I915_MMAP_WC)) 1680 return -EINVAL; 1681 1682 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT)) 1683 return -ENODEV; 1684 1685 obj = i915_gem_object_lookup(file, args->handle); 1686 if (!obj) 1687 return -ENOENT; 1688 1689 /* prime objects have no backing filp to GEM mmap 1690 * pages from. 1691 */ 1692 if (!obj->base.filp) { 1693 i915_gem_object_put(obj); 1694 return -EINVAL; 1695 } 1696 1697 addr = vm_mmap(obj->base.filp, 0, args->size, 1698 PROT_READ | PROT_WRITE, MAP_SHARED, 1699 args->offset); 1700 if (args->flags & I915_MMAP_WC) { 1701 struct mm_struct *mm = current->mm; 1702 struct vm_area_struct *vma; 1703 1704 if (down_write_killable(&mm->mmap_sem)) { 1705 i915_gem_object_put(obj); 1706 return -EINTR; 1707 } 1708 vma = find_vma(mm, addr); 1709 if (vma) 1710 vma->vm_page_prot = 1711 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 1712 else 1713 addr = -ENOMEM; 1714 up_write(&mm->mmap_sem); 1715 1716 /* This may race, but that's ok, it only gets set */ 1717 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU); 1718 } 1719 i915_gem_object_put(obj); 1720 if (IS_ERR((void *)addr)) 1721 return addr; 1722 1723 args->addr_ptr = (uint64_t) addr; 1724 1725 return 0; 1726 } 1727 1728 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj) 1729 { 1730 return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT; 1731 } 1732 1733 /** 1734 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps 1735 * 1736 * A history of the GTT mmap interface: 1737 * 1738 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to 1739 * aligned and suitable for fencing, and still fit into the available 1740 * mappable space left by the pinned display objects. A classic problem 1741 * we called the page-fault-of-doom where we would ping-pong between 1742 * two objects that could not fit inside the GTT and so the memcpy 1743 * would page one object in at the expense of the other between every 1744 * single byte. 1745 * 1746 * 1 - Objects can be any size, and have any compatible fencing (X Y, or none 1747 * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the 1748 * object is too large for the available space (or simply too large 1749 * for the mappable aperture!), a view is created instead and faulted 1750 * into userspace. (This view is aligned and sized appropriately for 1751 * fenced access.) 1752 * 1753 * 2 - Recognise WC as a separate cache domain so that we can flush the 1754 * delayed writes via GTT before performing direct access via WC. 1755 * 1756 * Restrictions: 1757 * 1758 * * snoopable objects cannot be accessed via the GTT. It can cause machine 1759 * hangs on some architectures, corruption on others. An attempt to service 1760 * a GTT page fault from a snoopable object will generate a SIGBUS. 1761 * 1762 * * the object must be able to fit into RAM (physical memory, though no 1763 * limited to the mappable aperture). 1764 * 1765 * 1766 * Caveats: 1767 * 1768 * * a new GTT page fault will synchronize rendering from the GPU and flush 1769 * all data to system memory. Subsequent access will not be synchronized. 1770 * 1771 * * all mappings are revoked on runtime device suspend. 1772 * 1773 * * there are only 8, 16 or 32 fence registers to share between all users 1774 * (older machines require fence register for display and blitter access 1775 * as well). Contention of the fence registers will cause the previous users 1776 * to be unmapped and any new access will generate new page faults. 1777 * 1778 * * running out of memory while servicing a fault may generate a SIGBUS, 1779 * rather than the expected SIGSEGV. 1780 */ 1781 int i915_gem_mmap_gtt_version(void) 1782 { 1783 return 2; 1784 } 1785 1786 static inline struct i915_ggtt_view 1787 compute_partial_view(struct drm_i915_gem_object *obj, 1788 pgoff_t page_offset, 1789 unsigned int chunk) 1790 { 1791 struct i915_ggtt_view view; 1792 1793 if (i915_gem_object_is_tiled(obj)) 1794 chunk = roundup(chunk, tile_row_pages(obj)); 1795 1796 view.type = I915_GGTT_VIEW_PARTIAL; 1797 view.partial.offset = rounddown(page_offset, chunk); 1798 view.partial.size = 1799 min_t(unsigned int, chunk, 1800 (obj->base.size >> PAGE_SHIFT) - view.partial.offset); 1801 1802 /* If the partial covers the entire object, just create a normal VMA. */ 1803 if (chunk >= obj->base.size >> PAGE_SHIFT) 1804 view.type = I915_GGTT_VIEW_NORMAL; 1805 1806 return view; 1807 } 1808 1809 /** 1810 * i915_gem_fault - fault a page into the GTT 1811 * @vmf: fault info 1812 * 1813 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped 1814 * from userspace. The fault handler takes care of binding the object to 1815 * the GTT (if needed), allocating and programming a fence register (again, 1816 * only if needed based on whether the old reg is still valid or the object 1817 * is tiled) and inserting a new PTE into the faulting process. 1818 * 1819 * Note that the faulting process may involve evicting existing objects 1820 * from the GTT and/or fence registers to make room. So performance may 1821 * suffer if the GTT working set is large or there are few fence registers 1822 * left. 1823 * 1824 * The current feature set supported by i915_gem_fault() and thus GTT mmaps 1825 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version). 1826 */ 1827 int i915_gem_fault(struct vm_fault *vmf) 1828 { 1829 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */ 1830 struct vm_area_struct *area = vmf->vma; 1831 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data); 1832 struct drm_device *dev = obj->base.dev; 1833 struct drm_i915_private *dev_priv = to_i915(dev); 1834 struct i915_ggtt *ggtt = &dev_priv->ggtt; 1835 bool write = !!(vmf->flags & FAULT_FLAG_WRITE); 1836 struct i915_vma *vma; 1837 pgoff_t page_offset; 1838 unsigned int flags; 1839 int ret; 1840 1841 /* We don't use vmf->pgoff since that has the fake offset */ 1842 page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; 1843 1844 trace_i915_gem_object_fault(obj, page_offset, true, write); 1845 1846 /* Try to flush the object off the GPU first without holding the lock. 1847 * Upon acquiring the lock, we will perform our sanity checks and then 1848 * repeat the flush holding the lock in the normal manner to catch cases 1849 * where we are gazumped. 1850 */ 1851 ret = i915_gem_object_wait(obj, 1852 I915_WAIT_INTERRUPTIBLE, 1853 MAX_SCHEDULE_TIMEOUT, 1854 NULL); 1855 if (ret) 1856 goto err; 1857 1858 ret = i915_gem_object_pin_pages(obj); 1859 if (ret) 1860 goto err; 1861 1862 intel_runtime_pm_get(dev_priv); 1863 1864 ret = i915_mutex_lock_interruptible(dev); 1865 if (ret) 1866 goto err_rpm; 1867 1868 /* Access to snoopable pages through the GTT is incoherent. */ 1869 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) { 1870 ret = -EFAULT; 1871 goto err_unlock; 1872 } 1873 1874 /* If the object is smaller than a couple of partial vma, it is 1875 * not worth only creating a single partial vma - we may as well 1876 * clear enough space for the full object. 1877 */ 1878 flags = PIN_MAPPABLE; 1879 if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT) 1880 flags |= PIN_NONBLOCK | PIN_NONFAULT; 1881 1882 /* Now pin it into the GTT as needed */ 1883 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags); 1884 if (IS_ERR(vma)) { 1885 /* Use a partial view if it is bigger than available space */ 1886 struct i915_ggtt_view view = 1887 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); 1888 1889 /* Userspace is now writing through an untracked VMA, abandon 1890 * all hope that the hardware is able to track future writes. 1891 */ 1892 obj->frontbuffer_ggtt_origin = ORIGIN_CPU; 1893 1894 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); 1895 } 1896 if (IS_ERR(vma)) { 1897 ret = PTR_ERR(vma); 1898 goto err_unlock; 1899 } 1900 1901 ret = i915_gem_object_set_to_gtt_domain(obj, write); 1902 if (ret) 1903 goto err_unpin; 1904 1905 ret = i915_vma_get_fence(vma); 1906 if (ret) 1907 goto err_unpin; 1908 1909 /* Mark as being mmapped into userspace for later revocation */ 1910 assert_rpm_wakelock_held(dev_priv); 1911 if (list_empty(&obj->userfault_link)) 1912 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list); 1913 1914 /* Finally, remap it using the new GTT offset */ 1915 ret = remap_io_mapping(area, 1916 area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT), 1917 (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT, 1918 min_t(u64, vma->size, area->vm_end - area->vm_start), 1919 &ggtt->mappable); 1920 1921 err_unpin: 1922 __i915_vma_unpin(vma); 1923 err_unlock: 1924 mutex_unlock(&dev->struct_mutex); 1925 err_rpm: 1926 intel_runtime_pm_put(dev_priv); 1927 i915_gem_object_unpin_pages(obj); 1928 err: 1929 switch (ret) { 1930 case -EIO: 1931 /* 1932 * We eat errors when the gpu is terminally wedged to avoid 1933 * userspace unduly crashing (gl has no provisions for mmaps to 1934 * fail). But any other -EIO isn't ours (e.g. swap in failure) 1935 * and so needs to be reported. 1936 */ 1937 if (!i915_terminally_wedged(&dev_priv->gpu_error)) { 1938 ret = VM_FAULT_SIGBUS; 1939 break; 1940 } 1941 case -EAGAIN: 1942 /* 1943 * EAGAIN means the gpu is hung and we'll wait for the error 1944 * handler to reset everything when re-faulting in 1945 * i915_mutex_lock_interruptible. 1946 */ 1947 case 0: 1948 case -ERESTARTSYS: 1949 case -EINTR: 1950 case -EBUSY: 1951 /* 1952 * EBUSY is ok: this just means that another thread 1953 * already did the job. 1954 */ 1955 ret = VM_FAULT_NOPAGE; 1956 break; 1957 case -ENOMEM: 1958 ret = VM_FAULT_OOM; 1959 break; 1960 case -ENOSPC: 1961 case -EFAULT: 1962 ret = VM_FAULT_SIGBUS; 1963 break; 1964 default: 1965 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); 1966 ret = VM_FAULT_SIGBUS; 1967 break; 1968 } 1969 return ret; 1970 } 1971 1972 /** 1973 * i915_gem_release_mmap - remove physical page mappings 1974 * @obj: obj in question 1975 * 1976 * Preserve the reservation of the mmapping with the DRM core code, but 1977 * relinquish ownership of the pages back to the system. 1978 * 1979 * It is vital that we remove the page mapping if we have mapped a tiled 1980 * object through the GTT and then lose the fence register due to 1981 * resource pressure. Similarly if the object has been moved out of the 1982 * aperture, than pages mapped into userspace must be revoked. Removing the 1983 * mapping will then trigger a page fault on the next user access, allowing 1984 * fixup by i915_gem_fault(). 1985 */ 1986 void 1987 i915_gem_release_mmap(struct drm_i915_gem_object *obj) 1988 { 1989 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1990 1991 /* Serialisation between user GTT access and our code depends upon 1992 * revoking the CPU's PTE whilst the mutex is held. The next user 1993 * pagefault then has to wait until we release the mutex. 1994 * 1995 * Note that RPM complicates somewhat by adding an additional 1996 * requirement that operations to the GGTT be made holding the RPM 1997 * wakeref. 1998 */ 1999 lockdep_assert_held(&i915->drm.struct_mutex); 2000 intel_runtime_pm_get(i915); 2001 2002 if (list_empty(&obj->userfault_link)) 2003 goto out; 2004 2005 list_del_init(&obj->userfault_link); 2006 drm_vma_node_unmap(&obj->base.vma_node, 2007 obj->base.dev->anon_inode->i_mapping); 2008 2009 /* Ensure that the CPU's PTE are revoked and there are not outstanding 2010 * memory transactions from userspace before we return. The TLB 2011 * flushing implied above by changing the PTE above *should* be 2012 * sufficient, an extra barrier here just provides us with a bit 2013 * of paranoid documentation about our requirement to serialise 2014 * memory writes before touching registers / GSM. 2015 */ 2016 wmb(); 2017 2018 out: 2019 intel_runtime_pm_put(i915); 2020 } 2021 2022 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) 2023 { 2024 struct drm_i915_gem_object *obj, *on; 2025 int i; 2026 2027 /* 2028 * Only called during RPM suspend. All users of the userfault_list 2029 * must be holding an RPM wakeref to ensure that this can not 2030 * run concurrently with themselves (and use the struct_mutex for 2031 * protection between themselves). 2032 */ 2033 2034 list_for_each_entry_safe(obj, on, 2035 &dev_priv->mm.userfault_list, userfault_link) { 2036 list_del_init(&obj->userfault_link); 2037 drm_vma_node_unmap(&obj->base.vma_node, 2038 obj->base.dev->anon_inode->i_mapping); 2039 } 2040 2041 /* The fence will be lost when the device powers down. If any were 2042 * in use by hardware (i.e. they are pinned), we should not be powering 2043 * down! All other fences will be reacquired by the user upon waking. 2044 */ 2045 for (i = 0; i < dev_priv->num_fence_regs; i++) { 2046 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 2047 2048 /* Ideally we want to assert that the fence register is not 2049 * live at this point (i.e. that no piece of code will be 2050 * trying to write through fence + GTT, as that both violates 2051 * our tracking of activity and associated locking/barriers, 2052 * but also is illegal given that the hw is powered down). 2053 * 2054 * Previously we used reg->pin_count as a "liveness" indicator. 2055 * That is not sufficient, and we need a more fine-grained 2056 * tool if we want to have a sanity check here. 2057 */ 2058 2059 if (!reg->vma) 2060 continue; 2061 2062 GEM_BUG_ON(!list_empty(®->vma->obj->userfault_link)); 2063 reg->dirty = true; 2064 } 2065 } 2066 2067 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) 2068 { 2069 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 2070 int err; 2071 2072 err = drm_gem_create_mmap_offset(&obj->base); 2073 if (likely(!err)) 2074 return 0; 2075 2076 /* Attempt to reap some mmap space from dead objects */ 2077 do { 2078 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); 2079 if (err) 2080 break; 2081 2082 i915_gem_drain_freed_objects(dev_priv); 2083 err = drm_gem_create_mmap_offset(&obj->base); 2084 if (!err) 2085 break; 2086 2087 } while (flush_delayed_work(&dev_priv->gt.retire_work)); 2088 2089 return err; 2090 } 2091 2092 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) 2093 { 2094 drm_gem_free_mmap_offset(&obj->base); 2095 } 2096 2097 int 2098 i915_gem_mmap_gtt(struct drm_file *file, 2099 struct drm_device *dev, 2100 uint32_t handle, 2101 uint64_t *offset) 2102 { 2103 struct drm_i915_gem_object *obj; 2104 int ret; 2105 2106 obj = i915_gem_object_lookup(file, handle); 2107 if (!obj) 2108 return -ENOENT; 2109 2110 ret = i915_gem_object_create_mmap_offset(obj); 2111 if (ret == 0) 2112 *offset = drm_vma_node_offset_addr(&obj->base.vma_node); 2113 2114 i915_gem_object_put(obj); 2115 return ret; 2116 } 2117 2118 /** 2119 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing 2120 * @dev: DRM device 2121 * @data: GTT mapping ioctl data 2122 * @file: GEM object info 2123 * 2124 * Simply returns the fake offset to userspace so it can mmap it. 2125 * The mmap call will end up in drm_gem_mmap(), which will set things 2126 * up so we can get faults in the handler above. 2127 * 2128 * The fault handler will take care of binding the object into the GTT 2129 * (since it may have been evicted to make room for something), allocating 2130 * a fence register, and mapping the appropriate aperture address into 2131 * userspace. 2132 */ 2133 int 2134 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 2135 struct drm_file *file) 2136 { 2137 struct drm_i915_gem_mmap_gtt *args = data; 2138 2139 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); 2140 } 2141 2142 /* Immediately discard the backing storage */ 2143 static void 2144 i915_gem_object_truncate(struct drm_i915_gem_object *obj) 2145 { 2146 i915_gem_object_free_mmap_offset(obj); 2147 2148 if (obj->base.filp == NULL) 2149 return; 2150 2151 /* Our goal here is to return as much of the memory as 2152 * is possible back to the system as we are called from OOM. 2153 * To do this we must instruct the shmfs to drop all of its 2154 * backing pages, *now*. 2155 */ 2156 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); 2157 obj->mm.madv = __I915_MADV_PURGED; 2158 obj->mm.pages = ERR_PTR(-EFAULT); 2159 } 2160 2161 /* Try to discard unwanted pages */ 2162 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj) 2163 { 2164 struct address_space *mapping; 2165 2166 lockdep_assert_held(&obj->mm.lock); 2167 GEM_BUG_ON(obj->mm.pages); 2168 2169 switch (obj->mm.madv) { 2170 case I915_MADV_DONTNEED: 2171 i915_gem_object_truncate(obj); 2172 case __I915_MADV_PURGED: 2173 return; 2174 } 2175 2176 if (obj->base.filp == NULL) 2177 return; 2178 2179 mapping = obj->base.filp->f_mapping, 2180 invalidate_mapping_pages(mapping, 0, (loff_t)-1); 2181 } 2182 2183 static void 2184 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, 2185 struct sg_table *pages) 2186 { 2187 struct sgt_iter sgt_iter; 2188 struct page *page; 2189 2190 __i915_gem_object_release_shmem(obj, pages, true); 2191 2192 i915_gem_gtt_finish_pages(obj, pages); 2193 2194 if (i915_gem_object_needs_bit17_swizzle(obj)) 2195 i915_gem_object_save_bit_17_swizzle(obj, pages); 2196 2197 for_each_sgt_page(page, sgt_iter, pages) { 2198 if (obj->mm.dirty) 2199 set_page_dirty(page); 2200 2201 if (obj->mm.madv == I915_MADV_WILLNEED) 2202 mark_page_accessed(page); 2203 2204 put_page(page); 2205 } 2206 obj->mm.dirty = false; 2207 2208 sg_free_table(pages); 2209 kfree(pages); 2210 } 2211 2212 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) 2213 { 2214 struct radix_tree_iter iter; 2215 void __rcu **slot; 2216 2217 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0) 2218 radix_tree_delete(&obj->mm.get_page.radix, iter.index); 2219 } 2220 2221 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, 2222 enum i915_mm_subclass subclass) 2223 { 2224 struct sg_table *pages; 2225 2226 if (i915_gem_object_has_pinned_pages(obj)) 2227 return; 2228 2229 GEM_BUG_ON(obj->bind_count); 2230 if (!READ_ONCE(obj->mm.pages)) 2231 return; 2232 2233 /* May be called by shrinker from within get_pages() (on another bo) */ 2234 mutex_lock_nested(&obj->mm.lock, subclass); 2235 if (unlikely(atomic_read(&obj->mm.pages_pin_count))) 2236 goto unlock; 2237 2238 /* ->put_pages might need to allocate memory for the bit17 swizzle 2239 * array, hence protect them from being reaped by removing them from gtt 2240 * lists early. */ 2241 pages = fetch_and_zero(&obj->mm.pages); 2242 GEM_BUG_ON(!pages); 2243 2244 if (obj->mm.mapping) { 2245 void *ptr; 2246 2247 ptr = page_mask_bits(obj->mm.mapping); 2248 if (is_vmalloc_addr(ptr)) 2249 vunmap(ptr); 2250 else 2251 kunmap(kmap_to_page(ptr)); 2252 2253 obj->mm.mapping = NULL; 2254 } 2255 2256 __i915_gem_object_reset_page_iter(obj); 2257 2258 if (!IS_ERR(pages)) 2259 obj->ops->put_pages(obj, pages); 2260 2261 unlock: 2262 mutex_unlock(&obj->mm.lock); 2263 } 2264 2265 static bool i915_sg_trim(struct sg_table *orig_st) 2266 { 2267 struct sg_table new_st; 2268 struct scatterlist *sg, *new_sg; 2269 unsigned int i; 2270 2271 if (orig_st->nents == orig_st->orig_nents) 2272 return false; 2273 2274 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN)) 2275 return false; 2276 2277 new_sg = new_st.sgl; 2278 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) { 2279 sg_set_page(new_sg, sg_page(sg), sg->length, 0); 2280 /* called before being DMA mapped, no need to copy sg->dma_* */ 2281 new_sg = sg_next(new_sg); 2282 } 2283 GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */ 2284 2285 sg_free_table(orig_st); 2286 2287 *orig_st = new_st; 2288 return true; 2289 } 2290 2291 static struct sg_table * 2292 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) 2293 { 2294 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 2295 const unsigned long page_count = obj->base.size / PAGE_SIZE; 2296 unsigned long i; 2297 struct address_space *mapping; 2298 struct sg_table *st; 2299 struct scatterlist *sg; 2300 struct sgt_iter sgt_iter; 2301 struct page *page; 2302 unsigned long last_pfn = 0; /* suppress gcc warning */ 2303 unsigned int max_segment; 2304 gfp_t noreclaim; 2305 int ret; 2306 2307 /* Assert that the object is not currently in any GPU domain. As it 2308 * wasn't in the GTT, there shouldn't be any way it could have been in 2309 * a GPU cache 2310 */ 2311 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); 2312 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); 2313 2314 max_segment = swiotlb_max_segment(); 2315 if (!max_segment) 2316 max_segment = rounddown(UINT_MAX, PAGE_SIZE); 2317 2318 st = kmalloc(sizeof(*st), GFP_KERNEL); 2319 if (st == NULL) 2320 return ERR_PTR(-ENOMEM); 2321 2322 rebuild_st: 2323 if (sg_alloc_table(st, page_count, GFP_KERNEL)) { 2324 kfree(st); 2325 return ERR_PTR(-ENOMEM); 2326 } 2327 2328 /* Get the list of pages out of our struct file. They'll be pinned 2329 * at this point until we release them. 2330 * 2331 * Fail silently without starting the shrinker 2332 */ 2333 mapping = obj->base.filp->f_mapping; 2334 noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); 2335 noreclaim |= __GFP_NORETRY | __GFP_NOWARN; 2336 2337 sg = st->sgl; 2338 st->nents = 0; 2339 for (i = 0; i < page_count; i++) { 2340 const unsigned int shrink[] = { 2341 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE, 2342 0, 2343 }, *s = shrink; 2344 gfp_t gfp = noreclaim; 2345 2346 do { 2347 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2348 if (likely(!IS_ERR(page))) 2349 break; 2350 2351 if (!*s) { 2352 ret = PTR_ERR(page); 2353 goto err_sg; 2354 } 2355 2356 i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++); 2357 cond_resched(); 2358 2359 /* We've tried hard to allocate the memory by reaping 2360 * our own buffer, now let the real VM do its job and 2361 * go down in flames if truly OOM. 2362 * 2363 * However, since graphics tend to be disposable, 2364 * defer the oom here by reporting the ENOMEM back 2365 * to userspace. 2366 */ 2367 if (!*s) { 2368 /* reclaim and warn, but no oom */ 2369 gfp = mapping_gfp_mask(mapping); 2370 2371 /* Our bo are always dirty and so we require 2372 * kswapd to reclaim our pages (direct reclaim 2373 * does not effectively begin pageout of our 2374 * buffers on its own). However, direct reclaim 2375 * only waits for kswapd when under allocation 2376 * congestion. So as a result __GFP_RECLAIM is 2377 * unreliable and fails to actually reclaim our 2378 * dirty pages -- unless you try over and over 2379 * again with !__GFP_NORETRY. However, we still 2380 * want to fail this allocation rather than 2381 * trigger the out-of-memory killer and for 2382 * this we want __GFP_RETRY_MAYFAIL. 2383 */ 2384 gfp |= __GFP_RETRY_MAYFAIL; 2385 } 2386 } while (1); 2387 2388 if (!i || 2389 sg->length >= max_segment || 2390 page_to_pfn(page) != last_pfn + 1) { 2391 if (i) 2392 sg = sg_next(sg); 2393 st->nents++; 2394 sg_set_page(sg, page, PAGE_SIZE, 0); 2395 } else { 2396 sg->length += PAGE_SIZE; 2397 } 2398 last_pfn = page_to_pfn(page); 2399 2400 /* Check that the i965g/gm workaround works. */ 2401 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); 2402 } 2403 if (sg) /* loop terminated early; short sg table */ 2404 sg_mark_end(sg); 2405 2406 /* Trim unused sg entries to avoid wasting memory. */ 2407 i915_sg_trim(st); 2408 2409 ret = i915_gem_gtt_prepare_pages(obj, st); 2410 if (ret) { 2411 /* DMA remapping failed? One possible cause is that 2412 * it could not reserve enough large entries, asking 2413 * for PAGE_SIZE chunks instead may be helpful. 2414 */ 2415 if (max_segment > PAGE_SIZE) { 2416 for_each_sgt_page(page, sgt_iter, st) 2417 put_page(page); 2418 sg_free_table(st); 2419 2420 max_segment = PAGE_SIZE; 2421 goto rebuild_st; 2422 } else { 2423 dev_warn(&dev_priv->drm.pdev->dev, 2424 "Failed to DMA remap %lu pages\n", 2425 page_count); 2426 goto err_pages; 2427 } 2428 } 2429 2430 if (i915_gem_object_needs_bit17_swizzle(obj)) 2431 i915_gem_object_do_bit_17_swizzle(obj, st); 2432 2433 return st; 2434 2435 err_sg: 2436 sg_mark_end(sg); 2437 err_pages: 2438 for_each_sgt_page(page, sgt_iter, st) 2439 put_page(page); 2440 sg_free_table(st); 2441 kfree(st); 2442 2443 /* shmemfs first checks if there is enough memory to allocate the page 2444 * and reports ENOSPC should there be insufficient, along with the usual 2445 * ENOMEM for a genuine allocation failure. 2446 * 2447 * We use ENOSPC in our driver to mean that we have run out of aperture 2448 * space and so want to translate the error from shmemfs back to our 2449 * usual understanding of ENOMEM. 2450 */ 2451 if (ret == -ENOSPC) 2452 ret = -ENOMEM; 2453 2454 return ERR_PTR(ret); 2455 } 2456 2457 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, 2458 struct sg_table *pages) 2459 { 2460 lockdep_assert_held(&obj->mm.lock); 2461 2462 obj->mm.get_page.sg_pos = pages->sgl; 2463 obj->mm.get_page.sg_idx = 0; 2464 2465 obj->mm.pages = pages; 2466 2467 if (i915_gem_object_is_tiled(obj) && 2468 to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 2469 GEM_BUG_ON(obj->mm.quirked); 2470 __i915_gem_object_pin_pages(obj); 2471 obj->mm.quirked = true; 2472 } 2473 } 2474 2475 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2476 { 2477 struct sg_table *pages; 2478 2479 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); 2480 2481 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) { 2482 DRM_DEBUG("Attempting to obtain a purgeable object\n"); 2483 return -EFAULT; 2484 } 2485 2486 pages = obj->ops->get_pages(obj); 2487 if (unlikely(IS_ERR(pages))) 2488 return PTR_ERR(pages); 2489 2490 __i915_gem_object_set_pages(obj, pages); 2491 return 0; 2492 } 2493 2494 /* Ensure that the associated pages are gathered from the backing storage 2495 * and pinned into our object. i915_gem_object_pin_pages() may be called 2496 * multiple times before they are released by a single call to 2497 * i915_gem_object_unpin_pages() - once the pages are no longer referenced 2498 * either as a result of memory pressure (reaping pages under the shrinker) 2499 * or as the object is itself released. 2500 */ 2501 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2502 { 2503 int err; 2504 2505 err = mutex_lock_interruptible(&obj->mm.lock); 2506 if (err) 2507 return err; 2508 2509 if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { 2510 err = ____i915_gem_object_get_pages(obj); 2511 if (err) 2512 goto unlock; 2513 2514 smp_mb__before_atomic(); 2515 } 2516 atomic_inc(&obj->mm.pages_pin_count); 2517 2518 unlock: 2519 mutex_unlock(&obj->mm.lock); 2520 return err; 2521 } 2522 2523 /* The 'mapping' part of i915_gem_object_pin_map() below */ 2524 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, 2525 enum i915_map_type type) 2526 { 2527 unsigned long n_pages = obj->base.size >> PAGE_SHIFT; 2528 struct sg_table *sgt = obj->mm.pages; 2529 struct sgt_iter sgt_iter; 2530 struct page *page; 2531 struct page *stack_pages[32]; 2532 struct page **pages = stack_pages; 2533 unsigned long i = 0; 2534 pgprot_t pgprot; 2535 void *addr; 2536 2537 /* A single page can always be kmapped */ 2538 if (n_pages == 1 && type == I915_MAP_WB) 2539 return kmap(sg_page(sgt->sgl)); 2540 2541 if (n_pages > ARRAY_SIZE(stack_pages)) { 2542 /* Too big for stack -- allocate temporary array instead */ 2543 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); 2544 if (!pages) 2545 return NULL; 2546 } 2547 2548 for_each_sgt_page(page, sgt_iter, sgt) 2549 pages[i++] = page; 2550 2551 /* Check that we have the expected number of pages */ 2552 GEM_BUG_ON(i != n_pages); 2553 2554 switch (type) { 2555 default: 2556 MISSING_CASE(type); 2557 /* fallthrough to use PAGE_KERNEL anyway */ 2558 case I915_MAP_WB: 2559 pgprot = PAGE_KERNEL; 2560 break; 2561 case I915_MAP_WC: 2562 pgprot = pgprot_writecombine(PAGE_KERNEL_IO); 2563 break; 2564 } 2565 addr = vmap(pages, n_pages, 0, pgprot); 2566 2567 if (pages != stack_pages) 2568 kvfree(pages); 2569 2570 return addr; 2571 } 2572 2573 /* get, pin, and map the pages of the object into kernel space */ 2574 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, 2575 enum i915_map_type type) 2576 { 2577 enum i915_map_type has_type; 2578 bool pinned; 2579 void *ptr; 2580 int ret; 2581 2582 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); 2583 2584 ret = mutex_lock_interruptible(&obj->mm.lock); 2585 if (ret) 2586 return ERR_PTR(ret); 2587 2588 pinned = !(type & I915_MAP_OVERRIDE); 2589 type &= ~I915_MAP_OVERRIDE; 2590 2591 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { 2592 if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { 2593 ret = ____i915_gem_object_get_pages(obj); 2594 if (ret) 2595 goto err_unlock; 2596 2597 smp_mb__before_atomic(); 2598 } 2599 atomic_inc(&obj->mm.pages_pin_count); 2600 pinned = false; 2601 } 2602 GEM_BUG_ON(!obj->mm.pages); 2603 2604 ptr = page_unpack_bits(obj->mm.mapping, &has_type); 2605 if (ptr && has_type != type) { 2606 if (pinned) { 2607 ret = -EBUSY; 2608 goto err_unpin; 2609 } 2610 2611 if (is_vmalloc_addr(ptr)) 2612 vunmap(ptr); 2613 else 2614 kunmap(kmap_to_page(ptr)); 2615 2616 ptr = obj->mm.mapping = NULL; 2617 } 2618 2619 if (!ptr) { 2620 ptr = i915_gem_object_map(obj, type); 2621 if (!ptr) { 2622 ret = -ENOMEM; 2623 goto err_unpin; 2624 } 2625 2626 obj->mm.mapping = page_pack_bits(ptr, type); 2627 } 2628 2629 out_unlock: 2630 mutex_unlock(&obj->mm.lock); 2631 return ptr; 2632 2633 err_unpin: 2634 atomic_dec(&obj->mm.pages_pin_count); 2635 err_unlock: 2636 ptr = ERR_PTR(ret); 2637 goto out_unlock; 2638 } 2639 2640 static int 2641 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, 2642 const struct drm_i915_gem_pwrite *arg) 2643 { 2644 struct address_space *mapping = obj->base.filp->f_mapping; 2645 char __user *user_data = u64_to_user_ptr(arg->data_ptr); 2646 u64 remain, offset; 2647 unsigned int pg; 2648 2649 /* Before we instantiate/pin the backing store for our use, we 2650 * can prepopulate the shmemfs filp efficiently using a write into 2651 * the pagecache. We avoid the penalty of instantiating all the 2652 * pages, important if the user is just writing to a few and never 2653 * uses the object on the GPU, and using a direct write into shmemfs 2654 * allows it to avoid the cost of retrieving a page (either swapin 2655 * or clearing-before-use) before it is overwritten. 2656 */ 2657 if (READ_ONCE(obj->mm.pages)) 2658 return -ENODEV; 2659 2660 /* Before the pages are instantiated the object is treated as being 2661 * in the CPU domain. The pages will be clflushed as required before 2662 * use, and we can freely write into the pages directly. If userspace 2663 * races pwrite with any other operation; corruption will ensue - 2664 * that is userspace's prerogative! 2665 */ 2666 2667 remain = arg->size; 2668 offset = arg->offset; 2669 pg = offset_in_page(offset); 2670 2671 do { 2672 unsigned int len, unwritten; 2673 struct page *page; 2674 void *data, *vaddr; 2675 int err; 2676 2677 len = PAGE_SIZE - pg; 2678 if (len > remain) 2679 len = remain; 2680 2681 err = pagecache_write_begin(obj->base.filp, mapping, 2682 offset, len, 0, 2683 &page, &data); 2684 if (err < 0) 2685 return err; 2686 2687 vaddr = kmap(page); 2688 unwritten = copy_from_user(vaddr + pg, user_data, len); 2689 kunmap(page); 2690 2691 err = pagecache_write_end(obj->base.filp, mapping, 2692 offset, len, len - unwritten, 2693 page, data); 2694 if (err < 0) 2695 return err; 2696 2697 if (unwritten) 2698 return -EFAULT; 2699 2700 remain -= len; 2701 user_data += len; 2702 offset += len; 2703 pg = 0; 2704 } while (remain); 2705 2706 return 0; 2707 } 2708 2709 static bool ban_context(const struct i915_gem_context *ctx, 2710 unsigned int score) 2711 { 2712 return (i915_gem_context_is_bannable(ctx) && 2713 score >= CONTEXT_SCORE_BAN_THRESHOLD); 2714 } 2715 2716 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) 2717 { 2718 unsigned int score; 2719 bool banned; 2720 2721 atomic_inc(&ctx->guilty_count); 2722 2723 score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score); 2724 banned = ban_context(ctx, score); 2725 DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n", 2726 ctx->name, score, yesno(banned)); 2727 if (!banned) 2728 return; 2729 2730 i915_gem_context_set_banned(ctx); 2731 if (!IS_ERR_OR_NULL(ctx->file_priv)) { 2732 atomic_inc(&ctx->file_priv->context_bans); 2733 DRM_DEBUG_DRIVER("client %s has had %d context banned\n", 2734 ctx->name, atomic_read(&ctx->file_priv->context_bans)); 2735 } 2736 } 2737 2738 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) 2739 { 2740 atomic_inc(&ctx->active_count); 2741 } 2742 2743 struct drm_i915_gem_request * 2744 i915_gem_find_active_request(struct intel_engine_cs *engine) 2745 { 2746 struct drm_i915_gem_request *request, *active = NULL; 2747 unsigned long flags; 2748 2749 /* We are called by the error capture and reset at a random 2750 * point in time. In particular, note that neither is crucially 2751 * ordered with an interrupt. After a hang, the GPU is dead and we 2752 * assume that no more writes can happen (we waited long enough for 2753 * all writes that were in transaction to be flushed) - adding an 2754 * extra delay for a recent interrupt is pointless. Hence, we do 2755 * not need an engine->irq_seqno_barrier() before the seqno reads. 2756 */ 2757 spin_lock_irqsave(&engine->timeline->lock, flags); 2758 list_for_each_entry(request, &engine->timeline->requests, link) { 2759 if (__i915_gem_request_completed(request, 2760 request->global_seqno)) 2761 continue; 2762 2763 GEM_BUG_ON(request->engine != engine); 2764 GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 2765 &request->fence.flags)); 2766 2767 active = request; 2768 break; 2769 } 2770 spin_unlock_irqrestore(&engine->timeline->lock, flags); 2771 2772 return active; 2773 } 2774 2775 static bool engine_stalled(struct intel_engine_cs *engine) 2776 { 2777 if (!engine->hangcheck.stalled) 2778 return false; 2779 2780 /* Check for possible seqno movement after hang declaration */ 2781 if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) { 2782 DRM_DEBUG_DRIVER("%s pardoned\n", engine->name); 2783 return false; 2784 } 2785 2786 return true; 2787 } 2788 2789 /* 2790 * Ensure irq handler finishes, and not run again. 2791 * Also return the active request so that we only search for it once. 2792 */ 2793 struct drm_i915_gem_request * 2794 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) 2795 { 2796 struct drm_i915_gem_request *request = NULL; 2797 2798 /* Prevent the signaler thread from updating the request 2799 * state (by calling dma_fence_signal) as we are processing 2800 * the reset. The write from the GPU of the seqno is 2801 * asynchronous and the signaler thread may see a different 2802 * value to us and declare the request complete, even though 2803 * the reset routine have picked that request as the active 2804 * (incomplete) request. This conflict is not handled 2805 * gracefully! 2806 */ 2807 kthread_park(engine->breadcrumbs.signaler); 2808 2809 /* Prevent request submission to the hardware until we have 2810 * completed the reset in i915_gem_reset_finish(). If a request 2811 * is completed by one engine, it may then queue a request 2812 * to a second via its engine->irq_tasklet *just* as we are 2813 * calling engine->init_hw() and also writing the ELSP. 2814 * Turning off the engine->irq_tasklet until the reset is over 2815 * prevents the race. 2816 */ 2817 tasklet_kill(&engine->irq_tasklet); 2818 tasklet_disable(&engine->irq_tasklet); 2819 2820 if (engine->irq_seqno_barrier) 2821 engine->irq_seqno_barrier(engine); 2822 2823 request = i915_gem_find_active_request(engine); 2824 if (request && request->fence.error == -EIO) 2825 request = ERR_PTR(-EIO); /* Previous reset failed! */ 2826 2827 return request; 2828 } 2829 2830 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) 2831 { 2832 struct intel_engine_cs *engine; 2833 struct drm_i915_gem_request *request; 2834 enum intel_engine_id id; 2835 int err = 0; 2836 2837 for_each_engine(engine, dev_priv, id) { 2838 request = i915_gem_reset_prepare_engine(engine); 2839 if (IS_ERR(request)) { 2840 err = PTR_ERR(request); 2841 continue; 2842 } 2843 2844 engine->hangcheck.active_request = request; 2845 } 2846 2847 i915_gem_revoke_fences(dev_priv); 2848 2849 return err; 2850 } 2851 2852 static void skip_request(struct drm_i915_gem_request *request) 2853 { 2854 void *vaddr = request->ring->vaddr; 2855 u32 head; 2856 2857 /* As this request likely depends on state from the lost 2858 * context, clear out all the user operations leaving the 2859 * breadcrumb at the end (so we get the fence notifications). 2860 */ 2861 head = request->head; 2862 if (request->postfix < head) { 2863 memset(vaddr + head, 0, request->ring->size - head); 2864 head = 0; 2865 } 2866 memset(vaddr + head, 0, request->postfix - head); 2867 2868 dma_fence_set_error(&request->fence, -EIO); 2869 } 2870 2871 static void engine_skip_context(struct drm_i915_gem_request *request) 2872 { 2873 struct intel_engine_cs *engine = request->engine; 2874 struct i915_gem_context *hung_ctx = request->ctx; 2875 struct intel_timeline *timeline; 2876 unsigned long flags; 2877 2878 timeline = i915_gem_context_lookup_timeline(hung_ctx, engine); 2879 2880 spin_lock_irqsave(&engine->timeline->lock, flags); 2881 spin_lock(&timeline->lock); 2882 2883 list_for_each_entry_continue(request, &engine->timeline->requests, link) 2884 if (request->ctx == hung_ctx) 2885 skip_request(request); 2886 2887 list_for_each_entry(request, &timeline->requests, link) 2888 skip_request(request); 2889 2890 spin_unlock(&timeline->lock); 2891 spin_unlock_irqrestore(&engine->timeline->lock, flags); 2892 } 2893 2894 /* Returns the request if it was guilty of the hang */ 2895 static struct drm_i915_gem_request * 2896 i915_gem_reset_request(struct intel_engine_cs *engine, 2897 struct drm_i915_gem_request *request) 2898 { 2899 /* The guilty request will get skipped on a hung engine. 2900 * 2901 * Users of client default contexts do not rely on logical 2902 * state preserved between batches so it is safe to execute 2903 * queued requests following the hang. Non default contexts 2904 * rely on preserved state, so skipping a batch loses the 2905 * evolution of the state and it needs to be considered corrupted. 2906 * Executing more queued batches on top of corrupted state is 2907 * risky. But we take the risk by trying to advance through 2908 * the queued requests in order to make the client behaviour 2909 * more predictable around resets, by not throwing away random 2910 * amount of batches it has prepared for execution. Sophisticated 2911 * clients can use gem_reset_stats_ioctl and dma fence status 2912 * (exported via sync_file info ioctl on explicit fences) to observe 2913 * when it loses the context state and should rebuild accordingly. 2914 * 2915 * The context ban, and ultimately the client ban, mechanism are safety 2916 * valves if client submission ends up resulting in nothing more than 2917 * subsequent hangs. 2918 */ 2919 2920 if (engine_stalled(engine)) { 2921 i915_gem_context_mark_guilty(request->ctx); 2922 skip_request(request); 2923 2924 /* If this context is now banned, skip all pending requests. */ 2925 if (i915_gem_context_is_banned(request->ctx)) 2926 engine_skip_context(request); 2927 } else { 2928 /* 2929 * Since this is not the hung engine, it may have advanced 2930 * since the hang declaration. Double check by refinding 2931 * the active request at the time of the reset. 2932 */ 2933 request = i915_gem_find_active_request(engine); 2934 if (request) { 2935 i915_gem_context_mark_innocent(request->ctx); 2936 dma_fence_set_error(&request->fence, -EAGAIN); 2937 2938 /* Rewind the engine to replay the incomplete rq */ 2939 spin_lock_irq(&engine->timeline->lock); 2940 request = list_prev_entry(request, link); 2941 if (&request->link == &engine->timeline->requests) 2942 request = NULL; 2943 spin_unlock_irq(&engine->timeline->lock); 2944 } 2945 } 2946 2947 return request; 2948 } 2949 2950 void i915_gem_reset_engine(struct intel_engine_cs *engine, 2951 struct drm_i915_gem_request *request) 2952 { 2953 engine->irq_posted = 0; 2954 2955 if (request) 2956 request = i915_gem_reset_request(engine, request); 2957 2958 if (request) { 2959 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", 2960 engine->name, request->global_seqno); 2961 } 2962 2963 /* Setup the CS to resume from the breadcrumb of the hung request */ 2964 engine->reset_hw(engine, request); 2965 } 2966 2967 void i915_gem_reset(struct drm_i915_private *dev_priv) 2968 { 2969 struct intel_engine_cs *engine; 2970 enum intel_engine_id id; 2971 2972 lockdep_assert_held(&dev_priv->drm.struct_mutex); 2973 2974 i915_gem_retire_requests(dev_priv); 2975 2976 for_each_engine(engine, dev_priv, id) { 2977 struct i915_gem_context *ctx; 2978 2979 i915_gem_reset_engine(engine, engine->hangcheck.active_request); 2980 ctx = fetch_and_zero(&engine->last_retired_context); 2981 if (ctx) 2982 engine->context_unpin(engine, ctx); 2983 } 2984 2985 i915_gem_restore_fences(dev_priv); 2986 2987 if (dev_priv->gt.awake) { 2988 intel_sanitize_gt_powersave(dev_priv); 2989 intel_enable_gt_powersave(dev_priv); 2990 if (INTEL_GEN(dev_priv) >= 6) 2991 gen6_rps_busy(dev_priv); 2992 } 2993 } 2994 2995 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine) 2996 { 2997 tasklet_enable(&engine->irq_tasklet); 2998 kthread_unpark(engine->breadcrumbs.signaler); 2999 } 3000 3001 void i915_gem_reset_finish(struct drm_i915_private *dev_priv) 3002 { 3003 struct intel_engine_cs *engine; 3004 enum intel_engine_id id; 3005 3006 lockdep_assert_held(&dev_priv->drm.struct_mutex); 3007 3008 for_each_engine(engine, dev_priv, id) { 3009 engine->hangcheck.active_request = NULL; 3010 i915_gem_reset_finish_engine(engine); 3011 } 3012 } 3013 3014 static void nop_submit_request(struct drm_i915_gem_request *request) 3015 { 3016 GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error)); 3017 dma_fence_set_error(&request->fence, -EIO); 3018 i915_gem_request_submit(request); 3019 intel_engine_init_global_seqno(request->engine, request->global_seqno); 3020 } 3021 3022 static void engine_set_wedged(struct intel_engine_cs *engine) 3023 { 3024 struct drm_i915_gem_request *request; 3025 unsigned long flags; 3026 3027 /* We need to be sure that no thread is running the old callback as 3028 * we install the nop handler (otherwise we would submit a request 3029 * to hardware that will never complete). In order to prevent this 3030 * race, we wait until the machine is idle before making the swap 3031 * (using stop_machine()). 3032 */ 3033 engine->submit_request = nop_submit_request; 3034 3035 /* Mark all executing requests as skipped */ 3036 spin_lock_irqsave(&engine->timeline->lock, flags); 3037 list_for_each_entry(request, &engine->timeline->requests, link) 3038 if (!i915_gem_request_completed(request)) 3039 dma_fence_set_error(&request->fence, -EIO); 3040 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3041 3042 /* 3043 * Clear the execlists queue up before freeing the requests, as those 3044 * are the ones that keep the context and ringbuffer backing objects 3045 * pinned in place. 3046 */ 3047 3048 if (i915.enable_execlists) { 3049 struct execlist_port *port = engine->execlist_port; 3050 unsigned long flags; 3051 unsigned int n; 3052 3053 spin_lock_irqsave(&engine->timeline->lock, flags); 3054 3055 for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) 3056 i915_gem_request_put(port_request(&port[n])); 3057 memset(engine->execlist_port, 0, sizeof(engine->execlist_port)); 3058 engine->execlist_queue = RB_ROOT; 3059 engine->execlist_first = NULL; 3060 3061 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3062 3063 /* The port is checked prior to scheduling a tasklet, but 3064 * just in case we have suspended the tasklet to do the 3065 * wedging make sure that when it wakes, it decides there 3066 * is no work to do by clearing the irq_posted bit. 3067 */ 3068 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); 3069 } 3070 3071 /* Mark all pending requests as complete so that any concurrent 3072 * (lockless) lookup doesn't try and wait upon the request as we 3073 * reset it. 3074 */ 3075 intel_engine_init_global_seqno(engine, 3076 intel_engine_last_submit(engine)); 3077 } 3078 3079 static int __i915_gem_set_wedged_BKL(void *data) 3080 { 3081 struct drm_i915_private *i915 = data; 3082 struct intel_engine_cs *engine; 3083 enum intel_engine_id id; 3084 3085 for_each_engine(engine, i915, id) 3086 engine_set_wedged(engine); 3087 3088 set_bit(I915_WEDGED, &i915->gpu_error.flags); 3089 wake_up_all(&i915->gpu_error.reset_queue); 3090 3091 return 0; 3092 } 3093 3094 void i915_gem_set_wedged(struct drm_i915_private *dev_priv) 3095 { 3096 stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL); 3097 } 3098 3099 bool i915_gem_unset_wedged(struct drm_i915_private *i915) 3100 { 3101 struct i915_gem_timeline *tl; 3102 int i; 3103 3104 lockdep_assert_held(&i915->drm.struct_mutex); 3105 if (!test_bit(I915_WEDGED, &i915->gpu_error.flags)) 3106 return true; 3107 3108 /* Before unwedging, make sure that all pending operations 3109 * are flushed and errored out - we may have requests waiting upon 3110 * third party fences. We marked all inflight requests as EIO, and 3111 * every execbuf since returned EIO, for consistency we want all 3112 * the currently pending requests to also be marked as EIO, which 3113 * is done inside our nop_submit_request - and so we must wait. 3114 * 3115 * No more can be submitted until we reset the wedged bit. 3116 */ 3117 list_for_each_entry(tl, &i915->gt.timelines, link) { 3118 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { 3119 struct drm_i915_gem_request *rq; 3120 3121 rq = i915_gem_active_peek(&tl->engine[i].last_request, 3122 &i915->drm.struct_mutex); 3123 if (!rq) 3124 continue; 3125 3126 /* We can't use our normal waiter as we want to 3127 * avoid recursively trying to handle the current 3128 * reset. The basic dma_fence_default_wait() installs 3129 * a callback for dma_fence_signal(), which is 3130 * triggered by our nop handler (indirectly, the 3131 * callback enables the signaler thread which is 3132 * woken by the nop_submit_request() advancing the seqno 3133 * and when the seqno passes the fence, the signaler 3134 * then signals the fence waking us up). 3135 */ 3136 if (dma_fence_default_wait(&rq->fence, true, 3137 MAX_SCHEDULE_TIMEOUT) < 0) 3138 return false; 3139 } 3140 } 3141 3142 /* Undo nop_submit_request. We prevent all new i915 requests from 3143 * being queued (by disallowing execbuf whilst wedged) so having 3144 * waited for all active requests above, we know the system is idle 3145 * and do not have to worry about a thread being inside 3146 * engine->submit_request() as we swap over. So unlike installing 3147 * the nop_submit_request on reset, we can do this from normal 3148 * context and do not require stop_machine(). 3149 */ 3150 intel_engines_reset_default_submission(i915); 3151 i915_gem_contexts_lost(i915); 3152 3153 smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ 3154 clear_bit(I915_WEDGED, &i915->gpu_error.flags); 3155 3156 return true; 3157 } 3158 3159 static void 3160 i915_gem_retire_work_handler(struct work_struct *work) 3161 { 3162 struct drm_i915_private *dev_priv = 3163 container_of(work, typeof(*dev_priv), gt.retire_work.work); 3164 struct drm_device *dev = &dev_priv->drm; 3165 3166 /* Come back later if the device is busy... */ 3167 if (mutex_trylock(&dev->struct_mutex)) { 3168 i915_gem_retire_requests(dev_priv); 3169 mutex_unlock(&dev->struct_mutex); 3170 } 3171 3172 /* Keep the retire handler running until we are finally idle. 3173 * We do not need to do this test under locking as in the worst-case 3174 * we queue the retire worker once too often. 3175 */ 3176 if (READ_ONCE(dev_priv->gt.awake)) { 3177 i915_queue_hangcheck(dev_priv); 3178 queue_delayed_work(dev_priv->wq, 3179 &dev_priv->gt.retire_work, 3180 round_jiffies_up_relative(HZ)); 3181 } 3182 } 3183 3184 static void 3185 i915_gem_idle_work_handler(struct work_struct *work) 3186 { 3187 struct drm_i915_private *dev_priv = 3188 container_of(work, typeof(*dev_priv), gt.idle_work.work); 3189 struct drm_device *dev = &dev_priv->drm; 3190 bool rearm_hangcheck; 3191 3192 if (!READ_ONCE(dev_priv->gt.awake)) 3193 return; 3194 3195 /* 3196 * Wait for last execlists context complete, but bail out in case a 3197 * new request is submitted. 3198 */ 3199 wait_for(intel_engines_are_idle(dev_priv), 10); 3200 if (READ_ONCE(dev_priv->gt.active_requests)) 3201 return; 3202 3203 rearm_hangcheck = 3204 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 3205 3206 if (!mutex_trylock(&dev->struct_mutex)) { 3207 /* Currently busy, come back later */ 3208 mod_delayed_work(dev_priv->wq, 3209 &dev_priv->gt.idle_work, 3210 msecs_to_jiffies(50)); 3211 goto out_rearm; 3212 } 3213 3214 /* 3215 * New request retired after this work handler started, extend active 3216 * period until next instance of the work. 3217 */ 3218 if (work_pending(work)) 3219 goto out_unlock; 3220 3221 if (dev_priv->gt.active_requests) 3222 goto out_unlock; 3223 3224 if (wait_for(intel_engines_are_idle(dev_priv), 10)) 3225 DRM_ERROR("Timeout waiting for engines to idle\n"); 3226 3227 intel_engines_mark_idle(dev_priv); 3228 i915_gem_timelines_mark_idle(dev_priv); 3229 3230 GEM_BUG_ON(!dev_priv->gt.awake); 3231 dev_priv->gt.awake = false; 3232 rearm_hangcheck = false; 3233 3234 if (INTEL_GEN(dev_priv) >= 6) 3235 gen6_rps_idle(dev_priv); 3236 intel_runtime_pm_put(dev_priv); 3237 out_unlock: 3238 mutex_unlock(&dev->struct_mutex); 3239 3240 out_rearm: 3241 if (rearm_hangcheck) { 3242 GEM_BUG_ON(!dev_priv->gt.awake); 3243 i915_queue_hangcheck(dev_priv); 3244 } 3245 } 3246 3247 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) 3248 { 3249 struct drm_i915_private *i915 = to_i915(gem->dev); 3250 struct drm_i915_gem_object *obj = to_intel_bo(gem); 3251 struct drm_i915_file_private *fpriv = file->driver_priv; 3252 struct i915_lut_handle *lut, *ln; 3253 3254 mutex_lock(&i915->drm.struct_mutex); 3255 3256 list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) { 3257 struct i915_gem_context *ctx = lut->ctx; 3258 struct i915_vma *vma; 3259 3260 if (ctx->file_priv != fpriv) 3261 continue; 3262 3263 vma = radix_tree_delete(&ctx->handles_vma, lut->handle); 3264 3265 GEM_BUG_ON(vma->obj != obj); 3266 3267 /* We allow the process to have multiple handles to the same 3268 * vma, in the same fd namespace, by virtue of flink/open. 3269 */ 3270 GEM_BUG_ON(!vma->open_count); 3271 if (!--vma->open_count && !i915_vma_is_ggtt(vma)) 3272 i915_vma_close(vma); 3273 3274 list_del(&lut->obj_link); 3275 list_del(&lut->ctx_link); 3276 3277 kmem_cache_free(i915->luts, lut); 3278 __i915_gem_object_release_unless_active(obj); 3279 } 3280 3281 mutex_unlock(&i915->drm.struct_mutex); 3282 } 3283 3284 static unsigned long to_wait_timeout(s64 timeout_ns) 3285 { 3286 if (timeout_ns < 0) 3287 return MAX_SCHEDULE_TIMEOUT; 3288 3289 if (timeout_ns == 0) 3290 return 0; 3291 3292 return nsecs_to_jiffies_timeout(timeout_ns); 3293 } 3294 3295 /** 3296 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT 3297 * @dev: drm device pointer 3298 * @data: ioctl data blob 3299 * @file: drm file pointer 3300 * 3301 * Returns 0 if successful, else an error is returned with the remaining time in 3302 * the timeout parameter. 3303 * -ETIME: object is still busy after timeout 3304 * -ERESTARTSYS: signal interrupted the wait 3305 * -ENONENT: object doesn't exist 3306 * Also possible, but rare: 3307 * -EAGAIN: incomplete, restart syscall 3308 * -ENOMEM: damn 3309 * -ENODEV: Internal IRQ fail 3310 * -E?: The add request failed 3311 * 3312 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any 3313 * non-zero timeout parameter the wait ioctl will wait for the given number of 3314 * nanoseconds on an object becoming unbusy. Since the wait itself does so 3315 * without holding struct_mutex the object may become re-busied before this 3316 * function completes. A similar but shorter * race condition exists in the busy 3317 * ioctl 3318 */ 3319 int 3320 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 3321 { 3322 struct drm_i915_gem_wait *args = data; 3323 struct drm_i915_gem_object *obj; 3324 ktime_t start; 3325 long ret; 3326 3327 if (args->flags != 0) 3328 return -EINVAL; 3329 3330 obj = i915_gem_object_lookup(file, args->bo_handle); 3331 if (!obj) 3332 return -ENOENT; 3333 3334 start = ktime_get(); 3335 3336 ret = i915_gem_object_wait(obj, 3337 I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, 3338 to_wait_timeout(args->timeout_ns), 3339 to_rps_client(file)); 3340 3341 if (args->timeout_ns > 0) { 3342 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); 3343 if (args->timeout_ns < 0) 3344 args->timeout_ns = 0; 3345 3346 /* 3347 * Apparently ktime isn't accurate enough and occasionally has a 3348 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch 3349 * things up to make the test happy. We allow up to 1 jiffy. 3350 * 3351 * This is a regression from the timespec->ktime conversion. 3352 */ 3353 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) 3354 args->timeout_ns = 0; 3355 3356 /* Asked to wait beyond the jiffie/scheduler precision? */ 3357 if (ret == -ETIME && args->timeout_ns) 3358 ret = -EAGAIN; 3359 } 3360 3361 i915_gem_object_put(obj); 3362 return ret; 3363 } 3364 3365 static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags) 3366 { 3367 int ret, i; 3368 3369 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { 3370 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags); 3371 if (ret) 3372 return ret; 3373 } 3374 3375 return 0; 3376 } 3377 3378 static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms) 3379 { 3380 return wait_for(intel_engine_is_idle(engine), timeout_ms); 3381 } 3382 3383 static int wait_for_engines(struct drm_i915_private *i915) 3384 { 3385 struct intel_engine_cs *engine; 3386 enum intel_engine_id id; 3387 3388 for_each_engine(engine, i915, id) { 3389 if (GEM_WARN_ON(wait_for_engine(engine, 50))) { 3390 i915_gem_set_wedged(i915); 3391 return -EIO; 3392 } 3393 3394 GEM_BUG_ON(intel_engine_get_seqno(engine) != 3395 intel_engine_last_submit(engine)); 3396 } 3397 3398 return 0; 3399 } 3400 3401 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) 3402 { 3403 int ret; 3404 3405 /* If the device is asleep, we have no requests outstanding */ 3406 if (!READ_ONCE(i915->gt.awake)) 3407 return 0; 3408 3409 if (flags & I915_WAIT_LOCKED) { 3410 struct i915_gem_timeline *tl; 3411 3412 lockdep_assert_held(&i915->drm.struct_mutex); 3413 3414 list_for_each_entry(tl, &i915->gt.timelines, link) { 3415 ret = wait_for_timeline(tl, flags); 3416 if (ret) 3417 return ret; 3418 } 3419 3420 i915_gem_retire_requests(i915); 3421 GEM_BUG_ON(i915->gt.active_requests); 3422 3423 ret = wait_for_engines(i915); 3424 } else { 3425 ret = wait_for_timeline(&i915->gt.global_timeline, flags); 3426 } 3427 3428 return ret; 3429 } 3430 3431 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) 3432 { 3433 /* 3434 * We manually flush the CPU domain so that we can override and 3435 * force the flush for the display, and perform it asyncrhonously. 3436 */ 3437 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 3438 if (obj->cache_dirty) 3439 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); 3440 obj->base.write_domain = 0; 3441 } 3442 3443 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) 3444 { 3445 if (!READ_ONCE(obj->pin_display)) 3446 return; 3447 3448 mutex_lock(&obj->base.dev->struct_mutex); 3449 __i915_gem_object_flush_for_display(obj); 3450 mutex_unlock(&obj->base.dev->struct_mutex); 3451 } 3452 3453 /** 3454 * Moves a single object to the WC read, and possibly write domain. 3455 * @obj: object to act on 3456 * @write: ask for write access or read only 3457 * 3458 * This function returns when the move is complete, including waiting on 3459 * flushes to occur. 3460 */ 3461 int 3462 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write) 3463 { 3464 int ret; 3465 3466 lockdep_assert_held(&obj->base.dev->struct_mutex); 3467 3468 ret = i915_gem_object_wait(obj, 3469 I915_WAIT_INTERRUPTIBLE | 3470 I915_WAIT_LOCKED | 3471 (write ? I915_WAIT_ALL : 0), 3472 MAX_SCHEDULE_TIMEOUT, 3473 NULL); 3474 if (ret) 3475 return ret; 3476 3477 if (obj->base.write_domain == I915_GEM_DOMAIN_WC) 3478 return 0; 3479 3480 /* Flush and acquire obj->pages so that we are coherent through 3481 * direct access in memory with previous cached writes through 3482 * shmemfs and that our cache domain tracking remains valid. 3483 * For example, if the obj->filp was moved to swap without us 3484 * being notified and releasing the pages, we would mistakenly 3485 * continue to assume that the obj remained out of the CPU cached 3486 * domain. 3487 */ 3488 ret = i915_gem_object_pin_pages(obj); 3489 if (ret) 3490 return ret; 3491 3492 flush_write_domain(obj, ~I915_GEM_DOMAIN_WC); 3493 3494 /* Serialise direct access to this object with the barriers for 3495 * coherent writes from the GPU, by effectively invalidating the 3496 * WC domain upon first access. 3497 */ 3498 if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0) 3499 mb(); 3500 3501 /* It should now be out of any other write domains, and we can update 3502 * the domain values for our changes. 3503 */ 3504 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0); 3505 obj->base.read_domains |= I915_GEM_DOMAIN_WC; 3506 if (write) { 3507 obj->base.read_domains = I915_GEM_DOMAIN_WC; 3508 obj->base.write_domain = I915_GEM_DOMAIN_WC; 3509 obj->mm.dirty = true; 3510 } 3511 3512 i915_gem_object_unpin_pages(obj); 3513 return 0; 3514 } 3515 3516 /** 3517 * Moves a single object to the GTT read, and possibly write domain. 3518 * @obj: object to act on 3519 * @write: ask for write access or read only 3520 * 3521 * This function returns when the move is complete, including waiting on 3522 * flushes to occur. 3523 */ 3524 int 3525 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 3526 { 3527 int ret; 3528 3529 lockdep_assert_held(&obj->base.dev->struct_mutex); 3530 3531 ret = i915_gem_object_wait(obj, 3532 I915_WAIT_INTERRUPTIBLE | 3533 I915_WAIT_LOCKED | 3534 (write ? I915_WAIT_ALL : 0), 3535 MAX_SCHEDULE_TIMEOUT, 3536 NULL); 3537 if (ret) 3538 return ret; 3539 3540 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3541 return 0; 3542 3543 /* Flush and acquire obj->pages so that we are coherent through 3544 * direct access in memory with previous cached writes through 3545 * shmemfs and that our cache domain tracking remains valid. 3546 * For example, if the obj->filp was moved to swap without us 3547 * being notified and releasing the pages, we would mistakenly 3548 * continue to assume that the obj remained out of the CPU cached 3549 * domain. 3550 */ 3551 ret = i915_gem_object_pin_pages(obj); 3552 if (ret) 3553 return ret; 3554 3555 flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT); 3556 3557 /* Serialise direct access to this object with the barriers for 3558 * coherent writes from the GPU, by effectively invalidating the 3559 * GTT domain upon first access. 3560 */ 3561 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 3562 mb(); 3563 3564 /* It should now be out of any other write domains, and we can update 3565 * the domain values for our changes. 3566 */ 3567 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 3568 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3569 if (write) { 3570 obj->base.read_domains = I915_GEM_DOMAIN_GTT; 3571 obj->base.write_domain = I915_GEM_DOMAIN_GTT; 3572 obj->mm.dirty = true; 3573 } 3574 3575 i915_gem_object_unpin_pages(obj); 3576 return 0; 3577 } 3578 3579 /** 3580 * Changes the cache-level of an object across all VMA. 3581 * @obj: object to act on 3582 * @cache_level: new cache level to set for the object 3583 * 3584 * After this function returns, the object will be in the new cache-level 3585 * across all GTT and the contents of the backing storage will be coherent, 3586 * with respect to the new cache-level. In order to keep the backing storage 3587 * coherent for all users, we only allow a single cache level to be set 3588 * globally on the object and prevent it from being changed whilst the 3589 * hardware is reading from the object. That is if the object is currently 3590 * on the scanout it will be set to uncached (or equivalent display 3591 * cache coherency) and all non-MOCS GPU access will also be uncached so 3592 * that all direct access to the scanout remains coherent. 3593 */ 3594 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3595 enum i915_cache_level cache_level) 3596 { 3597 struct i915_vma *vma; 3598 int ret; 3599 3600 lockdep_assert_held(&obj->base.dev->struct_mutex); 3601 3602 if (obj->cache_level == cache_level) 3603 return 0; 3604 3605 /* Inspect the list of currently bound VMA and unbind any that would 3606 * be invalid given the new cache-level. This is principally to 3607 * catch the issue of the CS prefetch crossing page boundaries and 3608 * reading an invalid PTE on older architectures. 3609 */ 3610 restart: 3611 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3612 if (!drm_mm_node_allocated(&vma->node)) 3613 continue; 3614 3615 if (i915_vma_is_pinned(vma)) { 3616 DRM_DEBUG("can not change the cache level of pinned objects\n"); 3617 return -EBUSY; 3618 } 3619 3620 if (i915_gem_valid_gtt_space(vma, cache_level)) 3621 continue; 3622 3623 ret = i915_vma_unbind(vma); 3624 if (ret) 3625 return ret; 3626 3627 /* As unbinding may affect other elements in the 3628 * obj->vma_list (due to side-effects from retiring 3629 * an active vma), play safe and restart the iterator. 3630 */ 3631 goto restart; 3632 } 3633 3634 /* We can reuse the existing drm_mm nodes but need to change the 3635 * cache-level on the PTE. We could simply unbind them all and 3636 * rebind with the correct cache-level on next use. However since 3637 * we already have a valid slot, dma mapping, pages etc, we may as 3638 * rewrite the PTE in the belief that doing so tramples upon less 3639 * state and so involves less work. 3640 */ 3641 if (obj->bind_count) { 3642 /* Before we change the PTE, the GPU must not be accessing it. 3643 * If we wait upon the object, we know that all the bound 3644 * VMA are no longer active. 3645 */ 3646 ret = i915_gem_object_wait(obj, 3647 I915_WAIT_INTERRUPTIBLE | 3648 I915_WAIT_LOCKED | 3649 I915_WAIT_ALL, 3650 MAX_SCHEDULE_TIMEOUT, 3651 NULL); 3652 if (ret) 3653 return ret; 3654 3655 if (!HAS_LLC(to_i915(obj->base.dev)) && 3656 cache_level != I915_CACHE_NONE) { 3657 /* Access to snoopable pages through the GTT is 3658 * incoherent and on some machines causes a hard 3659 * lockup. Relinquish the CPU mmaping to force 3660 * userspace to refault in the pages and we can 3661 * then double check if the GTT mapping is still 3662 * valid for that pointer access. 3663 */ 3664 i915_gem_release_mmap(obj); 3665 3666 /* As we no longer need a fence for GTT access, 3667 * we can relinquish it now (and so prevent having 3668 * to steal a fence from someone else on the next 3669 * fence request). Note GPU activity would have 3670 * dropped the fence as all snoopable access is 3671 * supposed to be linear. 3672 */ 3673 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3674 ret = i915_vma_put_fence(vma); 3675 if (ret) 3676 return ret; 3677 } 3678 } else { 3679 /* We either have incoherent backing store and 3680 * so no GTT access or the architecture is fully 3681 * coherent. In such cases, existing GTT mmaps 3682 * ignore the cache bit in the PTE and we can 3683 * rewrite it without confusing the GPU or having 3684 * to force userspace to fault back in its mmaps. 3685 */ 3686 } 3687 3688 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3689 if (!drm_mm_node_allocated(&vma->node)) 3690 continue; 3691 3692 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE); 3693 if (ret) 3694 return ret; 3695 } 3696 } 3697 3698 list_for_each_entry(vma, &obj->vma_list, obj_link) 3699 vma->node.color = cache_level; 3700 i915_gem_object_set_cache_coherency(obj, cache_level); 3701 obj->cache_dirty = true; /* Always invalidate stale cachelines */ 3702 3703 return 0; 3704 } 3705 3706 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 3707 struct drm_file *file) 3708 { 3709 struct drm_i915_gem_caching *args = data; 3710 struct drm_i915_gem_object *obj; 3711 int err = 0; 3712 3713 rcu_read_lock(); 3714 obj = i915_gem_object_lookup_rcu(file, args->handle); 3715 if (!obj) { 3716 err = -ENOENT; 3717 goto out; 3718 } 3719 3720 switch (obj->cache_level) { 3721 case I915_CACHE_LLC: 3722 case I915_CACHE_L3_LLC: 3723 args->caching = I915_CACHING_CACHED; 3724 break; 3725 3726 case I915_CACHE_WT: 3727 args->caching = I915_CACHING_DISPLAY; 3728 break; 3729 3730 default: 3731 args->caching = I915_CACHING_NONE; 3732 break; 3733 } 3734 out: 3735 rcu_read_unlock(); 3736 return err; 3737 } 3738 3739 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 3740 struct drm_file *file) 3741 { 3742 struct drm_i915_private *i915 = to_i915(dev); 3743 struct drm_i915_gem_caching *args = data; 3744 struct drm_i915_gem_object *obj; 3745 enum i915_cache_level level; 3746 int ret = 0; 3747 3748 switch (args->caching) { 3749 case I915_CACHING_NONE: 3750 level = I915_CACHE_NONE; 3751 break; 3752 case I915_CACHING_CACHED: 3753 /* 3754 * Due to a HW issue on BXT A stepping, GPU stores via a 3755 * snooped mapping may leave stale data in a corresponding CPU 3756 * cacheline, whereas normally such cachelines would get 3757 * invalidated. 3758 */ 3759 if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) 3760 return -ENODEV; 3761 3762 level = I915_CACHE_LLC; 3763 break; 3764 case I915_CACHING_DISPLAY: 3765 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE; 3766 break; 3767 default: 3768 return -EINVAL; 3769 } 3770 3771 obj = i915_gem_object_lookup(file, args->handle); 3772 if (!obj) 3773 return -ENOENT; 3774 3775 if (obj->cache_level == level) 3776 goto out; 3777 3778 ret = i915_gem_object_wait(obj, 3779 I915_WAIT_INTERRUPTIBLE, 3780 MAX_SCHEDULE_TIMEOUT, 3781 to_rps_client(file)); 3782 if (ret) 3783 goto out; 3784 3785 ret = i915_mutex_lock_interruptible(dev); 3786 if (ret) 3787 goto out; 3788 3789 ret = i915_gem_object_set_cache_level(obj, level); 3790 mutex_unlock(&dev->struct_mutex); 3791 3792 out: 3793 i915_gem_object_put(obj); 3794 return ret; 3795 } 3796 3797 /* 3798 * Prepare buffer for display plane (scanout, cursors, etc). 3799 * Can be called from an uninterruptible phase (modesetting) and allows 3800 * any flushes to be pipelined (for pageflips). 3801 */ 3802 struct i915_vma * 3803 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3804 u32 alignment, 3805 const struct i915_ggtt_view *view) 3806 { 3807 struct i915_vma *vma; 3808 int ret; 3809 3810 lockdep_assert_held(&obj->base.dev->struct_mutex); 3811 3812 /* Mark the pin_display early so that we account for the 3813 * display coherency whilst setting up the cache domains. 3814 */ 3815 obj->pin_display++; 3816 3817 /* The display engine is not coherent with the LLC cache on gen6. As 3818 * a result, we make sure that the pinning that is about to occur is 3819 * done with uncached PTEs. This is lowest common denominator for all 3820 * chipsets. 3821 * 3822 * However for gen6+, we could do better by using the GFDT bit instead 3823 * of uncaching, which would allow us to flush all the LLC-cached data 3824 * with that bit in the PTE to main memory with just one PIPE_CONTROL. 3825 */ 3826 ret = i915_gem_object_set_cache_level(obj, 3827 HAS_WT(to_i915(obj->base.dev)) ? 3828 I915_CACHE_WT : I915_CACHE_NONE); 3829 if (ret) { 3830 vma = ERR_PTR(ret); 3831 goto err_unpin_display; 3832 } 3833 3834 /* As the user may map the buffer once pinned in the display plane 3835 * (e.g. libkms for the bootup splash), we have to ensure that we 3836 * always use map_and_fenceable for all scanout buffers. However, 3837 * it may simply be too big to fit into mappable, in which case 3838 * put it anyway and hope that userspace can cope (but always first 3839 * try to preserve the existing ABI). 3840 */ 3841 vma = ERR_PTR(-ENOSPC); 3842 if (!view || view->type == I915_GGTT_VIEW_NORMAL) 3843 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 3844 PIN_MAPPABLE | PIN_NONBLOCK); 3845 if (IS_ERR(vma)) { 3846 struct drm_i915_private *i915 = to_i915(obj->base.dev); 3847 unsigned int flags; 3848 3849 /* Valleyview is definitely limited to scanning out the first 3850 * 512MiB. Lets presume this behaviour was inherited from the 3851 * g4x display engine and that all earlier gen are similarly 3852 * limited. Testing suggests that it is a little more 3853 * complicated than this. For example, Cherryview appears quite 3854 * happy to scanout from anywhere within its global aperture. 3855 */ 3856 flags = 0; 3857 if (HAS_GMCH_DISPLAY(i915)) 3858 flags = PIN_MAPPABLE; 3859 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); 3860 } 3861 if (IS_ERR(vma)) 3862 goto err_unpin_display; 3863 3864 vma->display_alignment = max_t(u64, vma->display_alignment, alignment); 3865 3866 /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */ 3867 __i915_gem_object_flush_for_display(obj); 3868 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); 3869 3870 /* It should now be out of any other write domains, and we can update 3871 * the domain values for our changes. 3872 */ 3873 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3874 3875 return vma; 3876 3877 err_unpin_display: 3878 obj->pin_display--; 3879 return vma; 3880 } 3881 3882 void 3883 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) 3884 { 3885 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 3886 3887 if (WARN_ON(vma->obj->pin_display == 0)) 3888 return; 3889 3890 if (--vma->obj->pin_display == 0) 3891 vma->display_alignment = I915_GTT_MIN_ALIGNMENT; 3892 3893 /* Bump the LRU to try and avoid premature eviction whilst flipping */ 3894 i915_gem_object_bump_inactive_ggtt(vma->obj); 3895 3896 i915_vma_unpin(vma); 3897 } 3898 3899 /** 3900 * Moves a single object to the CPU read, and possibly write domain. 3901 * @obj: object to act on 3902 * @write: requesting write or read-only access 3903 * 3904 * This function returns when the move is complete, including waiting on 3905 * flushes to occur. 3906 */ 3907 int 3908 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) 3909 { 3910 int ret; 3911 3912 lockdep_assert_held(&obj->base.dev->struct_mutex); 3913 3914 ret = i915_gem_object_wait(obj, 3915 I915_WAIT_INTERRUPTIBLE | 3916 I915_WAIT_LOCKED | 3917 (write ? I915_WAIT_ALL : 0), 3918 MAX_SCHEDULE_TIMEOUT, 3919 NULL); 3920 if (ret) 3921 return ret; 3922 3923 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 3924 3925 /* Flush the CPU cache if it's still invalid. */ 3926 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { 3927 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); 3928 obj->base.read_domains |= I915_GEM_DOMAIN_CPU; 3929 } 3930 3931 /* It should now be out of any other write domains, and we can update 3932 * the domain values for our changes. 3933 */ 3934 GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); 3935 3936 /* If we're writing through the CPU, then the GPU read domains will 3937 * need to be invalidated at next use. 3938 */ 3939 if (write) 3940 __start_cpu_write(obj); 3941 3942 return 0; 3943 } 3944 3945 /* Throttle our rendering by waiting until the ring has completed our requests 3946 * emitted over 20 msec ago. 3947 * 3948 * Note that if we were to use the current jiffies each time around the loop, 3949 * we wouldn't escape the function with any frames outstanding if the time to 3950 * render a frame was over 20ms. 3951 * 3952 * This should get us reasonable parallelism between CPU and GPU but also 3953 * relatively low latency when blocking on a particular request to finish. 3954 */ 3955 static int 3956 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) 3957 { 3958 struct drm_i915_private *dev_priv = to_i915(dev); 3959 struct drm_i915_file_private *file_priv = file->driver_priv; 3960 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; 3961 struct drm_i915_gem_request *request, *target = NULL; 3962 long ret; 3963 3964 /* ABI: return -EIO if already wedged */ 3965 if (i915_terminally_wedged(&dev_priv->gpu_error)) 3966 return -EIO; 3967 3968 spin_lock(&file_priv->mm.lock); 3969 list_for_each_entry(request, &file_priv->mm.request_list, client_link) { 3970 if (time_after_eq(request->emitted_jiffies, recent_enough)) 3971 break; 3972 3973 if (target) { 3974 list_del(&target->client_link); 3975 target->file_priv = NULL; 3976 } 3977 3978 target = request; 3979 } 3980 if (target) 3981 i915_gem_request_get(target); 3982 spin_unlock(&file_priv->mm.lock); 3983 3984 if (target == NULL) 3985 return 0; 3986 3987 ret = i915_wait_request(target, 3988 I915_WAIT_INTERRUPTIBLE, 3989 MAX_SCHEDULE_TIMEOUT); 3990 i915_gem_request_put(target); 3991 3992 return ret < 0 ? ret : 0; 3993 } 3994 3995 struct i915_vma * 3996 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 3997 const struct i915_ggtt_view *view, 3998 u64 size, 3999 u64 alignment, 4000 u64 flags) 4001 { 4002 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 4003 struct i915_address_space *vm = &dev_priv->ggtt.base; 4004 struct i915_vma *vma; 4005 int ret; 4006 4007 lockdep_assert_held(&obj->base.dev->struct_mutex); 4008 4009 vma = i915_vma_instance(obj, vm, view); 4010 if (unlikely(IS_ERR(vma))) 4011 return vma; 4012 4013 if (i915_vma_misplaced(vma, size, alignment, flags)) { 4014 if (flags & PIN_NONBLOCK && 4015 (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) 4016 return ERR_PTR(-ENOSPC); 4017 4018 if (flags & PIN_MAPPABLE) { 4019 /* If the required space is larger than the available 4020 * aperture, we will not able to find a slot for the 4021 * object and unbinding the object now will be in 4022 * vain. Worse, doing so may cause us to ping-pong 4023 * the object in and out of the Global GTT and 4024 * waste a lot of cycles under the mutex. 4025 */ 4026 if (vma->fence_size > dev_priv->ggtt.mappable_end) 4027 return ERR_PTR(-E2BIG); 4028 4029 /* If NONBLOCK is set the caller is optimistically 4030 * trying to cache the full object within the mappable 4031 * aperture, and *must* have a fallback in place for 4032 * situations where we cannot bind the object. We 4033 * can be a little more lax here and use the fallback 4034 * more often to avoid costly migrations of ourselves 4035 * and other objects within the aperture. 4036 * 4037 * Half-the-aperture is used as a simple heuristic. 4038 * More interesting would to do search for a free 4039 * block prior to making the commitment to unbind. 4040 * That caters for the self-harm case, and with a 4041 * little more heuristics (e.g. NOFAULT, NOEVICT) 4042 * we could try to minimise harm to others. 4043 */ 4044 if (flags & PIN_NONBLOCK && 4045 vma->fence_size > dev_priv->ggtt.mappable_end / 2) 4046 return ERR_PTR(-ENOSPC); 4047 } 4048 4049 WARN(i915_vma_is_pinned(vma), 4050 "bo is already pinned in ggtt with incorrect alignment:" 4051 " offset=%08x, req.alignment=%llx," 4052 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n", 4053 i915_ggtt_offset(vma), alignment, 4054 !!(flags & PIN_MAPPABLE), 4055 i915_vma_is_map_and_fenceable(vma)); 4056 ret = i915_vma_unbind(vma); 4057 if (ret) 4058 return ERR_PTR(ret); 4059 } 4060 4061 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); 4062 if (ret) 4063 return ERR_PTR(ret); 4064 4065 return vma; 4066 } 4067 4068 static __always_inline unsigned int __busy_read_flag(unsigned int id) 4069 { 4070 /* Note that we could alias engines in the execbuf API, but 4071 * that would be very unwise as it prevents userspace from 4072 * fine control over engine selection. Ahem. 4073 * 4074 * This should be something like EXEC_MAX_ENGINE instead of 4075 * I915_NUM_ENGINES. 4076 */ 4077 BUILD_BUG_ON(I915_NUM_ENGINES > 16); 4078 return 0x10000 << id; 4079 } 4080 4081 static __always_inline unsigned int __busy_write_id(unsigned int id) 4082 { 4083 /* The uABI guarantees an active writer is also amongst the read 4084 * engines. This would be true if we accessed the activity tracking 4085 * under the lock, but as we perform the lookup of the object and 4086 * its activity locklessly we can not guarantee that the last_write 4087 * being active implies that we have set the same engine flag from 4088 * last_read - hence we always set both read and write busy for 4089 * last_write. 4090 */ 4091 return id | __busy_read_flag(id); 4092 } 4093 4094 static __always_inline unsigned int 4095 __busy_set_if_active(const struct dma_fence *fence, 4096 unsigned int (*flag)(unsigned int id)) 4097 { 4098 struct drm_i915_gem_request *rq; 4099 4100 /* We have to check the current hw status of the fence as the uABI 4101 * guarantees forward progress. We could rely on the idle worker 4102 * to eventually flush us, but to minimise latency just ask the 4103 * hardware. 4104 * 4105 * Note we only report on the status of native fences. 4106 */ 4107 if (!dma_fence_is_i915(fence)) 4108 return 0; 4109 4110 /* opencode to_request() in order to avoid const warnings */ 4111 rq = container_of(fence, struct drm_i915_gem_request, fence); 4112 if (i915_gem_request_completed(rq)) 4113 return 0; 4114 4115 return flag(rq->engine->uabi_id); 4116 } 4117 4118 static __always_inline unsigned int 4119 busy_check_reader(const struct dma_fence *fence) 4120 { 4121 return __busy_set_if_active(fence, __busy_read_flag); 4122 } 4123 4124 static __always_inline unsigned int 4125 busy_check_writer(const struct dma_fence *fence) 4126 { 4127 if (!fence) 4128 return 0; 4129 4130 return __busy_set_if_active(fence, __busy_write_id); 4131 } 4132 4133 int 4134 i915_gem_busy_ioctl(struct drm_device *dev, void *data, 4135 struct drm_file *file) 4136 { 4137 struct drm_i915_gem_busy *args = data; 4138 struct drm_i915_gem_object *obj; 4139 struct reservation_object_list *list; 4140 unsigned int seq; 4141 int err; 4142 4143 err = -ENOENT; 4144 rcu_read_lock(); 4145 obj = i915_gem_object_lookup_rcu(file, args->handle); 4146 if (!obj) 4147 goto out; 4148 4149 /* A discrepancy here is that we do not report the status of 4150 * non-i915 fences, i.e. even though we may report the object as idle, 4151 * a call to set-domain may still stall waiting for foreign rendering. 4152 * This also means that wait-ioctl may report an object as busy, 4153 * where busy-ioctl considers it idle. 4154 * 4155 * We trade the ability to warn of foreign fences to report on which 4156 * i915 engines are active for the object. 4157 * 4158 * Alternatively, we can trade that extra information on read/write 4159 * activity with 4160 * args->busy = 4161 * !reservation_object_test_signaled_rcu(obj->resv, true); 4162 * to report the overall busyness. This is what the wait-ioctl does. 4163 * 4164 */ 4165 retry: 4166 seq = raw_read_seqcount(&obj->resv->seq); 4167 4168 /* Translate the exclusive fence to the READ *and* WRITE engine */ 4169 args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl)); 4170 4171 /* Translate shared fences to READ set of engines */ 4172 list = rcu_dereference(obj->resv->fence); 4173 if (list) { 4174 unsigned int shared_count = list->shared_count, i; 4175 4176 for (i = 0; i < shared_count; ++i) { 4177 struct dma_fence *fence = 4178 rcu_dereference(list->shared[i]); 4179 4180 args->busy |= busy_check_reader(fence); 4181 } 4182 } 4183 4184 if (args->busy && read_seqcount_retry(&obj->resv->seq, seq)) 4185 goto retry; 4186 4187 err = 0; 4188 out: 4189 rcu_read_unlock(); 4190 return err; 4191 } 4192 4193 int 4194 i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 4195 struct drm_file *file_priv) 4196 { 4197 return i915_gem_ring_throttle(dev, file_priv); 4198 } 4199 4200 int 4201 i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 4202 struct drm_file *file_priv) 4203 { 4204 struct drm_i915_private *dev_priv = to_i915(dev); 4205 struct drm_i915_gem_madvise *args = data; 4206 struct drm_i915_gem_object *obj; 4207 int err; 4208 4209 switch (args->madv) { 4210 case I915_MADV_DONTNEED: 4211 case I915_MADV_WILLNEED: 4212 break; 4213 default: 4214 return -EINVAL; 4215 } 4216 4217 obj = i915_gem_object_lookup(file_priv, args->handle); 4218 if (!obj) 4219 return -ENOENT; 4220 4221 err = mutex_lock_interruptible(&obj->mm.lock); 4222 if (err) 4223 goto out; 4224 4225 if (obj->mm.pages && 4226 i915_gem_object_is_tiled(obj) && 4227 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 4228 if (obj->mm.madv == I915_MADV_WILLNEED) { 4229 GEM_BUG_ON(!obj->mm.quirked); 4230 __i915_gem_object_unpin_pages(obj); 4231 obj->mm.quirked = false; 4232 } 4233 if (args->madv == I915_MADV_WILLNEED) { 4234 GEM_BUG_ON(obj->mm.quirked); 4235 __i915_gem_object_pin_pages(obj); 4236 obj->mm.quirked = true; 4237 } 4238 } 4239 4240 if (obj->mm.madv != __I915_MADV_PURGED) 4241 obj->mm.madv = args->madv; 4242 4243 /* if the object is no longer attached, discard its backing storage */ 4244 if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages) 4245 i915_gem_object_truncate(obj); 4246 4247 args->retained = obj->mm.madv != __I915_MADV_PURGED; 4248 mutex_unlock(&obj->mm.lock); 4249 4250 out: 4251 i915_gem_object_put(obj); 4252 return err; 4253 } 4254 4255 static void 4256 frontbuffer_retire(struct i915_gem_active *active, 4257 struct drm_i915_gem_request *request) 4258 { 4259 struct drm_i915_gem_object *obj = 4260 container_of(active, typeof(*obj), frontbuffer_write); 4261 4262 intel_fb_obj_flush(obj, ORIGIN_CS); 4263 } 4264 4265 void i915_gem_object_init(struct drm_i915_gem_object *obj, 4266 const struct drm_i915_gem_object_ops *ops) 4267 { 4268 mutex_init(&obj->mm.lock); 4269 4270 INIT_LIST_HEAD(&obj->global_link); 4271 INIT_LIST_HEAD(&obj->userfault_link); 4272 INIT_LIST_HEAD(&obj->vma_list); 4273 INIT_LIST_HEAD(&obj->lut_list); 4274 INIT_LIST_HEAD(&obj->batch_pool_link); 4275 4276 obj->ops = ops; 4277 4278 reservation_object_init(&obj->__builtin_resv); 4279 obj->resv = &obj->__builtin_resv; 4280 4281 obj->frontbuffer_ggtt_origin = ORIGIN_GTT; 4282 init_request_active(&obj->frontbuffer_write, frontbuffer_retire); 4283 4284 obj->mm.madv = I915_MADV_WILLNEED; 4285 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); 4286 mutex_init(&obj->mm.get_page.lock); 4287 4288 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); 4289 } 4290 4291 static const struct drm_i915_gem_object_ops i915_gem_object_ops = { 4292 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | 4293 I915_GEM_OBJECT_IS_SHRINKABLE, 4294 4295 .get_pages = i915_gem_object_get_pages_gtt, 4296 .put_pages = i915_gem_object_put_pages_gtt, 4297 4298 .pwrite = i915_gem_object_pwrite_gtt, 4299 }; 4300 4301 struct drm_i915_gem_object * 4302 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) 4303 { 4304 struct drm_i915_gem_object *obj; 4305 struct address_space *mapping; 4306 unsigned int cache_level; 4307 gfp_t mask; 4308 int ret; 4309 4310 /* There is a prevalence of the assumption that we fit the object's 4311 * page count inside a 32bit _signed_ variable. Let's document this and 4312 * catch if we ever need to fix it. In the meantime, if you do spot 4313 * such a local variable, please consider fixing! 4314 */ 4315 if (size >> PAGE_SHIFT > INT_MAX) 4316 return ERR_PTR(-E2BIG); 4317 4318 if (overflows_type(size, obj->base.size)) 4319 return ERR_PTR(-E2BIG); 4320 4321 obj = i915_gem_object_alloc(dev_priv); 4322 if (obj == NULL) 4323 return ERR_PTR(-ENOMEM); 4324 4325 ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size); 4326 if (ret) 4327 goto fail; 4328 4329 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; 4330 if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) { 4331 /* 965gm cannot relocate objects above 4GiB. */ 4332 mask &= ~__GFP_HIGHMEM; 4333 mask |= __GFP_DMA32; 4334 } 4335 4336 mapping = obj->base.filp->f_mapping; 4337 mapping_set_gfp_mask(mapping, mask); 4338 GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); 4339 4340 i915_gem_object_init(obj, &i915_gem_object_ops); 4341 4342 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 4343 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 4344 4345 if (HAS_LLC(dev_priv)) 4346 /* On some devices, we can have the GPU use the LLC (the CPU 4347 * cache) for about a 10% performance improvement 4348 * compared to uncached. Graphics requests other than 4349 * display scanout are coherent with the CPU in 4350 * accessing this cache. This means in this mode we 4351 * don't need to clflush on the CPU side, and on the 4352 * GPU side we only need to flush internal caches to 4353 * get data visible to the CPU. 4354 * 4355 * However, we maintain the display planes as UC, and so 4356 * need to rebind when first used as such. 4357 */ 4358 cache_level = I915_CACHE_LLC; 4359 else 4360 cache_level = I915_CACHE_NONE; 4361 4362 i915_gem_object_set_cache_coherency(obj, cache_level); 4363 4364 trace_i915_gem_object_create(obj); 4365 4366 return obj; 4367 4368 fail: 4369 i915_gem_object_free(obj); 4370 return ERR_PTR(ret); 4371 } 4372 4373 static bool discard_backing_storage(struct drm_i915_gem_object *obj) 4374 { 4375 /* If we are the last user of the backing storage (be it shmemfs 4376 * pages or stolen etc), we know that the pages are going to be 4377 * immediately released. In this case, we can then skip copying 4378 * back the contents from the GPU. 4379 */ 4380 4381 if (obj->mm.madv != I915_MADV_WILLNEED) 4382 return false; 4383 4384 if (obj->base.filp == NULL) 4385 return true; 4386 4387 /* At first glance, this looks racy, but then again so would be 4388 * userspace racing mmap against close. However, the first external 4389 * reference to the filp can only be obtained through the 4390 * i915_gem_mmap_ioctl() which safeguards us against the user 4391 * acquiring such a reference whilst we are in the middle of 4392 * freeing the object. 4393 */ 4394 return atomic_long_read(&obj->base.filp->f_count) == 1; 4395 } 4396 4397 static void __i915_gem_free_objects(struct drm_i915_private *i915, 4398 struct llist_node *freed) 4399 { 4400 struct drm_i915_gem_object *obj, *on; 4401 4402 mutex_lock(&i915->drm.struct_mutex); 4403 intel_runtime_pm_get(i915); 4404 llist_for_each_entry(obj, freed, freed) { 4405 struct i915_vma *vma, *vn; 4406 4407 trace_i915_gem_object_destroy(obj); 4408 4409 GEM_BUG_ON(i915_gem_object_is_active(obj)); 4410 list_for_each_entry_safe(vma, vn, 4411 &obj->vma_list, obj_link) { 4412 GEM_BUG_ON(i915_vma_is_active(vma)); 4413 vma->flags &= ~I915_VMA_PIN_MASK; 4414 i915_vma_close(vma); 4415 } 4416 GEM_BUG_ON(!list_empty(&obj->vma_list)); 4417 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree)); 4418 4419 list_del(&obj->global_link); 4420 } 4421 intel_runtime_pm_put(i915); 4422 mutex_unlock(&i915->drm.struct_mutex); 4423 4424 cond_resched(); 4425 4426 llist_for_each_entry_safe(obj, on, freed, freed) { 4427 GEM_BUG_ON(obj->bind_count); 4428 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits)); 4429 4430 if (obj->ops->release) 4431 obj->ops->release(obj); 4432 4433 if (WARN_ON(i915_gem_object_has_pinned_pages(obj))) 4434 atomic_set(&obj->mm.pages_pin_count, 0); 4435 __i915_gem_object_put_pages(obj, I915_MM_NORMAL); 4436 GEM_BUG_ON(obj->mm.pages); 4437 4438 if (obj->base.import_attach) 4439 drm_prime_gem_destroy(&obj->base, NULL); 4440 4441 reservation_object_fini(&obj->__builtin_resv); 4442 drm_gem_object_release(&obj->base); 4443 i915_gem_info_remove_obj(i915, obj->base.size); 4444 4445 kfree(obj->bit_17); 4446 i915_gem_object_free(obj); 4447 } 4448 } 4449 4450 static void i915_gem_flush_free_objects(struct drm_i915_private *i915) 4451 { 4452 struct llist_node *freed; 4453 4454 freed = llist_del_all(&i915->mm.free_list); 4455 if (unlikely(freed)) 4456 __i915_gem_free_objects(i915, freed); 4457 } 4458 4459 static void __i915_gem_free_work(struct work_struct *work) 4460 { 4461 struct drm_i915_private *i915 = 4462 container_of(work, struct drm_i915_private, mm.free_work); 4463 struct llist_node *freed; 4464 4465 /* All file-owned VMA should have been released by this point through 4466 * i915_gem_close_object(), or earlier by i915_gem_context_close(). 4467 * However, the object may also be bound into the global GTT (e.g. 4468 * older GPUs without per-process support, or for direct access through 4469 * the GTT either for the user or for scanout). Those VMA still need to 4470 * unbound now. 4471 */ 4472 4473 while ((freed = llist_del_all(&i915->mm.free_list))) { 4474 __i915_gem_free_objects(i915, freed); 4475 if (need_resched()) 4476 break; 4477 } 4478 } 4479 4480 static void __i915_gem_free_object_rcu(struct rcu_head *head) 4481 { 4482 struct drm_i915_gem_object *obj = 4483 container_of(head, typeof(*obj), rcu); 4484 struct drm_i915_private *i915 = to_i915(obj->base.dev); 4485 4486 /* We can't simply use call_rcu() from i915_gem_free_object() 4487 * as we need to block whilst unbinding, and the call_rcu 4488 * task may be called from softirq context. So we take a 4489 * detour through a worker. 4490 */ 4491 if (llist_add(&obj->freed, &i915->mm.free_list)) 4492 schedule_work(&i915->mm.free_work); 4493 } 4494 4495 void i915_gem_free_object(struct drm_gem_object *gem_obj) 4496 { 4497 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 4498 4499 if (obj->mm.quirked) 4500 __i915_gem_object_unpin_pages(obj); 4501 4502 if (discard_backing_storage(obj)) 4503 obj->mm.madv = I915_MADV_DONTNEED; 4504 4505 /* Before we free the object, make sure any pure RCU-only 4506 * read-side critical sections are complete, e.g. 4507 * i915_gem_busy_ioctl(). For the corresponding synchronized 4508 * lookup see i915_gem_object_lookup_rcu(). 4509 */ 4510 call_rcu(&obj->rcu, __i915_gem_free_object_rcu); 4511 } 4512 4513 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj) 4514 { 4515 lockdep_assert_held(&obj->base.dev->struct_mutex); 4516 4517 if (!i915_gem_object_has_active_reference(obj) && 4518 i915_gem_object_is_active(obj)) 4519 i915_gem_object_set_active_reference(obj); 4520 else 4521 i915_gem_object_put(obj); 4522 } 4523 4524 static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv) 4525 { 4526 struct intel_engine_cs *engine; 4527 enum intel_engine_id id; 4528 4529 for_each_engine(engine, dev_priv, id) 4530 GEM_BUG_ON(engine->last_retired_context && 4531 !i915_gem_context_is_kernel(engine->last_retired_context)); 4532 } 4533 4534 void i915_gem_sanitize(struct drm_i915_private *i915) 4535 { 4536 /* 4537 * If we inherit context state from the BIOS or earlier occupants 4538 * of the GPU, the GPU may be in an inconsistent state when we 4539 * try to take over. The only way to remove the earlier state 4540 * is by resetting. However, resetting on earlier gen is tricky as 4541 * it may impact the display and we are uncertain about the stability 4542 * of the reset, so this could be applied to even earlier gen. 4543 */ 4544 if (INTEL_GEN(i915) >= 5) { 4545 int reset = intel_gpu_reset(i915, ALL_ENGINES); 4546 WARN_ON(reset && reset != -ENODEV); 4547 } 4548 } 4549 4550 int i915_gem_suspend(struct drm_i915_private *dev_priv) 4551 { 4552 struct drm_device *dev = &dev_priv->drm; 4553 int ret; 4554 4555 intel_runtime_pm_get(dev_priv); 4556 intel_suspend_gt_powersave(dev_priv); 4557 4558 mutex_lock(&dev->struct_mutex); 4559 4560 /* We have to flush all the executing contexts to main memory so 4561 * that they can saved in the hibernation image. To ensure the last 4562 * context image is coherent, we have to switch away from it. That 4563 * leaves the dev_priv->kernel_context still active when 4564 * we actually suspend, and its image in memory may not match the GPU 4565 * state. Fortunately, the kernel_context is disposable and we do 4566 * not rely on its state. 4567 */ 4568 ret = i915_gem_switch_to_kernel_context(dev_priv); 4569 if (ret) 4570 goto err_unlock; 4571 4572 ret = i915_gem_wait_for_idle(dev_priv, 4573 I915_WAIT_INTERRUPTIBLE | 4574 I915_WAIT_LOCKED); 4575 if (ret) 4576 goto err_unlock; 4577 4578 assert_kernel_context_is_current(dev_priv); 4579 i915_gem_contexts_lost(dev_priv); 4580 mutex_unlock(&dev->struct_mutex); 4581 4582 intel_guc_suspend(dev_priv); 4583 4584 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 4585 cancel_delayed_work_sync(&dev_priv->gt.retire_work); 4586 4587 /* As the idle_work is rearming if it detects a race, play safe and 4588 * repeat the flush until it is definitely idle. 4589 */ 4590 while (flush_delayed_work(&dev_priv->gt.idle_work)) 4591 ; 4592 4593 /* Assert that we sucessfully flushed all the work and 4594 * reset the GPU back to its idle, low power state. 4595 */ 4596 WARN_ON(dev_priv->gt.awake); 4597 WARN_ON(!intel_engines_are_idle(dev_priv)); 4598 4599 /* 4600 * Neither the BIOS, ourselves or any other kernel 4601 * expects the system to be in execlists mode on startup, 4602 * so we need to reset the GPU back to legacy mode. And the only 4603 * known way to disable logical contexts is through a GPU reset. 4604 * 4605 * So in order to leave the system in a known default configuration, 4606 * always reset the GPU upon unload and suspend. Afterwards we then 4607 * clean up the GEM state tracking, flushing off the requests and 4608 * leaving the system in a known idle state. 4609 * 4610 * Note that is of the upmost importance that the GPU is idle and 4611 * all stray writes are flushed *before* we dismantle the backing 4612 * storage for the pinned objects. 4613 * 4614 * However, since we are uncertain that resetting the GPU on older 4615 * machines is a good idea, we don't - just in case it leaves the 4616 * machine in an unusable condition. 4617 */ 4618 i915_gem_sanitize(dev_priv); 4619 goto out_rpm_put; 4620 4621 err_unlock: 4622 mutex_unlock(&dev->struct_mutex); 4623 out_rpm_put: 4624 intel_runtime_pm_put(dev_priv); 4625 return ret; 4626 } 4627 4628 void i915_gem_resume(struct drm_i915_private *dev_priv) 4629 { 4630 struct drm_device *dev = &dev_priv->drm; 4631 4632 WARN_ON(dev_priv->gt.awake); 4633 4634 mutex_lock(&dev->struct_mutex); 4635 i915_gem_restore_gtt_mappings(dev_priv); 4636 4637 /* As we didn't flush the kernel context before suspend, we cannot 4638 * guarantee that the context image is complete. So let's just reset 4639 * it and start again. 4640 */ 4641 dev_priv->gt.resume(dev_priv); 4642 4643 mutex_unlock(&dev->struct_mutex); 4644 } 4645 4646 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv) 4647 { 4648 if (INTEL_GEN(dev_priv) < 5 || 4649 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) 4650 return; 4651 4652 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 4653 DISP_TILE_SURFACE_SWIZZLING); 4654 4655 if (IS_GEN5(dev_priv)) 4656 return; 4657 4658 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); 4659 if (IS_GEN6(dev_priv)) 4660 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); 4661 else if (IS_GEN7(dev_priv)) 4662 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); 4663 else if (IS_GEN8(dev_priv)) 4664 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); 4665 else 4666 BUG(); 4667 } 4668 4669 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base) 4670 { 4671 I915_WRITE(RING_CTL(base), 0); 4672 I915_WRITE(RING_HEAD(base), 0); 4673 I915_WRITE(RING_TAIL(base), 0); 4674 I915_WRITE(RING_START(base), 0); 4675 } 4676 4677 static void init_unused_rings(struct drm_i915_private *dev_priv) 4678 { 4679 if (IS_I830(dev_priv)) { 4680 init_unused_ring(dev_priv, PRB1_BASE); 4681 init_unused_ring(dev_priv, SRB0_BASE); 4682 init_unused_ring(dev_priv, SRB1_BASE); 4683 init_unused_ring(dev_priv, SRB2_BASE); 4684 init_unused_ring(dev_priv, SRB3_BASE); 4685 } else if (IS_GEN2(dev_priv)) { 4686 init_unused_ring(dev_priv, SRB0_BASE); 4687 init_unused_ring(dev_priv, SRB1_BASE); 4688 } else if (IS_GEN3(dev_priv)) { 4689 init_unused_ring(dev_priv, PRB1_BASE); 4690 init_unused_ring(dev_priv, PRB2_BASE); 4691 } 4692 } 4693 4694 static int __i915_gem_restart_engines(void *data) 4695 { 4696 struct drm_i915_private *i915 = data; 4697 struct intel_engine_cs *engine; 4698 enum intel_engine_id id; 4699 int err; 4700 4701 for_each_engine(engine, i915, id) { 4702 err = engine->init_hw(engine); 4703 if (err) 4704 return err; 4705 } 4706 4707 return 0; 4708 } 4709 4710 int i915_gem_init_hw(struct drm_i915_private *dev_priv) 4711 { 4712 int ret; 4713 4714 dev_priv->gt.last_init_time = ktime_get(); 4715 4716 /* Double layer security blanket, see i915_gem_init() */ 4717 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4718 4719 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9) 4720 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); 4721 4722 if (IS_HASWELL(dev_priv)) 4723 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? 4724 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); 4725 4726 if (HAS_PCH_NOP(dev_priv)) { 4727 if (IS_IVYBRIDGE(dev_priv)) { 4728 u32 temp = I915_READ(GEN7_MSG_CTL); 4729 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); 4730 I915_WRITE(GEN7_MSG_CTL, temp); 4731 } else if (INTEL_GEN(dev_priv) >= 7) { 4732 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT); 4733 temp &= ~RESET_PCH_HANDSHAKE_ENABLE; 4734 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp); 4735 } 4736 } 4737 4738 i915_gem_init_swizzling(dev_priv); 4739 4740 /* 4741 * At least 830 can leave some of the unused rings 4742 * "active" (ie. head != tail) after resume which 4743 * will prevent c3 entry. Makes sure all unused rings 4744 * are totally idle. 4745 */ 4746 init_unused_rings(dev_priv); 4747 4748 BUG_ON(!dev_priv->kernel_context); 4749 4750 ret = i915_ppgtt_init_hw(dev_priv); 4751 if (ret) { 4752 DRM_ERROR("PPGTT enable HW failed %d\n", ret); 4753 goto out; 4754 } 4755 4756 /* Need to do basic initialisation of all rings first: */ 4757 ret = __i915_gem_restart_engines(dev_priv); 4758 if (ret) 4759 goto out; 4760 4761 intel_mocs_init_l3cc_table(dev_priv); 4762 4763 /* We can't enable contexts until all firmware is loaded */ 4764 ret = intel_uc_init_hw(dev_priv); 4765 if (ret) 4766 goto out; 4767 4768 out: 4769 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4770 return ret; 4771 } 4772 4773 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value) 4774 { 4775 if (INTEL_INFO(dev_priv)->gen < 6) 4776 return false; 4777 4778 /* TODO: make semaphores and Execlists play nicely together */ 4779 if (i915.enable_execlists) 4780 return false; 4781 4782 if (value >= 0) 4783 return value; 4784 4785 /* Enable semaphores on SNB when IO remapping is off */ 4786 if (IS_GEN6(dev_priv) && intel_vtd_active()) 4787 return false; 4788 4789 return true; 4790 } 4791 4792 int i915_gem_init(struct drm_i915_private *dev_priv) 4793 { 4794 int ret; 4795 4796 mutex_lock(&dev_priv->drm.struct_mutex); 4797 4798 dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1); 4799 4800 if (!i915.enable_execlists) { 4801 dev_priv->gt.resume = intel_legacy_submission_resume; 4802 dev_priv->gt.cleanup_engine = intel_engine_cleanup; 4803 } else { 4804 dev_priv->gt.resume = intel_lr_context_resume; 4805 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup; 4806 } 4807 4808 /* This is just a security blanket to placate dragons. 4809 * On some systems, we very sporadically observe that the first TLBs 4810 * used by the CS may be stale, despite us poking the TLB reset. If 4811 * we hold the forcewake during initialisation these problems 4812 * just magically go away. 4813 */ 4814 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4815 4816 ret = i915_gem_init_userptr(dev_priv); 4817 if (ret) 4818 goto out_unlock; 4819 4820 ret = i915_gem_init_ggtt(dev_priv); 4821 if (ret) 4822 goto out_unlock; 4823 4824 ret = i915_gem_contexts_init(dev_priv); 4825 if (ret) 4826 goto out_unlock; 4827 4828 ret = intel_engines_init(dev_priv); 4829 if (ret) 4830 goto out_unlock; 4831 4832 ret = i915_gem_init_hw(dev_priv); 4833 if (ret == -EIO) { 4834 /* Allow engine initialisation to fail by marking the GPU as 4835 * wedged. But we only want to do this where the GPU is angry, 4836 * for all other failure, such as an allocation failure, bail. 4837 */ 4838 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n"); 4839 i915_gem_set_wedged(dev_priv); 4840 ret = 0; 4841 } 4842 4843 out_unlock: 4844 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4845 mutex_unlock(&dev_priv->drm.struct_mutex); 4846 4847 return ret; 4848 } 4849 4850 void i915_gem_init_mmio(struct drm_i915_private *i915) 4851 { 4852 i915_gem_sanitize(i915); 4853 } 4854 4855 void 4856 i915_gem_cleanup_engines(struct drm_i915_private *dev_priv) 4857 { 4858 struct intel_engine_cs *engine; 4859 enum intel_engine_id id; 4860 4861 for_each_engine(engine, dev_priv, id) 4862 dev_priv->gt.cleanup_engine(engine); 4863 } 4864 4865 void 4866 i915_gem_load_init_fences(struct drm_i915_private *dev_priv) 4867 { 4868 int i; 4869 4870 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && 4871 !IS_CHERRYVIEW(dev_priv)) 4872 dev_priv->num_fence_regs = 32; 4873 else if (INTEL_INFO(dev_priv)->gen >= 4 || 4874 IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 4875 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) 4876 dev_priv->num_fence_regs = 16; 4877 else 4878 dev_priv->num_fence_regs = 8; 4879 4880 if (intel_vgpu_active(dev_priv)) 4881 dev_priv->num_fence_regs = 4882 I915_READ(vgtif_reg(avail_rs.fence_num)); 4883 4884 /* Initialize fence registers to zero */ 4885 for (i = 0; i < dev_priv->num_fence_regs; i++) { 4886 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; 4887 4888 fence->i915 = dev_priv; 4889 fence->id = i; 4890 list_add_tail(&fence->link, &dev_priv->mm.fence_list); 4891 } 4892 i915_gem_restore_fences(dev_priv); 4893 4894 i915_gem_detect_bit_6_swizzle(dev_priv); 4895 } 4896 4897 int 4898 i915_gem_load_init(struct drm_i915_private *dev_priv) 4899 { 4900 int err = -ENOMEM; 4901 4902 dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); 4903 if (!dev_priv->objects) 4904 goto err_out; 4905 4906 dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); 4907 if (!dev_priv->vmas) 4908 goto err_objects; 4909 4910 dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0); 4911 if (!dev_priv->luts) 4912 goto err_vmas; 4913 4914 dev_priv->requests = KMEM_CACHE(drm_i915_gem_request, 4915 SLAB_HWCACHE_ALIGN | 4916 SLAB_RECLAIM_ACCOUNT | 4917 SLAB_TYPESAFE_BY_RCU); 4918 if (!dev_priv->requests) 4919 goto err_luts; 4920 4921 dev_priv->dependencies = KMEM_CACHE(i915_dependency, 4922 SLAB_HWCACHE_ALIGN | 4923 SLAB_RECLAIM_ACCOUNT); 4924 if (!dev_priv->dependencies) 4925 goto err_requests; 4926 4927 dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN); 4928 if (!dev_priv->priorities) 4929 goto err_dependencies; 4930 4931 mutex_lock(&dev_priv->drm.struct_mutex); 4932 INIT_LIST_HEAD(&dev_priv->gt.timelines); 4933 err = i915_gem_timeline_init__global(dev_priv); 4934 mutex_unlock(&dev_priv->drm.struct_mutex); 4935 if (err) 4936 goto err_priorities; 4937 4938 INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work); 4939 init_llist_head(&dev_priv->mm.free_list); 4940 INIT_LIST_HEAD(&dev_priv->mm.unbound_list); 4941 INIT_LIST_HEAD(&dev_priv->mm.bound_list); 4942 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4943 INIT_LIST_HEAD(&dev_priv->mm.userfault_list); 4944 INIT_DELAYED_WORK(&dev_priv->gt.retire_work, 4945 i915_gem_retire_work_handler); 4946 INIT_DELAYED_WORK(&dev_priv->gt.idle_work, 4947 i915_gem_idle_work_handler); 4948 init_waitqueue_head(&dev_priv->gpu_error.wait_queue); 4949 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 4950 4951 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0); 4952 4953 spin_lock_init(&dev_priv->fb_tracking.lock); 4954 4955 return 0; 4956 4957 err_priorities: 4958 kmem_cache_destroy(dev_priv->priorities); 4959 err_dependencies: 4960 kmem_cache_destroy(dev_priv->dependencies); 4961 err_requests: 4962 kmem_cache_destroy(dev_priv->requests); 4963 err_luts: 4964 kmem_cache_destroy(dev_priv->luts); 4965 err_vmas: 4966 kmem_cache_destroy(dev_priv->vmas); 4967 err_objects: 4968 kmem_cache_destroy(dev_priv->objects); 4969 err_out: 4970 return err; 4971 } 4972 4973 void i915_gem_load_cleanup(struct drm_i915_private *dev_priv) 4974 { 4975 i915_gem_drain_freed_objects(dev_priv); 4976 WARN_ON(!llist_empty(&dev_priv->mm.free_list)); 4977 WARN_ON(dev_priv->mm.object_count); 4978 4979 mutex_lock(&dev_priv->drm.struct_mutex); 4980 i915_gem_timeline_fini(&dev_priv->gt.global_timeline); 4981 WARN_ON(!list_empty(&dev_priv->gt.timelines)); 4982 mutex_unlock(&dev_priv->drm.struct_mutex); 4983 4984 kmem_cache_destroy(dev_priv->priorities); 4985 kmem_cache_destroy(dev_priv->dependencies); 4986 kmem_cache_destroy(dev_priv->requests); 4987 kmem_cache_destroy(dev_priv->luts); 4988 kmem_cache_destroy(dev_priv->vmas); 4989 kmem_cache_destroy(dev_priv->objects); 4990 4991 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */ 4992 rcu_barrier(); 4993 } 4994 4995 int i915_gem_freeze(struct drm_i915_private *dev_priv) 4996 { 4997 /* Discard all purgeable objects, let userspace recover those as 4998 * required after resuming. 4999 */ 5000 i915_gem_shrink_all(dev_priv); 5001 5002 return 0; 5003 } 5004 5005 int i915_gem_freeze_late(struct drm_i915_private *dev_priv) 5006 { 5007 struct drm_i915_gem_object *obj; 5008 struct list_head *phases[] = { 5009 &dev_priv->mm.unbound_list, 5010 &dev_priv->mm.bound_list, 5011 NULL 5012 }, **p; 5013 5014 /* Called just before we write the hibernation image. 5015 * 5016 * We need to update the domain tracking to reflect that the CPU 5017 * will be accessing all the pages to create and restore from the 5018 * hibernation, and so upon restoration those pages will be in the 5019 * CPU domain. 5020 * 5021 * To make sure the hibernation image contains the latest state, 5022 * we update that state just before writing out the image. 5023 * 5024 * To try and reduce the hibernation image, we manually shrink 5025 * the objects as well, see i915_gem_freeze() 5026 */ 5027 5028 i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND); 5029 i915_gem_drain_freed_objects(dev_priv); 5030 5031 mutex_lock(&dev_priv->drm.struct_mutex); 5032 for (p = phases; *p; p++) { 5033 list_for_each_entry(obj, *p, global_link) 5034 __start_cpu_write(obj); 5035 } 5036 mutex_unlock(&dev_priv->drm.struct_mutex); 5037 5038 return 0; 5039 } 5040 5041 void i915_gem_release(struct drm_device *dev, struct drm_file *file) 5042 { 5043 struct drm_i915_file_private *file_priv = file->driver_priv; 5044 struct drm_i915_gem_request *request; 5045 5046 /* Clean up our request list when the client is going away, so that 5047 * later retire_requests won't dereference our soon-to-be-gone 5048 * file_priv. 5049 */ 5050 spin_lock(&file_priv->mm.lock); 5051 list_for_each_entry(request, &file_priv->mm.request_list, client_link) 5052 request->file_priv = NULL; 5053 spin_unlock(&file_priv->mm.lock); 5054 } 5055 5056 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) 5057 { 5058 struct drm_i915_file_private *file_priv; 5059 int ret; 5060 5061 DRM_DEBUG("\n"); 5062 5063 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); 5064 if (!file_priv) 5065 return -ENOMEM; 5066 5067 file->driver_priv = file_priv; 5068 file_priv->dev_priv = i915; 5069 file_priv->file = file; 5070 5071 spin_lock_init(&file_priv->mm.lock); 5072 INIT_LIST_HEAD(&file_priv->mm.request_list); 5073 5074 file_priv->bsd_engine = -1; 5075 5076 ret = i915_gem_context_open(i915, file); 5077 if (ret) 5078 kfree(file_priv); 5079 5080 return ret; 5081 } 5082 5083 /** 5084 * i915_gem_track_fb - update frontbuffer tracking 5085 * @old: current GEM buffer for the frontbuffer slots 5086 * @new: new GEM buffer for the frontbuffer slots 5087 * @frontbuffer_bits: bitmask of frontbuffer slots 5088 * 5089 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them 5090 * from @old and setting them in @new. Both @old and @new can be NULL. 5091 */ 5092 void i915_gem_track_fb(struct drm_i915_gem_object *old, 5093 struct drm_i915_gem_object *new, 5094 unsigned frontbuffer_bits) 5095 { 5096 /* Control of individual bits within the mask are guarded by 5097 * the owning plane->mutex, i.e. we can never see concurrent 5098 * manipulation of individual bits. But since the bitfield as a whole 5099 * is updated using RMW, we need to use atomics in order to update 5100 * the bits. 5101 */ 5102 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 5103 sizeof(atomic_t) * BITS_PER_BYTE); 5104 5105 if (old) { 5106 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); 5107 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits); 5108 } 5109 5110 if (new) { 5111 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits); 5112 atomic_or(frontbuffer_bits, &new->frontbuffer_bits); 5113 } 5114 } 5115 5116 /* Allocate a new GEM object and fill it with the supplied data */ 5117 struct drm_i915_gem_object * 5118 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, 5119 const void *data, size_t size) 5120 { 5121 struct drm_i915_gem_object *obj; 5122 struct file *file; 5123 size_t offset; 5124 int err; 5125 5126 obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE)); 5127 if (IS_ERR(obj)) 5128 return obj; 5129 5130 GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU); 5131 5132 file = obj->base.filp; 5133 offset = 0; 5134 do { 5135 unsigned int len = min_t(typeof(size), size, PAGE_SIZE); 5136 struct page *page; 5137 void *pgdata, *vaddr; 5138 5139 err = pagecache_write_begin(file, file->f_mapping, 5140 offset, len, 0, 5141 &page, &pgdata); 5142 if (err < 0) 5143 goto fail; 5144 5145 vaddr = kmap(page); 5146 memcpy(vaddr, data, len); 5147 kunmap(page); 5148 5149 err = pagecache_write_end(file, file->f_mapping, 5150 offset, len, len, 5151 page, pgdata); 5152 if (err < 0) 5153 goto fail; 5154 5155 size -= len; 5156 data += len; 5157 offset += len; 5158 } while (size); 5159 5160 return obj; 5161 5162 fail: 5163 i915_gem_object_put(obj); 5164 return ERR_PTR(err); 5165 } 5166 5167 struct scatterlist * 5168 i915_gem_object_get_sg(struct drm_i915_gem_object *obj, 5169 unsigned int n, 5170 unsigned int *offset) 5171 { 5172 struct i915_gem_object_page_iter *iter = &obj->mm.get_page; 5173 struct scatterlist *sg; 5174 unsigned int idx, count; 5175 5176 might_sleep(); 5177 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT); 5178 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 5179 5180 /* As we iterate forward through the sg, we record each entry in a 5181 * radixtree for quick repeated (backwards) lookups. If we have seen 5182 * this index previously, we will have an entry for it. 5183 * 5184 * Initial lookup is O(N), but this is amortized to O(1) for 5185 * sequential page access (where each new request is consecutive 5186 * to the previous one). Repeated lookups are O(lg(obj->base.size)), 5187 * i.e. O(1) with a large constant! 5188 */ 5189 if (n < READ_ONCE(iter->sg_idx)) 5190 goto lookup; 5191 5192 mutex_lock(&iter->lock); 5193 5194 /* We prefer to reuse the last sg so that repeated lookup of this 5195 * (or the subsequent) sg are fast - comparing against the last 5196 * sg is faster than going through the radixtree. 5197 */ 5198 5199 sg = iter->sg_pos; 5200 idx = iter->sg_idx; 5201 count = __sg_page_count(sg); 5202 5203 while (idx + count <= n) { 5204 unsigned long exception, i; 5205 int ret; 5206 5207 /* If we cannot allocate and insert this entry, or the 5208 * individual pages from this range, cancel updating the 5209 * sg_idx so that on this lookup we are forced to linearly 5210 * scan onwards, but on future lookups we will try the 5211 * insertion again (in which case we need to be careful of 5212 * the error return reporting that we have already inserted 5213 * this index). 5214 */ 5215 ret = radix_tree_insert(&iter->radix, idx, sg); 5216 if (ret && ret != -EEXIST) 5217 goto scan; 5218 5219 exception = 5220 RADIX_TREE_EXCEPTIONAL_ENTRY | 5221 idx << RADIX_TREE_EXCEPTIONAL_SHIFT; 5222 for (i = 1; i < count; i++) { 5223 ret = radix_tree_insert(&iter->radix, idx + i, 5224 (void *)exception); 5225 if (ret && ret != -EEXIST) 5226 goto scan; 5227 } 5228 5229 idx += count; 5230 sg = ____sg_next(sg); 5231 count = __sg_page_count(sg); 5232 } 5233 5234 scan: 5235 iter->sg_pos = sg; 5236 iter->sg_idx = idx; 5237 5238 mutex_unlock(&iter->lock); 5239 5240 if (unlikely(n < idx)) /* insertion completed by another thread */ 5241 goto lookup; 5242 5243 /* In case we failed to insert the entry into the radixtree, we need 5244 * to look beyond the current sg. 5245 */ 5246 while (idx + count <= n) { 5247 idx += count; 5248 sg = ____sg_next(sg); 5249 count = __sg_page_count(sg); 5250 } 5251 5252 *offset = n - idx; 5253 return sg; 5254 5255 lookup: 5256 rcu_read_lock(); 5257 5258 sg = radix_tree_lookup(&iter->radix, n); 5259 GEM_BUG_ON(!sg); 5260 5261 /* If this index is in the middle of multi-page sg entry, 5262 * the radixtree will contain an exceptional entry that points 5263 * to the start of that range. We will return the pointer to 5264 * the base page and the offset of this page within the 5265 * sg entry's range. 5266 */ 5267 *offset = 0; 5268 if (unlikely(radix_tree_exception(sg))) { 5269 unsigned long base = 5270 (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT; 5271 5272 sg = radix_tree_lookup(&iter->radix, base); 5273 GEM_BUG_ON(!sg); 5274 5275 *offset = n - base; 5276 } 5277 5278 rcu_read_unlock(); 5279 5280 return sg; 5281 } 5282 5283 struct page * 5284 i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n) 5285 { 5286 struct scatterlist *sg; 5287 unsigned int offset; 5288 5289 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); 5290 5291 sg = i915_gem_object_get_sg(obj, n, &offset); 5292 return nth_page(sg_page(sg), offset); 5293 } 5294 5295 /* Like i915_gem_object_get_page(), but mark the returned page dirty */ 5296 struct page * 5297 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, 5298 unsigned int n) 5299 { 5300 struct page *page; 5301 5302 page = i915_gem_object_get_page(obj, n); 5303 if (!obj->mm.dirty) 5304 set_page_dirty(page); 5305 5306 return page; 5307 } 5308 5309 dma_addr_t 5310 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, 5311 unsigned long n) 5312 { 5313 struct scatterlist *sg; 5314 unsigned int offset; 5315 5316 sg = i915_gem_object_get_sg(obj, n, &offset); 5317 return sg_dma_address(sg) + (offset << PAGE_SHIFT); 5318 } 5319 5320 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) 5321 { 5322 struct sg_table *pages; 5323 int err; 5324 5325 if (align > obj->base.size) 5326 return -EINVAL; 5327 5328 if (obj->ops == &i915_gem_phys_ops) 5329 return 0; 5330 5331 if (obj->ops != &i915_gem_object_ops) 5332 return -EINVAL; 5333 5334 err = i915_gem_object_unbind(obj); 5335 if (err) 5336 return err; 5337 5338 mutex_lock(&obj->mm.lock); 5339 5340 if (obj->mm.madv != I915_MADV_WILLNEED) { 5341 err = -EFAULT; 5342 goto err_unlock; 5343 } 5344 5345 if (obj->mm.quirked) { 5346 err = -EFAULT; 5347 goto err_unlock; 5348 } 5349 5350 if (obj->mm.mapping) { 5351 err = -EBUSY; 5352 goto err_unlock; 5353 } 5354 5355 pages = obj->mm.pages; 5356 obj->ops = &i915_gem_phys_ops; 5357 5358 err = ____i915_gem_object_get_pages(obj); 5359 if (err) 5360 goto err_xfer; 5361 5362 /* Perma-pin (until release) the physical set of pages */ 5363 __i915_gem_object_pin_pages(obj); 5364 5365 if (!IS_ERR_OR_NULL(pages)) 5366 i915_gem_object_ops.put_pages(obj, pages); 5367 mutex_unlock(&obj->mm.lock); 5368 return 0; 5369 5370 err_xfer: 5371 obj->ops = &i915_gem_object_ops; 5372 obj->mm.pages = pages; 5373 err_unlock: 5374 mutex_unlock(&obj->mm.lock); 5375 return err; 5376 } 5377 5378 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 5379 #include "selftests/scatterlist.c" 5380 #include "selftests/mock_gem_device.c" 5381 #include "selftests/huge_gem_object.c" 5382 #include "selftests/i915_gem_object.c" 5383 #include "selftests/i915_gem_coherency.c" 5384 #endif 5385