/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include "i915_gemfs.h"
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_flush_free_objects(struct drm_i915_private *i915);

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	return obj->pin_global; /* currently in use by HW, keep flushed */
}

static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	might_sleep();

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_backoff(error),
					       I915_RESET_TIMEOUT);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	pinned = ggtt->base.reserved;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	drm_dma_handle_t *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int i;
	int err;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/* Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     roundup_pow_of_two(obj->base.size),
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return -ENOMEM;

	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		err = -ENOMEM;
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		err = -ENOMEM;
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_phys:
	drm_pci_free(obj->base.dev, phys);

	return err;
}

static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

static const struct drm_i915_gem_object_ops i915_gem_object_ops;

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout,
			   struct intel_rps_client *rps_client)
{
	struct drm_i915_gem_request *rq;

	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
	if (i915_gem_request_completed(rq))
		goto out;

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (rps_client) {
		if (INTEL_GEN(rq->i915) >= 6)
			gen6_rps_boost(rq, rps_client);
		else
			rps_client = NULL;
	}

	timeout = i915_wait_request(rq, flags, timeout);

out:
	if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
		i915_gem_request_retire_upto(rq);

	return timeout;
}

static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout,
				 struct intel_rps_client *rps_client)
{
	unsigned int seq = __read_seqcount_begin(&resv->seq);
	struct dma_fence *excl;
	bool prune_fences = false;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout,
							     rps_client);
			if (timeout < 0)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);

		prune_fences = count && timeout >= 0;
	} else {
		excl = reservation_object_get_excl_rcu(resv);
	}

	if (excl && timeout >= 0) {
		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
						     rps_client);
		prune_fences = timeout >= 0;
	}

	dma_fence_put(excl);

	/* Opportunistically prune the fences iff we know they have *all* been
	 * signaled and that the reservation object has not been changed (i.e.
	 * no new fences have been added).
	 */
	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
		if (reservation_object_trylock(resv)) {
			if (!__read_seqcount_retry(&resv->seq, seq))
				reservation_object_add_excl_fence(resv, NULL);
			reservation_object_unlock(resv);
		}
	}

	return timeout;
}

static void __fence_set_priority(struct dma_fence *fence, int prio)
{
	struct drm_i915_gem_request *rq;
	struct intel_engine_cs *engine;

	if (!dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;
	if (!engine->schedule)
		return;

	engine->schedule(rq, prio);
}

static void fence_set_priority(struct dma_fence *fence, int prio)
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			__fence_set_priority(array->fences[i], prio);
	} else {
		__fence_set_priority(fence, prio);
	}
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      int prio)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			fence_set_priority(shared[i], prio);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		fence_set_priority(excl, prio);
		dma_fence_put(excl);
	}
	return 0;
}

/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps_client: client (user process) to charge for any waitboosting
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout,
		     struct intel_rps_client *rps_client)
{
	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->resv,
						   flags, timeout,
						   rps_client);
	return timeout < 0 ? timeout : 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps_client;
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}

void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
{
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       args->size, &args->handle);
}

static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       args->size, &args->handle);
}

static inline enum fb_op_origin
fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
{
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}

static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	if (!(obj->base.write_domain & flush_domains))
		return;

	/* No actual flushing is required for the GTT write domain. Writes
	 * to it "immediately" go to main memory as far as we know, so there's
	 * no chipset flush. It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour).
	 */
	wmb();

	switch (obj->base.write_domain) {
	case I915_GEM_DOMAIN_GTT:
		if (!HAS_LLC(dev_priv)) {
			intel_runtime_pm_get(dev_priv);
			spin_lock_irq(&dev_priv->uncore.lock);
			POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));
			spin_unlock_irq(&dev_priv->uncore.lock);
			intel_runtime_pm_put(dev_priv);
		}

		intel_fb_obj_flush(obj,
				   fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
		break;

	case I915_GEM_DOMAIN_CPU:
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		break;

	case I915_GEM_DOMAIN_RENDER:
		if (gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
		break;
	}

	obj->base.write_domain = 0;
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

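		/* Advance both offsets past the cacheline-sized chunk just copied. */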
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!obj->cache_dirty &&
	    !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = CLFLUSH_BEFORE;

out:
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (!obj->cache_dirty) {
		*needs_clflush |= CLFLUSH_AFTER;

		/*
		 * Same trick applies to invalidate partially written
		 * cachelines read before writing.
		 */
		if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
			*needs_clflush |= CLFLUSH_BEFORE;
	}

out:
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->mm.dirty = true;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int offset, int length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
	else
		ret = __copy_to_user(user_data, vaddr + offset, length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
	    bool page_do_bit17_swizzling, bool needs_clflush)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush)
			drm_clflush_virt_range(vaddr + offset, length);
		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return 0;

	return shmem_pread_slow(page, offset, length, user_data,
				page_do_bit17_swizzling, needs_clflush);
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	char __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	mutex_unlock(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pread(page, offset, length, user_data,
				  page_to_phys(page) & obj_do_bit17_swizzling,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

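	/* Release the page pin taken by i915_gem_obj_prepare_shmem_read() */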
	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
	struct i915_vma *vma;
	void __user *user_data;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	mutex_unlock(&i915->drm.struct_mutex);

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&i915->drm.struct_mutex);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		if (!intel_runtime_pm_get_if_in_use(i915)) {
			ret = -EFAULT;
			goto out_unlock;
		}
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		intel_runtime_pm_get(i915);
	}

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!node.allocated);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	mutex_unlock(&i915->drm.struct_mutex);

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->mappable, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_fb_obj_flush(obj, ORIGIN_CPU);

	mutex_lock(&i915->drm.struct_mutex);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(i915);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
}

static int
shmem_pwrite_slow(struct page *page, int offset, int length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, offset, user_data,
						length);
	else
		ret = __copy_from_user(vaddr + offset, user_data, length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool page_do_bit17_swizzling,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush_before)
			drm_clflush_virt_range(vaddr + offset, len);
		ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
		if (needs_clflush_after)
			drm_clflush_virt_range(vaddr + offset, len);

		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return ret;

	return shmem_pwrite_slow(page, offset, len, user_data,
				 page_do_bit17_swizzling,
				 needs_clflush_before,
				 needs_clflush_after);
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	void __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	mutex_unlock(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire page.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pwrite(page, offset, length, user_data,
				   page_to_phys(page) & obj_do_bit17_swizzling,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915;
	struct list_head *list;
	struct i915_vma *vma;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!i915_vma_is_ggtt(vma))
			break;

		if (i915_vma_is_active(vma))
			continue;

		if (!drm_mm_node_allocated(&vma->node))
			continue;

		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}

	i915 = to_i915(obj->base.dev);
	spin_lock(&i915->mm.obj_lock);
	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
	list_move_tail(&obj->mm.link, list);
	spin_unlock(&i915->mm.obj_lock);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int err;

	/* Only handle setting domains to types used by the CPU. */
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   (write_domain ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (err)
		goto out;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out_unpin;

	if (read_domains & I915_GEM_DOMAIN_WC)
		err = i915_gem_object_set_to_wc_domain(obj, write_domain);
	else if (read_domains & I915_GEM_DOMAIN_GTT)
		err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
	else
		err = i915_gem_object_set_to_cpu_domain(obj, write_domain);

	/* And bump the LRU for this access */
	i915_gem_object_bump_inactive_ggtt(obj);

	mutex_unlock(&dev->struct_mutex);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj,
					fb_write_origin(obj, write_domain));

out_unpin:
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that.
 * The i915 driver only does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		i915_gem_object_put(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem)) {
			i915_gem_object_put(obj);
			return -EINTR;
		}
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
	}
	i915_gem_object_put(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require a fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 2;
}

static inline struct i915_ggtt_view
compute_partial_view(struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj));

	view.type = I915_GGTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 *
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
 */
int i915_gem_fault(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
	struct vm_area_struct *area = vmf->vma;
	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned int flags;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon acquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
		ret = -EFAULT;
		goto err_unlock;
	}

	/* If the object is smaller than a couple of partial vma, it is
	 * not worth only creating a single partial vma - we may as well
	 * clear enough space for the full object.
	 */
	flags = PIN_MAPPABLE;
	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
		flags |= PIN_NONBLOCK | PIN_NONFAULT;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
	if (IS_ERR(vma)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);

		/* Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */
		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unlock;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_unpin;

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->mappable);
	if (ret)
		goto err_fence;

	/* Mark as being mmapped into userspace for later revocation */
	assert_rpm_wakelock_held(dev_priv);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
	GEM_BUG_ON(!obj->userfault_count);

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(dev_priv);
	i915_gem_object_unpin_pages(obj);
err:
	switch (ret) {
	case -EIO:
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
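		 * Returning VM_FAULT_NOPAGE simply retries the fault.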
1993 */ 1994 ret = VM_FAULT_NOPAGE; 1995 break; 1996 case -ENOMEM: 1997 ret = VM_FAULT_OOM; 1998 break; 1999 case -ENOSPC: 2000 case -EFAULT: 2001 ret = VM_FAULT_SIGBUS; 2002 break; 2003 default: 2004 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); 2005 ret = VM_FAULT_SIGBUS; 2006 break; 2007 } 2008 return ret; 2009 } 2010 2011 static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) 2012 { 2013 struct i915_vma *vma; 2014 2015 GEM_BUG_ON(!obj->userfault_count); 2016 2017 obj->userfault_count = 0; 2018 list_del(&obj->userfault_link); 2019 drm_vma_node_unmap(&obj->base.vma_node, 2020 obj->base.dev->anon_inode->i_mapping); 2021 2022 list_for_each_entry(vma, &obj->vma_list, obj_link) { 2023 if (!i915_vma_is_ggtt(vma)) 2024 break; 2025 2026 i915_vma_unset_userfault(vma); 2027 } 2028 } 2029 2030 /** 2031 * i915_gem_release_mmap - remove physical page mappings 2032 * @obj: obj in question 2033 * 2034 * Preserve the reservation of the mmapping with the DRM core code, but 2035 * relinquish ownership of the pages back to the system. 2036 * 2037 * It is vital that we remove the page mapping if we have mapped a tiled 2038 * object through the GTT and then lose the fence register due to 2039 * resource pressure. Similarly if the object has been moved out of the 2040 * aperture, then pages mapped into userspace must be revoked. Removing the 2041 * mapping will then trigger a page fault on the next user access, allowing 2042 * fixup by i915_gem_fault(). 2043 */ 2044 void 2045 i915_gem_release_mmap(struct drm_i915_gem_object *obj) 2046 { 2047 struct drm_i915_private *i915 = to_i915(obj->base.dev); 2048 2049 /* Serialisation between user GTT access and our code depends upon 2050 * revoking the CPU's PTE whilst the mutex is held. The next user 2051 * pagefault then has to wait until we release the mutex. 2052 * 2053 * Note that RPM complicates somewhat by adding an additional 2054 * requirement that operations to the GGTT be made holding the RPM 2055 * wakeref. 2056 */ 2057 lockdep_assert_held(&i915->drm.struct_mutex); 2058 intel_runtime_pm_get(i915); 2059 2060 if (!obj->userfault_count) 2061 goto out; 2062 2063 __i915_gem_object_release_mmap(obj); 2064 2065 /* Ensure that the CPU's PTE are revoked and there are no outstanding 2066 * memory transactions from userspace before we return. The TLB 2067 * flushing implied by changing the PTE above *should* be 2068 * sufficient, an extra barrier here just provides us with a bit 2069 * of paranoid documentation about our requirement to serialise 2070 * memory writes before touching registers / GSM. 2071 */ 2072 wmb(); 2073 2074 out: 2075 intel_runtime_pm_put(i915); 2076 } 2077 2078 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) 2079 { 2080 struct drm_i915_gem_object *obj, *on; 2081 int i; 2082 2083 /* 2084 * Only called during RPM suspend. All users of the userfault_list 2085 * must be holding an RPM wakeref to ensure that this can not 2086 * run concurrently with themselves (and use the struct_mutex for 2087 * protection between themselves). 2088 */ 2089 2090 list_for_each_entry_safe(obj, on, 2091 &dev_priv->mm.userfault_list, userfault_link) 2092 __i915_gem_object_release_mmap(obj); 2093 2094 /* The fences will be lost when the device powers down. If any were 2095 * in use by hardware (i.e. they are pinned), we should not be powering 2096 * down! All other fences will be reacquired by the user upon waking.
2097 */ 2098 for (i = 0; i < dev_priv->num_fence_regs; i++) { 2099 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 2100 2101 /* Ideally we want to assert that the fence register is not 2102 * live at this point (i.e. that no piece of code will be 2103 * trying to write through fence + GTT, as that both violates 2104 * our tracking of activity and associated locking/barriers, 2105 * but also is illegal given that the hw is powered down). 2106 * 2107 * Previously we used reg->pin_count as a "liveness" indicator. 2108 * That is not sufficient, and we need a more fine-grained 2109 * tool if we want to have a sanity check here. 2110 */ 2111 2112 if (!reg->vma) 2113 continue; 2114 2115 GEM_BUG_ON(i915_vma_has_userfault(reg->vma)); 2116 reg->dirty = true; 2117 } 2118 } 2119 2120 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) 2121 { 2122 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 2123 int err; 2124 2125 err = drm_gem_create_mmap_offset(&obj->base); 2126 if (likely(!err)) 2127 return 0; 2128 2129 /* Attempt to reap some mmap space from dead objects */ 2130 do { 2131 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); 2132 if (err) 2133 break; 2134 2135 i915_gem_drain_freed_objects(dev_priv); 2136 err = drm_gem_create_mmap_offset(&obj->base); 2137 if (!err) 2138 break; 2139 2140 } while (flush_delayed_work(&dev_priv->gt.retire_work)); 2141 2142 return err; 2143 } 2144 2145 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) 2146 { 2147 drm_gem_free_mmap_offset(&obj->base); 2148 } 2149 2150 int 2151 i915_gem_mmap_gtt(struct drm_file *file, 2152 struct drm_device *dev, 2153 uint32_t handle, 2154 uint64_t *offset) 2155 { 2156 struct drm_i915_gem_object *obj; 2157 int ret; 2158 2159 obj = i915_gem_object_lookup(file, handle); 2160 if (!obj) 2161 return -ENOENT; 2162 2163 ret = i915_gem_object_create_mmap_offset(obj); 2164 if (ret == 0) 2165 *offset = drm_vma_node_offset_addr(&obj->base.vma_node); 2166 2167 i915_gem_object_put(obj); 2168 return ret; 2169 } 2170 2171 /** 2172 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing 2173 * @dev: DRM device 2174 * @data: GTT mapping ioctl data 2175 * @file: GEM object info 2176 * 2177 * Simply returns the fake offset to userspace so it can mmap it. 2178 * The mmap call will end up in drm_gem_mmap(), which will set things 2179 * up so we can get faults in the handler above. 2180 * 2181 * The fault handler will take care of binding the object into the GTT 2182 * (since it may have been evicted to make room for something), allocating 2183 * a fence register, and mapping the appropriate aperture address into 2184 * userspace. 2185 */ 2186 int 2187 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 2188 struct drm_file *file) 2189 { 2190 struct drm_i915_gem_mmap_gtt *args = data; 2191 2192 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); 2193 } 2194 2195 /* Immediately discard the backing storage */ 2196 static void 2197 i915_gem_object_truncate(struct drm_i915_gem_object *obj) 2198 { 2199 i915_gem_object_free_mmap_offset(obj); 2200 2201 if (obj->base.filp == NULL) 2202 return; 2203 2204 /* Our goal here is to return as much of the memory as 2205 * is possible back to the system as we are called from OOM. 2206 * To do this we must instruct the shmfs to drop all of its 2207 * backing pages, *now*. 
2208 */ 2209 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); 2210 obj->mm.madv = __I915_MADV_PURGED; 2211 obj->mm.pages = ERR_PTR(-EFAULT); 2212 } 2213 2214 /* Try to discard unwanted pages */ 2215 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj) 2216 { 2217 struct address_space *mapping; 2218 2219 lockdep_assert_held(&obj->mm.lock); 2220 GEM_BUG_ON(i915_gem_object_has_pages(obj)); 2221 2222 switch (obj->mm.madv) { 2223 case I915_MADV_DONTNEED: 2224 i915_gem_object_truncate(obj); 2225 case __I915_MADV_PURGED: 2226 return; 2227 } 2228 2229 if (obj->base.filp == NULL) 2230 return; 2231 2232 mapping = obj->base.filp->f_mapping, 2233 invalidate_mapping_pages(mapping, 0, (loff_t)-1); 2234 } 2235 2236 static void 2237 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, 2238 struct sg_table *pages) 2239 { 2240 struct sgt_iter sgt_iter; 2241 struct page *page; 2242 2243 __i915_gem_object_release_shmem(obj, pages, true); 2244 2245 i915_gem_gtt_finish_pages(obj, pages); 2246 2247 if (i915_gem_object_needs_bit17_swizzle(obj)) 2248 i915_gem_object_save_bit_17_swizzle(obj, pages); 2249 2250 for_each_sgt_page(page, sgt_iter, pages) { 2251 if (obj->mm.dirty) 2252 set_page_dirty(page); 2253 2254 if (obj->mm.madv == I915_MADV_WILLNEED) 2255 mark_page_accessed(page); 2256 2257 put_page(page); 2258 } 2259 obj->mm.dirty = false; 2260 2261 sg_free_table(pages); 2262 kfree(pages); 2263 } 2264 2265 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) 2266 { 2267 struct radix_tree_iter iter; 2268 void __rcu **slot; 2269 2270 rcu_read_lock(); 2271 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0) 2272 radix_tree_delete(&obj->mm.get_page.radix, iter.index); 2273 rcu_read_unlock(); 2274 } 2275 2276 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, 2277 enum i915_mm_subclass subclass) 2278 { 2279 struct drm_i915_private *i915 = to_i915(obj->base.dev); 2280 struct sg_table *pages; 2281 2282 if (i915_gem_object_has_pinned_pages(obj)) 2283 return; 2284 2285 GEM_BUG_ON(obj->bind_count); 2286 if (!i915_gem_object_has_pages(obj)) 2287 return; 2288 2289 /* May be called by shrinker from within get_pages() (on another bo) */ 2290 mutex_lock_nested(&obj->mm.lock, subclass); 2291 if (unlikely(atomic_read(&obj->mm.pages_pin_count))) 2292 goto unlock; 2293 2294 /* ->put_pages might need to allocate memory for the bit17 swizzle 2295 * array, hence protect them from being reaped by removing them from gtt 2296 * lists early. 
*/ 2297 pages = fetch_and_zero(&obj->mm.pages); 2298 GEM_BUG_ON(!pages); 2299 2300 spin_lock(&i915->mm.obj_lock); 2301 list_del(&obj->mm.link); 2302 spin_unlock(&i915->mm.obj_lock); 2303 2304 if (obj->mm.mapping) { 2305 void *ptr; 2306 2307 ptr = page_mask_bits(obj->mm.mapping); 2308 if (is_vmalloc_addr(ptr)) 2309 vunmap(ptr); 2310 else 2311 kunmap(kmap_to_page(ptr)); 2312 2313 obj->mm.mapping = NULL; 2314 } 2315 2316 __i915_gem_object_reset_page_iter(obj); 2317 2318 if (!IS_ERR(pages)) 2319 obj->ops->put_pages(obj, pages); 2320 2321 obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0; 2322 2323 unlock: 2324 mutex_unlock(&obj->mm.lock); 2325 } 2326 2327 static bool i915_sg_trim(struct sg_table *orig_st) 2328 { 2329 struct sg_table new_st; 2330 struct scatterlist *sg, *new_sg; 2331 unsigned int i; 2332 2333 if (orig_st->nents == orig_st->orig_nents) 2334 return false; 2335 2336 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN)) 2337 return false; 2338 2339 new_sg = new_st.sgl; 2340 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) { 2341 sg_set_page(new_sg, sg_page(sg), sg->length, 0); 2342 /* called before being DMA mapped, no need to copy sg->dma_* */ 2343 new_sg = sg_next(new_sg); 2344 } 2345 GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */ 2346 2347 sg_free_table(orig_st); 2348 2349 *orig_st = new_st; 2350 return true; 2351 } 2352 2353 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) 2354 { 2355 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 2356 const unsigned long page_count = obj->base.size / PAGE_SIZE; 2357 unsigned long i; 2358 struct address_space *mapping; 2359 struct sg_table *st; 2360 struct scatterlist *sg; 2361 struct sgt_iter sgt_iter; 2362 struct page *page; 2363 unsigned long last_pfn = 0; /* suppress gcc warning */ 2364 unsigned int max_segment = i915_sg_segment_size(); 2365 unsigned int sg_page_sizes; 2366 gfp_t noreclaim; 2367 int ret; 2368 2369 /* Assert that the object is not currently in any GPU domain. As it 2370 * wasn't in the GTT, there shouldn't be any way it could have been in 2371 * a GPU cache 2372 */ 2373 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); 2374 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); 2375 2376 st = kmalloc(sizeof(*st), GFP_KERNEL); 2377 if (st == NULL) 2378 return -ENOMEM; 2379 2380 rebuild_st: 2381 if (sg_alloc_table(st, page_count, GFP_KERNEL)) { 2382 kfree(st); 2383 return -ENOMEM; 2384 } 2385 2386 /* Get the list of pages out of our struct file. They'll be pinned 2387 * at this point until we release them. 2388 * 2389 * Fail silently without starting the shrinker 2390 */ 2391 mapping = obj->base.filp->f_mapping; 2392 noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); 2393 noreclaim |= __GFP_NORETRY | __GFP_NOWARN; 2394 2395 sg = st->sgl; 2396 st->nents = 0; 2397 sg_page_sizes = 0; 2398 for (i = 0; i < page_count; i++) { 2399 const unsigned int shrink[] = { 2400 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE, 2401 0, 2402 }, *s = shrink; 2403 gfp_t gfp = noreclaim; 2404 2405 do { 2406 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2407 if (likely(!IS_ERR(page))) 2408 break; 2409 2410 if (!*s) { 2411 ret = PTR_ERR(page); 2412 goto err_sg; 2413 } 2414 2415 i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++); 2416 cond_resched(); 2417 2418 /* We've tried hard to allocate the memory by reaping 2419 * our own buffer, now let the real VM do its job and 2420 * go down in flames if truly OOM. 
2421 * 2422 * However, since graphics tend to be disposable, 2423 * defer the oom here by reporting the ENOMEM back 2424 * to userspace. 2425 */ 2426 if (!*s) { 2427 /* reclaim and warn, but no oom */ 2428 gfp = mapping_gfp_mask(mapping); 2429 2430 /* Our bo are always dirty and so we require 2431 * kswapd to reclaim our pages (direct reclaim 2432 * does not effectively begin pageout of our 2433 * buffers on its own). However, direct reclaim 2434 * only waits for kswapd when under allocation 2435 * congestion. So as a result __GFP_RECLAIM is 2436 * unreliable and fails to actually reclaim our 2437 * dirty pages -- unless you try over and over 2438 * again with !__GFP_NORETRY. However, we still 2439 * want to fail this allocation rather than 2440 * trigger the out-of-memory killer and for 2441 * this we want __GFP_RETRY_MAYFAIL. 2442 */ 2443 gfp |= __GFP_RETRY_MAYFAIL; 2444 } 2445 } while (1); 2446 2447 if (!i || 2448 sg->length >= max_segment || 2449 page_to_pfn(page) != last_pfn + 1) { 2450 if (i) { 2451 sg_page_sizes |= sg->length; 2452 sg = sg_next(sg); 2453 } 2454 st->nents++; 2455 sg_set_page(sg, page, PAGE_SIZE, 0); 2456 } else { 2457 sg->length += PAGE_SIZE; 2458 } 2459 last_pfn = page_to_pfn(page); 2460 2461 /* Check that the i965g/gm workaround works. */ 2462 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); 2463 } 2464 if (sg) { /* loop terminated early; short sg table */ 2465 sg_page_sizes |= sg->length; 2466 sg_mark_end(sg); 2467 } 2468 2469 /* Trim unused sg entries to avoid wasting memory. */ 2470 i915_sg_trim(st); 2471 2472 ret = i915_gem_gtt_prepare_pages(obj, st); 2473 if (ret) { 2474 /* DMA remapping failed? One possible cause is that 2475 * it could not reserve enough large entries, asking 2476 * for PAGE_SIZE chunks instead may be helpful. 2477 */ 2478 if (max_segment > PAGE_SIZE) { 2479 for_each_sgt_page(page, sgt_iter, st) 2480 put_page(page); 2481 sg_free_table(st); 2482 2483 max_segment = PAGE_SIZE; 2484 goto rebuild_st; 2485 } else { 2486 dev_warn(&dev_priv->drm.pdev->dev, 2487 "Failed to DMA remap %lu pages\n", 2488 page_count); 2489 goto err_pages; 2490 } 2491 } 2492 2493 if (i915_gem_object_needs_bit17_swizzle(obj)) 2494 i915_gem_object_do_bit_17_swizzle(obj, st); 2495 2496 __i915_gem_object_set_pages(obj, st, sg_page_sizes); 2497 2498 return 0; 2499 2500 err_sg: 2501 sg_mark_end(sg); 2502 err_pages: 2503 for_each_sgt_page(page, sgt_iter, st) 2504 put_page(page); 2505 sg_free_table(st); 2506 kfree(st); 2507 2508 /* shmemfs first checks if there is enough memory to allocate the page 2509 * and reports ENOSPC should there be insufficient, along with the usual 2510 * ENOMEM for a genuine allocation failure. 2511 * 2512 * We use ENOSPC in our driver to mean that we have run out of aperture 2513 * space and so want to translate the error from shmemfs back to our 2514 * usual understanding of ENOMEM. 
2515 */ 2516 if (ret == -ENOSPC) 2517 ret = -ENOMEM; 2518 2519 return ret; 2520 } 2521 2522 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, 2523 struct sg_table *pages, 2524 unsigned int sg_page_sizes) 2525 { 2526 struct drm_i915_private *i915 = to_i915(obj->base.dev); 2527 unsigned long supported = INTEL_INFO(i915)->page_sizes; 2528 int i; 2529 2530 lockdep_assert_held(&obj->mm.lock); 2531 2532 obj->mm.get_page.sg_pos = pages->sgl; 2533 obj->mm.get_page.sg_idx = 0; 2534 2535 obj->mm.pages = pages; 2536 2537 if (i915_gem_object_is_tiled(obj) && 2538 i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 2539 GEM_BUG_ON(obj->mm.quirked); 2540 __i915_gem_object_pin_pages(obj); 2541 obj->mm.quirked = true; 2542 } 2543 2544 GEM_BUG_ON(!sg_page_sizes); 2545 obj->mm.page_sizes.phys = sg_page_sizes; 2546 2547 /* 2548 * Calculate the supported page-sizes which fit into the given 2549 * sg_page_sizes. This will give us the page-sizes which we may be able 2550 * to use opportunistically when later inserting into the GTT. For 2551 * example if phys=2G, then in theory we should be able to use 1G, 2M, 2552 * 64K or 4K pages, although in practice this will depend on a number of 2553 * other factors. 2554 */ 2555 obj->mm.page_sizes.sg = 0; 2556 for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) { 2557 if (obj->mm.page_sizes.phys & ~0u << i) 2558 obj->mm.page_sizes.sg |= BIT(i); 2559 } 2560 GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg)); 2561 2562 spin_lock(&i915->mm.obj_lock); 2563 list_add(&obj->mm.link, &i915->mm.unbound_list); 2564 spin_unlock(&i915->mm.obj_lock); 2565 } 2566 2567 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2568 { 2569 int err; 2570 2571 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) { 2572 DRM_DEBUG("Attempting to obtain a purgeable object\n"); 2573 return -EFAULT; 2574 } 2575 2576 err = obj->ops->get_pages(obj); 2577 GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages)); 2578 2579 return err; 2580 } 2581 2582 /* Ensure that the associated pages are gathered from the backing storage 2583 * and pinned into our object. i915_gem_object_pin_pages() may be called 2584 * multiple times before they are released by a single call to 2585 * i915_gem_object_unpin_pages() - once the pages are no longer referenced 2586 * either as a result of memory pressure (reaping pages under the shrinker) 2587 * or as the object is itself released. 
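 *
 * A rough usage sketch (illustrative only; error handling trimmed and the
 * caller is assumed to already hold a reference on the object):
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	... access obj->mm.pages ...
 *	i915_gem_object_unpin_pages(obj);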
2588 */ 2589 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2590 { 2591 int err; 2592 2593 err = mutex_lock_interruptible(&obj->mm.lock); 2594 if (err) 2595 return err; 2596 2597 if (unlikely(!i915_gem_object_has_pages(obj))) { 2598 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); 2599 2600 err = ____i915_gem_object_get_pages(obj); 2601 if (err) 2602 goto unlock; 2603 2604 smp_mb__before_atomic(); 2605 } 2606 atomic_inc(&obj->mm.pages_pin_count); 2607 2608 unlock: 2609 mutex_unlock(&obj->mm.lock); 2610 return err; 2611 } 2612 2613 /* The 'mapping' part of i915_gem_object_pin_map() below */ 2614 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, 2615 enum i915_map_type type) 2616 { 2617 unsigned long n_pages = obj->base.size >> PAGE_SHIFT; 2618 struct sg_table *sgt = obj->mm.pages; 2619 struct sgt_iter sgt_iter; 2620 struct page *page; 2621 struct page *stack_pages[32]; 2622 struct page **pages = stack_pages; 2623 unsigned long i = 0; 2624 pgprot_t pgprot; 2625 void *addr; 2626 2627 /* A single page can always be kmapped */ 2628 if (n_pages == 1 && type == I915_MAP_WB) 2629 return kmap(sg_page(sgt->sgl)); 2630 2631 if (n_pages > ARRAY_SIZE(stack_pages)) { 2632 /* Too big for stack -- allocate temporary array instead */ 2633 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); 2634 if (!pages) 2635 return NULL; 2636 } 2637 2638 for_each_sgt_page(page, sgt_iter, sgt) 2639 pages[i++] = page; 2640 2641 /* Check that we have the expected number of pages */ 2642 GEM_BUG_ON(i != n_pages); 2643 2644 switch (type) { 2645 default: 2646 MISSING_CASE(type); 2647 /* fallthrough to use PAGE_KERNEL anyway */ 2648 case I915_MAP_WB: 2649 pgprot = PAGE_KERNEL; 2650 break; 2651 case I915_MAP_WC: 2652 pgprot = pgprot_writecombine(PAGE_KERNEL_IO); 2653 break; 2654 } 2655 addr = vmap(pages, n_pages, 0, pgprot); 2656 2657 if (pages != stack_pages) 2658 kvfree(pages); 2659 2660 return addr; 2661 } 2662 2663 /* get, pin, and map the pages of the object into kernel space */ 2664 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, 2665 enum i915_map_type type) 2666 { 2667 enum i915_map_type has_type; 2668 bool pinned; 2669 void *ptr; 2670 int ret; 2671 2672 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); 2673 2674 ret = mutex_lock_interruptible(&obj->mm.lock); 2675 if (ret) 2676 return ERR_PTR(ret); 2677 2678 pinned = !(type & I915_MAP_OVERRIDE); 2679 type &= ~I915_MAP_OVERRIDE; 2680 2681 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { 2682 if (unlikely(!i915_gem_object_has_pages(obj))) { 2683 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); 2684 2685 ret = ____i915_gem_object_get_pages(obj); 2686 if (ret) 2687 goto err_unlock; 2688 2689 smp_mb__before_atomic(); 2690 } 2691 atomic_inc(&obj->mm.pages_pin_count); 2692 pinned = false; 2693 } 2694 GEM_BUG_ON(!i915_gem_object_has_pages(obj)); 2695 2696 ptr = page_unpack_bits(obj->mm.mapping, &has_type); 2697 if (ptr && has_type != type) { 2698 if (pinned) { 2699 ret = -EBUSY; 2700 goto err_unpin; 2701 } 2702 2703 if (is_vmalloc_addr(ptr)) 2704 vunmap(ptr); 2705 else 2706 kunmap(kmap_to_page(ptr)); 2707 2708 ptr = obj->mm.mapping = NULL; 2709 } 2710 2711 if (!ptr) { 2712 ptr = i915_gem_object_map(obj, type); 2713 if (!ptr) { 2714 ret = -ENOMEM; 2715 goto err_unpin; 2716 } 2717 2718 obj->mm.mapping = page_pack_bits(ptr, type); 2719 } 2720 2721 out_unlock: 2722 mutex_unlock(&obj->mm.lock); 2723 return ptr; 2724 2725 err_unpin: 2726 atomic_dec(&obj->mm.pages_pin_count); 2727 err_unlock: 2728 ptr = 
ERR_PTR(ret); 2729 goto out_unlock; 2730 } 2731 2732 static int 2733 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, 2734 const struct drm_i915_gem_pwrite *arg) 2735 { 2736 struct address_space *mapping = obj->base.filp->f_mapping; 2737 char __user *user_data = u64_to_user_ptr(arg->data_ptr); 2738 u64 remain, offset; 2739 unsigned int pg; 2740 2741 /* Before we instantiate/pin the backing store for our use, we 2742 * can prepopulate the shmemfs filp efficiently using a write into 2743 * the pagecache. We avoid the penalty of instantiating all the 2744 * pages, important if the user is just writing to a few and never 2745 * uses the object on the GPU, and using a direct write into shmemfs 2746 * allows it to avoid the cost of retrieving a page (either swapin 2747 * or clearing-before-use) before it is overwritten. 2748 */ 2749 if (i915_gem_object_has_pages(obj)) 2750 return -ENODEV; 2751 2752 if (obj->mm.madv != I915_MADV_WILLNEED) 2753 return -EFAULT; 2754 2755 /* Before the pages are instantiated the object is treated as being 2756 * in the CPU domain. The pages will be clflushed as required before 2757 * use, and we can freely write into the pages directly. If userspace 2758 * races pwrite with any other operation; corruption will ensue - 2759 * that is userspace's prerogative! 2760 */ 2761 2762 remain = arg->size; 2763 offset = arg->offset; 2764 pg = offset_in_page(offset); 2765 2766 do { 2767 unsigned int len, unwritten; 2768 struct page *page; 2769 void *data, *vaddr; 2770 int err; 2771 2772 len = PAGE_SIZE - pg; 2773 if (len > remain) 2774 len = remain; 2775 2776 err = pagecache_write_begin(obj->base.filp, mapping, 2777 offset, len, 0, 2778 &page, &data); 2779 if (err < 0) 2780 return err; 2781 2782 vaddr = kmap(page); 2783 unwritten = copy_from_user(vaddr + pg, user_data, len); 2784 kunmap(page); 2785 2786 err = pagecache_write_end(obj->base.filp, mapping, 2787 offset, len, len - unwritten, 2788 page, data); 2789 if (err < 0) 2790 return err; 2791 2792 if (unwritten) 2793 return -EFAULT; 2794 2795 remain -= len; 2796 user_data += len; 2797 offset += len; 2798 pg = 0; 2799 } while (remain); 2800 2801 return 0; 2802 } 2803 2804 static bool ban_context(const struct i915_gem_context *ctx, 2805 unsigned int score) 2806 { 2807 return (i915_gem_context_is_bannable(ctx) && 2808 score >= CONTEXT_SCORE_BAN_THRESHOLD); 2809 } 2810 2811 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) 2812 { 2813 unsigned int score; 2814 bool banned; 2815 2816 atomic_inc(&ctx->guilty_count); 2817 2818 score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score); 2819 banned = ban_context(ctx, score); 2820 DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n", 2821 ctx->name, score, yesno(banned)); 2822 if (!banned) 2823 return; 2824 2825 i915_gem_context_set_banned(ctx); 2826 if (!IS_ERR_OR_NULL(ctx->file_priv)) { 2827 atomic_inc(&ctx->file_priv->context_bans); 2828 DRM_DEBUG_DRIVER("client %s has had %d context banned\n", 2829 ctx->name, atomic_read(&ctx->file_priv->context_bans)); 2830 } 2831 } 2832 2833 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) 2834 { 2835 atomic_inc(&ctx->active_count); 2836 } 2837 2838 struct drm_i915_gem_request * 2839 i915_gem_find_active_request(struct intel_engine_cs *engine) 2840 { 2841 struct drm_i915_gem_request *request, *active = NULL; 2842 unsigned long flags; 2843 2844 /* We are called by the error capture and reset at a random 2845 * point in time. 
In particular, note that neither is crucially 2846 * ordered with an interrupt. After a hang, the GPU is dead and we 2847 * assume that no more writes can happen (we waited long enough for 2848 * all writes that were in transaction to be flushed) - adding an 2849 * extra delay for a recent interrupt is pointless. Hence, we do 2850 * not need an engine->irq_seqno_barrier() before the seqno reads. 2851 */ 2852 spin_lock_irqsave(&engine->timeline->lock, flags); 2853 list_for_each_entry(request, &engine->timeline->requests, link) { 2854 if (__i915_gem_request_completed(request, 2855 request->global_seqno)) 2856 continue; 2857 2858 GEM_BUG_ON(request->engine != engine); 2859 GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 2860 &request->fence.flags)); 2861 2862 active = request; 2863 break; 2864 } 2865 spin_unlock_irqrestore(&engine->timeline->lock, flags); 2866 2867 return active; 2868 } 2869 2870 static bool engine_stalled(struct intel_engine_cs *engine) 2871 { 2872 if (!engine->hangcheck.stalled) 2873 return false; 2874 2875 /* Check for possible seqno movement after hang declaration */ 2876 if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) { 2877 DRM_DEBUG_DRIVER("%s pardoned\n", engine->name); 2878 return false; 2879 } 2880 2881 return true; 2882 } 2883 2884 /* 2885 * Ensure irq handler finishes, and not run again. 2886 * Also return the active request so that we only search for it once. 2887 */ 2888 struct drm_i915_gem_request * 2889 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) 2890 { 2891 struct drm_i915_gem_request *request = NULL; 2892 2893 /* 2894 * During the reset sequence, we must prevent the engine from 2895 * entering RC6. As the context state is undefined until we restart 2896 * the engine, if it does enter RC6 during the reset, the state 2897 * written to the powercontext is undefined and so we may lose 2898 * GPU state upon resume, i.e. fail to restart after a reset. 2899 */ 2900 intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL); 2901 2902 /* 2903 * Prevent the signaler thread from updating the request 2904 * state (by calling dma_fence_signal) as we are processing 2905 * the reset. The write from the GPU of the seqno is 2906 * asynchronous and the signaler thread may see a different 2907 * value to us and declare the request complete, even though 2908 * the reset routine have picked that request as the active 2909 * (incomplete) request. This conflict is not handled 2910 * gracefully! 2911 */ 2912 kthread_park(engine->breadcrumbs.signaler); 2913 2914 /* 2915 * Prevent request submission to the hardware until we have 2916 * completed the reset in i915_gem_reset_finish(). If a request 2917 * is completed by one engine, it may then queue a request 2918 * to a second via its engine->irq_tasklet *just* as we are 2919 * calling engine->init_hw() and also writing the ELSP. 2920 * Turning off the engine->irq_tasklet until the reset is over 2921 * prevents the race. 2922 */ 2923 tasklet_kill(&engine->execlists.irq_tasklet); 2924 tasklet_disable(&engine->execlists.irq_tasklet); 2925 2926 if (engine->irq_seqno_barrier) 2927 engine->irq_seqno_barrier(engine); 2928 2929 request = i915_gem_find_active_request(engine); 2930 if (request && request->fence.error == -EIO) 2931 request = ERR_PTR(-EIO); /* Previous reset failed! 
*/ 2932 2933 return request; 2934 } 2935 2936 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) 2937 { 2938 struct intel_engine_cs *engine; 2939 struct drm_i915_gem_request *request; 2940 enum intel_engine_id id; 2941 int err = 0; 2942 2943 for_each_engine(engine, dev_priv, id) { 2944 request = i915_gem_reset_prepare_engine(engine); 2945 if (IS_ERR(request)) { 2946 err = PTR_ERR(request); 2947 continue; 2948 } 2949 2950 engine->hangcheck.active_request = request; 2951 } 2952 2953 i915_gem_revoke_fences(dev_priv); 2954 2955 return err; 2956 } 2957 2958 static void skip_request(struct drm_i915_gem_request *request) 2959 { 2960 void *vaddr = request->ring->vaddr; 2961 u32 head; 2962 2963 /* As this request likely depends on state from the lost 2964 * context, clear out all the user operations leaving the 2965 * breadcrumb at the end (so we get the fence notifications). 2966 */ 2967 head = request->head; 2968 if (request->postfix < head) { 2969 memset(vaddr + head, 0, request->ring->size - head); 2970 head = 0; 2971 } 2972 memset(vaddr + head, 0, request->postfix - head); 2973 2974 dma_fence_set_error(&request->fence, -EIO); 2975 } 2976 2977 static void engine_skip_context(struct drm_i915_gem_request *request) 2978 { 2979 struct intel_engine_cs *engine = request->engine; 2980 struct i915_gem_context *hung_ctx = request->ctx; 2981 struct intel_timeline *timeline; 2982 unsigned long flags; 2983 2984 timeline = i915_gem_context_lookup_timeline(hung_ctx, engine); 2985 2986 spin_lock_irqsave(&engine->timeline->lock, flags); 2987 spin_lock(&timeline->lock); 2988 2989 list_for_each_entry_continue(request, &engine->timeline->requests, link) 2990 if (request->ctx == hung_ctx) 2991 skip_request(request); 2992 2993 list_for_each_entry(request, &timeline->requests, link) 2994 skip_request(request); 2995 2996 spin_unlock(&timeline->lock); 2997 spin_unlock_irqrestore(&engine->timeline->lock, flags); 2998 } 2999 3000 /* Returns the request if it was guilty of the hang */ 3001 static struct drm_i915_gem_request * 3002 i915_gem_reset_request(struct intel_engine_cs *engine, 3003 struct drm_i915_gem_request *request) 3004 { 3005 /* The guilty request will get skipped on a hung engine. 3006 * 3007 * Users of client default contexts do not rely on logical 3008 * state preserved between batches so it is safe to execute 3009 * queued requests following the hang. Non default contexts 3010 * rely on preserved state, so skipping a batch loses the 3011 * evolution of the state and it needs to be considered corrupted. 3012 * Executing more queued batches on top of corrupted state is 3013 * risky. But we take the risk by trying to advance through 3014 * the queued requests in order to make the client behaviour 3015 * more predictable around resets, by not throwing away random 3016 * amount of batches it has prepared for execution. Sophisticated 3017 * clients can use gem_reset_stats_ioctl and dma fence status 3018 * (exported via sync_file info ioctl on explicit fences) to observe 3019 * when it loses the context state and should rebuild accordingly. 3020 * 3021 * The context ban, and ultimately the client ban, mechanism are safety 3022 * valves if client submission ends up resulting in nothing more than 3023 * subsequent hangs. 3024 */ 3025 3026 if (engine_stalled(engine)) { 3027 i915_gem_context_mark_guilty(request->ctx); 3028 skip_request(request); 3029 3030 /* If this context is now banned, skip all pending requests. 
*/ 3031 if (i915_gem_context_is_banned(request->ctx)) 3032 engine_skip_context(request); 3033 } else { 3034 /* 3035 * Since this is not the hung engine, it may have advanced 3036 * since the hang declaration. Double check by refinding 3037 * the active request at the time of the reset. 3038 */ 3039 request = i915_gem_find_active_request(engine); 3040 if (request) { 3041 i915_gem_context_mark_innocent(request->ctx); 3042 dma_fence_set_error(&request->fence, -EAGAIN); 3043 3044 /* Rewind the engine to replay the incomplete rq */ 3045 spin_lock_irq(&engine->timeline->lock); 3046 request = list_prev_entry(request, link); 3047 if (&request->link == &engine->timeline->requests) 3048 request = NULL; 3049 spin_unlock_irq(&engine->timeline->lock); 3050 } 3051 } 3052 3053 return request; 3054 } 3055 3056 void i915_gem_reset_engine(struct intel_engine_cs *engine, 3057 struct drm_i915_gem_request *request) 3058 { 3059 engine->irq_posted = 0; 3060 3061 if (request) 3062 request = i915_gem_reset_request(engine, request); 3063 3064 if (request) { 3065 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", 3066 engine->name, request->global_seqno); 3067 } 3068 3069 /* Setup the CS to resume from the breadcrumb of the hung request */ 3070 engine->reset_hw(engine, request); 3071 } 3072 3073 void i915_gem_reset(struct drm_i915_private *dev_priv) 3074 { 3075 struct intel_engine_cs *engine; 3076 enum intel_engine_id id; 3077 3078 lockdep_assert_held(&dev_priv->drm.struct_mutex); 3079 3080 i915_gem_retire_requests(dev_priv); 3081 3082 for_each_engine(engine, dev_priv, id) { 3083 struct i915_gem_context *ctx; 3084 3085 i915_gem_reset_engine(engine, engine->hangcheck.active_request); 3086 ctx = fetch_and_zero(&engine->last_retired_context); 3087 if (ctx) 3088 engine->context_unpin(engine, ctx); 3089 } 3090 3091 i915_gem_restore_fences(dev_priv); 3092 3093 if (dev_priv->gt.awake) { 3094 intel_sanitize_gt_powersave(dev_priv); 3095 intel_enable_gt_powersave(dev_priv); 3096 if (INTEL_GEN(dev_priv) >= 6) 3097 gen6_rps_busy(dev_priv); 3098 } 3099 } 3100 3101 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine) 3102 { 3103 tasklet_enable(&engine->execlists.irq_tasklet); 3104 kthread_unpark(engine->breadcrumbs.signaler); 3105 3106 intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL); 3107 } 3108 3109 void i915_gem_reset_finish(struct drm_i915_private *dev_priv) 3110 { 3111 struct intel_engine_cs *engine; 3112 enum intel_engine_id id; 3113 3114 lockdep_assert_held(&dev_priv->drm.struct_mutex); 3115 3116 for_each_engine(engine, dev_priv, id) { 3117 engine->hangcheck.active_request = NULL; 3118 i915_gem_reset_finish_engine(engine); 3119 } 3120 } 3121 3122 static void nop_submit_request(struct drm_i915_gem_request *request) 3123 { 3124 dma_fence_set_error(&request->fence, -EIO); 3125 3126 i915_gem_request_submit(request); 3127 } 3128 3129 static void nop_complete_submit_request(struct drm_i915_gem_request *request) 3130 { 3131 unsigned long flags; 3132 3133 dma_fence_set_error(&request->fence, -EIO); 3134 3135 spin_lock_irqsave(&request->engine->timeline->lock, flags); 3136 __i915_gem_request_submit(request); 3137 intel_engine_init_global_seqno(request->engine, request->global_seqno); 3138 spin_unlock_irqrestore(&request->engine->timeline->lock, flags); 3139 } 3140 3141 void i915_gem_set_wedged(struct drm_i915_private *i915) 3142 { 3143 struct intel_engine_cs *engine; 3144 enum intel_engine_id id; 3145 3146 /* 3147 * First, stop submission to hw, but do not yet complete requests by 3148 * 
rolling the global seqno forward (since this would complete requests 3149 * for which we haven't set the fence error to EIO yet). 3150 */ 3151 for_each_engine(engine, i915, id) 3152 engine->submit_request = nop_submit_request; 3153 3154 /* 3155 * Make sure no one is running the old callback before we proceed with 3156 * cancelling requests and resetting the completion tracking. Otherwise 3157 * we might submit a request to the hardware which never completes. 3158 */ 3159 synchronize_rcu(); 3160 3161 for_each_engine(engine, i915, id) { 3162 /* Mark all executing requests as skipped */ 3163 engine->cancel_requests(engine); 3164 3165 /* 3166 * Only once we've force-cancelled all in-flight requests can we 3167 * start to complete all requests. 3168 */ 3169 engine->submit_request = nop_complete_submit_request; 3170 } 3171 3172 /* 3173 * Make sure no request can slip through without getting completed by 3174 * either this call here to intel_engine_init_global_seqno, or the one 3175 * in nop_complete_submit_request. 3176 */ 3177 synchronize_rcu(); 3178 3179 for_each_engine(engine, i915, id) { 3180 unsigned long flags; 3181 3182 /* Mark all pending requests as complete so that any concurrent 3183 * (lockless) lookup doesn't try and wait upon the request as we 3184 * reset it. 3185 */ 3186 spin_lock_irqsave(&engine->timeline->lock, flags); 3187 intel_engine_init_global_seqno(engine, 3188 intel_engine_last_submit(engine)); 3189 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3190 } 3191 3192 set_bit(I915_WEDGED, &i915->gpu_error.flags); 3193 wake_up_all(&i915->gpu_error.reset_queue); 3194 } 3195 3196 bool i915_gem_unset_wedged(struct drm_i915_private *i915) 3197 { 3198 struct i915_gem_timeline *tl; 3199 int i; 3200 3201 lockdep_assert_held(&i915->drm.struct_mutex); 3202 if (!test_bit(I915_WEDGED, &i915->gpu_error.flags)) 3203 return true; 3204 3205 /* Before unwedging, make sure that all pending operations 3206 * are flushed and errored out - we may have requests waiting upon 3207 * third party fences. We marked all inflight requests as EIO, and 3208 * every execbuf since returned EIO, for consistency we want all 3209 * the currently pending requests to also be marked as EIO, which 3210 * is done inside our nop_submit_request - and so we must wait. 3211 * 3212 * No more can be submitted until we reset the wedged bit. 3213 */ 3214 list_for_each_entry(tl, &i915->gt.timelines, link) { 3215 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { 3216 struct drm_i915_gem_request *rq; 3217 3218 rq = i915_gem_active_peek(&tl->engine[i].last_request, 3219 &i915->drm.struct_mutex); 3220 if (!rq) 3221 continue; 3222 3223 /* We can't use our normal waiter as we want to 3224 * avoid recursively trying to handle the current 3225 * reset. The basic dma_fence_default_wait() installs 3226 * a callback for dma_fence_signal(), which is 3227 * triggered by our nop handler (indirectly, the 3228 * callback enables the signaler thread which is 3229 * woken by the nop_submit_request() advancing the seqno 3230 * and when the seqno passes the fence, the signaler 3231 * then signals the fence waking us up). 3232 */ 3233 if (dma_fence_default_wait(&rq->fence, true, 3234 MAX_SCHEDULE_TIMEOUT) < 0) 3235 return false; 3236 } 3237 } 3238 3239 /* Undo nop_submit_request. 
We prevent all new i915 requests from 3240 * being queued (by disallowing execbuf whilst wedged) so having 3241 * waited for all active requests above, we know the system is idle 3242 * and do not have to worry about a thread being inside 3243 * engine->submit_request() as we swap over. So unlike installing 3244 * the nop_submit_request on reset, we can do this from normal 3245 * context and do not require stop_machine(). 3246 */ 3247 intel_engines_reset_default_submission(i915); 3248 i915_gem_contexts_lost(i915); 3249 3250 smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ 3251 clear_bit(I915_WEDGED, &i915->gpu_error.flags); 3252 3253 return true; 3254 } 3255 3256 static void 3257 i915_gem_retire_work_handler(struct work_struct *work) 3258 { 3259 struct drm_i915_private *dev_priv = 3260 container_of(work, typeof(*dev_priv), gt.retire_work.work); 3261 struct drm_device *dev = &dev_priv->drm; 3262 3263 /* Come back later if the device is busy... */ 3264 if (mutex_trylock(&dev->struct_mutex)) { 3265 i915_gem_retire_requests(dev_priv); 3266 mutex_unlock(&dev->struct_mutex); 3267 } 3268 3269 /* Keep the retire handler running until we are finally idle. 3270 * We do not need to do this test under locking as in the worst-case 3271 * we queue the retire worker once too often. 3272 */ 3273 if (READ_ONCE(dev_priv->gt.awake)) { 3274 i915_queue_hangcheck(dev_priv); 3275 queue_delayed_work(dev_priv->wq, 3276 &dev_priv->gt.retire_work, 3277 round_jiffies_up_relative(HZ)); 3278 } 3279 } 3280 3281 static void 3282 i915_gem_idle_work_handler(struct work_struct *work) 3283 { 3284 struct drm_i915_private *dev_priv = 3285 container_of(work, typeof(*dev_priv), gt.idle_work.work); 3286 struct drm_device *dev = &dev_priv->drm; 3287 bool rearm_hangcheck; 3288 3289 if (!READ_ONCE(dev_priv->gt.awake)) 3290 return; 3291 3292 /* 3293 * Wait for last execlists context complete, but bail out in case a 3294 * new request is submitted. 3295 */ 3296 wait_for(intel_engines_are_idle(dev_priv), 10); 3297 if (READ_ONCE(dev_priv->gt.active_requests)) 3298 return; 3299 3300 rearm_hangcheck = 3301 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 3302 3303 if (!mutex_trylock(&dev->struct_mutex)) { 3304 /* Currently busy, come back later */ 3305 mod_delayed_work(dev_priv->wq, 3306 &dev_priv->gt.idle_work, 3307 msecs_to_jiffies(50)); 3308 goto out_rearm; 3309 } 3310 3311 /* 3312 * New request retired after this work handler started, extend active 3313 * period until next instance of the work. 
3314 */ 3315 if (work_pending(work)) 3316 goto out_unlock; 3317 3318 if (dev_priv->gt.active_requests) 3319 goto out_unlock; 3320 3321 if (wait_for(intel_engines_are_idle(dev_priv), 10)) 3322 DRM_ERROR("Timeout waiting for engines to idle\n"); 3323 3324 intel_engines_mark_idle(dev_priv); 3325 i915_gem_timelines_mark_idle(dev_priv); 3326 3327 GEM_BUG_ON(!dev_priv->gt.awake); 3328 dev_priv->gt.awake = false; 3329 rearm_hangcheck = false; 3330 3331 if (INTEL_GEN(dev_priv) >= 6) 3332 gen6_rps_idle(dev_priv); 3333 intel_runtime_pm_put(dev_priv); 3334 out_unlock: 3335 mutex_unlock(&dev->struct_mutex); 3336 3337 out_rearm: 3338 if (rearm_hangcheck) { 3339 GEM_BUG_ON(!dev_priv->gt.awake); 3340 i915_queue_hangcheck(dev_priv); 3341 } 3342 } 3343 3344 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) 3345 { 3346 struct drm_i915_private *i915 = to_i915(gem->dev); 3347 struct drm_i915_gem_object *obj = to_intel_bo(gem); 3348 struct drm_i915_file_private *fpriv = file->driver_priv; 3349 struct i915_lut_handle *lut, *ln; 3350 3351 mutex_lock(&i915->drm.struct_mutex); 3352 3353 list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) { 3354 struct i915_gem_context *ctx = lut->ctx; 3355 struct i915_vma *vma; 3356 3357 GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF)); 3358 if (ctx->file_priv != fpriv) 3359 continue; 3360 3361 vma = radix_tree_delete(&ctx->handles_vma, lut->handle); 3362 GEM_BUG_ON(vma->obj != obj); 3363 3364 /* We allow the process to have multiple handles to the same 3365 * vma, in the same fd namespace, by virtue of flink/open. 3366 */ 3367 GEM_BUG_ON(!vma->open_count); 3368 if (!--vma->open_count && !i915_vma_is_ggtt(vma)) 3369 i915_vma_close(vma); 3370 3371 list_del(&lut->obj_link); 3372 list_del(&lut->ctx_link); 3373 3374 kmem_cache_free(i915->luts, lut); 3375 __i915_gem_object_release_unless_active(obj); 3376 } 3377 3378 mutex_unlock(&i915->drm.struct_mutex); 3379 } 3380 3381 static unsigned long to_wait_timeout(s64 timeout_ns) 3382 { 3383 if (timeout_ns < 0) 3384 return MAX_SCHEDULE_TIMEOUT; 3385 3386 if (timeout_ns == 0) 3387 return 0; 3388 3389 return nsecs_to_jiffies_timeout(timeout_ns); 3390 } 3391 3392 /** 3393 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT 3394 * @dev: drm device pointer 3395 * @data: ioctl data blob 3396 * @file: drm file pointer 3397 * 3398 * Returns 0 if successful, else an error is returned with the remaining time in 3399 * the timeout parameter. 3400 * -ETIME: object is still busy after timeout 3401 * -ERESTARTSYS: signal interrupted the wait 3402 * -ENOENT: object doesn't exist 3403 * Also possible, but rare: 3404 * -EAGAIN: incomplete, restart syscall 3405 * -ENOMEM: damn 3406 * -ENODEV: Internal IRQ fail 3407 * -E?: The add request failed 3408 * 3409 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any 3410 * non-zero timeout parameter the wait ioctl will wait for the given number of 3411 * nanoseconds on an object becoming unbusy. Since the wait itself does so 3412 * without holding struct_mutex the object may become re-busied before this 3413 * function completes.
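 *
 * An illustrative userspace sketch (a hedged example, not part of this
 * driver: it assumes an open drm fd, an existing GEM handle and libdrm's
 * drmIoctl() wrapper, with error handling elided). A zero return means the
 * object went idle within the timeout, while a failing call with errno set
 * to ETIME means it was still busy, as listed above:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.flags = 0,
 *		.timeout_ns = 1000000000,
 *	};
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);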
Note that a similar but shorter race condition exists in the busy 3414 * ioctl. 3415 */ 3416 int 3417 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 3418 { 3419 struct drm_i915_gem_wait *args = data; 3420 struct drm_i915_gem_object *obj; 3421 ktime_t start; 3422 long ret; 3423 3424 if (args->flags != 0) 3425 return -EINVAL; 3426 3427 obj = i915_gem_object_lookup(file, args->bo_handle); 3428 if (!obj) 3429 return -ENOENT; 3430 3431 start = ktime_get(); 3432 3433 ret = i915_gem_object_wait(obj, 3434 I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, 3435 to_wait_timeout(args->timeout_ns), 3436 to_rps_client(file)); 3437 3438 if (args->timeout_ns > 0) { 3439 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); 3440 if (args->timeout_ns < 0) 3441 args->timeout_ns = 0; 3442 3443 /* 3444 * Apparently ktime isn't accurate enough and occasionally has a 3445 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch 3446 * things up to make the test happy. We allow up to 1 jiffy. 3447 * 3448 * This is a regression from the timespec->ktime conversion. 3449 */ 3450 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) 3451 args->timeout_ns = 0; 3452 3453 /* Asked to wait beyond the jiffie/scheduler precision? */ 3454 if (ret == -ETIME && args->timeout_ns) 3455 ret = -EAGAIN; 3456 } 3457 3458 i915_gem_object_put(obj); 3459 return ret; 3460 } 3461 3462 static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags) 3463 { 3464 int ret, i; 3465 3466 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { 3467 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags); 3468 if (ret) 3469 return ret; 3470 } 3471 3472 return 0; 3473 } 3474 3475 static int wait_for_engines(struct drm_i915_private *i915) 3476 { 3477 if (wait_for(intel_engines_are_idle(i915), 50)) { 3478 DRM_ERROR("Failed to idle engines, declaring wedged!\n"); 3479 i915_gem_set_wedged(i915); 3480 return -EIO; 3481 } 3482 3483 return 0; 3484 } 3485 3486 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) 3487 { 3488 int ret; 3489 3490 /* If the device is asleep, we have no requests outstanding */ 3491 if (!READ_ONCE(i915->gt.awake)) 3492 return 0; 3493 3494 if (flags & I915_WAIT_LOCKED) { 3495 struct i915_gem_timeline *tl; 3496 3497 lockdep_assert_held(&i915->drm.struct_mutex); 3498 3499 list_for_each_entry(tl, &i915->gt.timelines, link) { 3500 ret = wait_for_timeline(tl, flags); 3501 if (ret) 3502 return ret; 3503 } 3504 3505 i915_gem_retire_requests(i915); 3506 GEM_BUG_ON(i915->gt.active_requests); 3507 3508 ret = wait_for_engines(i915); 3509 } else { 3510 ret = wait_for_timeline(&i915->gt.global_timeline, flags); 3511 } 3512 3513 return ret; 3514 } 3515 3516 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) 3517 { 3518 /* 3519 * We manually flush the CPU domain so that we can override and 3520 * force the flush for the display, and perform it asynchronously. 3521 */ 3522 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 3523 if (obj->cache_dirty) 3524 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); 3525 obj->base.write_domain = 0; 3526 } 3527 3528 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) 3529 { 3530 if (!READ_ONCE(obj->pin_global)) 3531 return; 3532 3533 mutex_lock(&obj->base.dev->struct_mutex); 3534 __i915_gem_object_flush_for_display(obj); 3535 mutex_unlock(&obj->base.dev->struct_mutex); 3536 } 3537 3538 /** 3539 * Moves a single object to the WC read, and possibly write domain.
3540 * @obj: object to act on 3541 * @write: ask for write access or read only 3542 * 3543 * This function returns when the move is complete, including waiting on 3544 * flushes to occur. 3545 */ 3546 int 3547 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write) 3548 { 3549 int ret; 3550 3551 lockdep_assert_held(&obj->base.dev->struct_mutex); 3552 3553 ret = i915_gem_object_wait(obj, 3554 I915_WAIT_INTERRUPTIBLE | 3555 I915_WAIT_LOCKED | 3556 (write ? I915_WAIT_ALL : 0), 3557 MAX_SCHEDULE_TIMEOUT, 3558 NULL); 3559 if (ret) 3560 return ret; 3561 3562 if (obj->base.write_domain == I915_GEM_DOMAIN_WC) 3563 return 0; 3564 3565 /* Flush and acquire obj->pages so that we are coherent through 3566 * direct access in memory with previous cached writes through 3567 * shmemfs and that our cache domain tracking remains valid. 3568 * For example, if the obj->filp was moved to swap without us 3569 * being notified and releasing the pages, we would mistakenly 3570 * continue to assume that the obj remained out of the CPU cached 3571 * domain. 3572 */ 3573 ret = i915_gem_object_pin_pages(obj); 3574 if (ret) 3575 return ret; 3576 3577 flush_write_domain(obj, ~I915_GEM_DOMAIN_WC); 3578 3579 /* Serialise direct access to this object with the barriers for 3580 * coherent writes from the GPU, by effectively invalidating the 3581 * WC domain upon first access. 3582 */ 3583 if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0) 3584 mb(); 3585 3586 /* It should now be out of any other write domains, and we can update 3587 * the domain values for our changes. 3588 */ 3589 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0); 3590 obj->base.read_domains |= I915_GEM_DOMAIN_WC; 3591 if (write) { 3592 obj->base.read_domains = I915_GEM_DOMAIN_WC; 3593 obj->base.write_domain = I915_GEM_DOMAIN_WC; 3594 obj->mm.dirty = true; 3595 } 3596 3597 i915_gem_object_unpin_pages(obj); 3598 return 0; 3599 } 3600 3601 /** 3602 * Moves a single object to the GTT read, and possibly write domain. 3603 * @obj: object to act on 3604 * @write: ask for write access or read only 3605 * 3606 * This function returns when the move is complete, including waiting on 3607 * flushes to occur. 3608 */ 3609 int 3610 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 3611 { 3612 int ret; 3613 3614 lockdep_assert_held(&obj->base.dev->struct_mutex); 3615 3616 ret = i915_gem_object_wait(obj, 3617 I915_WAIT_INTERRUPTIBLE | 3618 I915_WAIT_LOCKED | 3619 (write ? I915_WAIT_ALL : 0), 3620 MAX_SCHEDULE_TIMEOUT, 3621 NULL); 3622 if (ret) 3623 return ret; 3624 3625 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3626 return 0; 3627 3628 /* Flush and acquire obj->pages so that we are coherent through 3629 * direct access in memory with previous cached writes through 3630 * shmemfs and that our cache domain tracking remains valid. 3631 * For example, if the obj->filp was moved to swap without us 3632 * being notified and releasing the pages, we would mistakenly 3633 * continue to assume that the obj remained out of the CPU cached 3634 * domain. 3635 */ 3636 ret = i915_gem_object_pin_pages(obj); 3637 if (ret) 3638 return ret; 3639 3640 flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT); 3641 3642 /* Serialise direct access to this object with the barriers for 3643 * coherent writes from the GPU, by effectively invalidating the 3644 * GTT domain upon first access. 
3645 */ 3646 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 3647 mb(); 3648 3649 /* It should now be out of any other write domains, and we can update 3650 * the domain values for our changes. 3651 */ 3652 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 3653 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3654 if (write) { 3655 obj->base.read_domains = I915_GEM_DOMAIN_GTT; 3656 obj->base.write_domain = I915_GEM_DOMAIN_GTT; 3657 obj->mm.dirty = true; 3658 } 3659 3660 i915_gem_object_unpin_pages(obj); 3661 return 0; 3662 } 3663 3664 /** 3665 * Changes the cache-level of an object across all VMA. 3666 * @obj: object to act on 3667 * @cache_level: new cache level to set for the object 3668 * 3669 * After this function returns, the object will be in the new cache-level 3670 * across all GTT and the contents of the backing storage will be coherent, 3671 * with respect to the new cache-level. In order to keep the backing storage 3672 * coherent for all users, we only allow a single cache level to be set 3673 * globally on the object and prevent it from being changed whilst the 3674 * hardware is reading from the object. That is, if the object is currently 3675 * on the scanout it will be set to uncached (or equivalent display 3676 * cache coherency) and all non-MOCS GPU access will also be uncached so 3677 * that all direct access to the scanout remains coherent. 3678 */ 3679 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3680 enum i915_cache_level cache_level) 3681 { 3682 struct i915_vma *vma; 3683 int ret; 3684 3685 lockdep_assert_held(&obj->base.dev->struct_mutex); 3686 3687 if (obj->cache_level == cache_level) 3688 return 0; 3689 3690 /* Inspect the list of currently bound VMA and unbind any that would 3691 * be invalid given the new cache-level. This is principally to 3692 * catch the issue of the CS prefetch crossing page boundaries and 3693 * reading an invalid PTE on older architectures. 3694 */ 3695 restart: 3696 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3697 if (!drm_mm_node_allocated(&vma->node)) 3698 continue; 3699 3700 if (i915_vma_is_pinned(vma)) { 3701 DRM_DEBUG("can not change the cache level of pinned objects\n"); 3702 return -EBUSY; 3703 } 3704 3705 if (i915_gem_valid_gtt_space(vma, cache_level)) 3706 continue; 3707 3708 ret = i915_vma_unbind(vma); 3709 if (ret) 3710 return ret; 3711 3712 /* As unbinding may affect other elements in the 3713 * obj->vma_list (due to side-effects from retiring 3714 * an active vma), play safe and restart the iterator. 3715 */ 3716 goto restart; 3717 } 3718 3719 /* We can reuse the existing drm_mm nodes but need to change the 3720 * cache-level on the PTE. We could simply unbind them all and 3721 * rebind with the correct cache-level on next use. However, since 3722 * we already have a valid slot, dma mapping, pages etc, we may as well 3723 * rewrite the PTE in the belief that doing so tramples upon less 3724 * state and so involves less work. 3725 */ 3726 if (obj->bind_count) { 3727 /* Before we change the PTE, the GPU must not be accessing it. 3728 * If we wait upon the object, we know that all the bound 3729 * VMA are no longer active.
3730 */ 3731 ret = i915_gem_object_wait(obj, 3732 I915_WAIT_INTERRUPTIBLE | 3733 I915_WAIT_LOCKED | 3734 I915_WAIT_ALL, 3735 MAX_SCHEDULE_TIMEOUT, 3736 NULL); 3737 if (ret) 3738 return ret; 3739 3740 if (!HAS_LLC(to_i915(obj->base.dev)) && 3741 cache_level != I915_CACHE_NONE) { 3742 /* Access to snoopable pages through the GTT is 3743 * incoherent and on some machines causes a hard 3744 * lockup. Relinquish the CPU mmaping to force 3745 * userspace to refault in the pages and we can 3746 * then double check if the GTT mapping is still 3747 * valid for that pointer access. 3748 */ 3749 i915_gem_release_mmap(obj); 3750 3751 /* As we no longer need a fence for GTT access, 3752 * we can relinquish it now (and so prevent having 3753 * to steal a fence from someone else on the next 3754 * fence request). Note GPU activity would have 3755 * dropped the fence as all snoopable access is 3756 * supposed to be linear. 3757 */ 3758 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3759 ret = i915_vma_put_fence(vma); 3760 if (ret) 3761 return ret; 3762 } 3763 } else { 3764 /* We either have incoherent backing store and 3765 * so no GTT access or the architecture is fully 3766 * coherent. In such cases, existing GTT mmaps 3767 * ignore the cache bit in the PTE and we can 3768 * rewrite it without confusing the GPU or having 3769 * to force userspace to fault back in its mmaps. 3770 */ 3771 } 3772 3773 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3774 if (!drm_mm_node_allocated(&vma->node)) 3775 continue; 3776 3777 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE); 3778 if (ret) 3779 return ret; 3780 } 3781 } 3782 3783 list_for_each_entry(vma, &obj->vma_list, obj_link) 3784 vma->node.color = cache_level; 3785 i915_gem_object_set_cache_coherency(obj, cache_level); 3786 obj->cache_dirty = true; /* Always invalidate stale cachelines */ 3787 3788 return 0; 3789 } 3790 3791 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 3792 struct drm_file *file) 3793 { 3794 struct drm_i915_gem_caching *args = data; 3795 struct drm_i915_gem_object *obj; 3796 int err = 0; 3797 3798 rcu_read_lock(); 3799 obj = i915_gem_object_lookup_rcu(file, args->handle); 3800 if (!obj) { 3801 err = -ENOENT; 3802 goto out; 3803 } 3804 3805 switch (obj->cache_level) { 3806 case I915_CACHE_LLC: 3807 case I915_CACHE_L3_LLC: 3808 args->caching = I915_CACHING_CACHED; 3809 break; 3810 3811 case I915_CACHE_WT: 3812 args->caching = I915_CACHING_DISPLAY; 3813 break; 3814 3815 default: 3816 args->caching = I915_CACHING_NONE; 3817 break; 3818 } 3819 out: 3820 rcu_read_unlock(); 3821 return err; 3822 } 3823 3824 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 3825 struct drm_file *file) 3826 { 3827 struct drm_i915_private *i915 = to_i915(dev); 3828 struct drm_i915_gem_caching *args = data; 3829 struct drm_i915_gem_object *obj; 3830 enum i915_cache_level level; 3831 int ret = 0; 3832 3833 switch (args->caching) { 3834 case I915_CACHING_NONE: 3835 level = I915_CACHE_NONE; 3836 break; 3837 case I915_CACHING_CACHED: 3838 /* 3839 * Due to a HW issue on BXT A stepping, GPU stores via a 3840 * snooped mapping may leave stale data in a corresponding CPU 3841 * cacheline, whereas normally such cachelines would get 3842 * invalidated. 3843 */ 3844 if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) 3845 return -ENODEV; 3846 3847 level = I915_CACHE_LLC; 3848 break; 3849 case I915_CACHING_DISPLAY: 3850 level = HAS_WT(i915) ? 
I915_CACHE_WT : I915_CACHE_NONE; 3851 break; 3852 default: 3853 return -EINVAL; 3854 } 3855 3856 obj = i915_gem_object_lookup(file, args->handle); 3857 if (!obj) 3858 return -ENOENT; 3859 3860 if (obj->cache_level == level) 3861 goto out; 3862 3863 ret = i915_gem_object_wait(obj, 3864 I915_WAIT_INTERRUPTIBLE, 3865 MAX_SCHEDULE_TIMEOUT, 3866 to_rps_client(file)); 3867 if (ret) 3868 goto out; 3869 3870 ret = i915_mutex_lock_interruptible(dev); 3871 if (ret) 3872 goto out; 3873 3874 ret = i915_gem_object_set_cache_level(obj, level); 3875 mutex_unlock(&dev->struct_mutex); 3876 3877 out: 3878 i915_gem_object_put(obj); 3879 return ret; 3880 } 3881 3882 /* 3883 * Prepare buffer for display plane (scanout, cursors, etc). 3884 * Can be called from an uninterruptible phase (modesetting) and allows 3885 * any flushes to be pipelined (for pageflips). 3886 */ 3887 struct i915_vma * 3888 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3889 u32 alignment, 3890 const struct i915_ggtt_view *view) 3891 { 3892 struct i915_vma *vma; 3893 int ret; 3894 3895 lockdep_assert_held(&obj->base.dev->struct_mutex); 3896 3897 /* Mark the global pin early so that we account for the 3898 * display coherency whilst setting up the cache domains. 3899 */ 3900 obj->pin_global++; 3901 3902 /* The display engine is not coherent with the LLC cache on gen6. As 3903 * a result, we make sure that the pinning that is about to occur is 3904 * done with uncached PTEs. This is lowest common denominator for all 3905 * chipsets. 3906 * 3907 * However for gen6+, we could do better by using the GFDT bit instead 3908 * of uncaching, which would allow us to flush all the LLC-cached data 3909 * with that bit in the PTE to main memory with just one PIPE_CONTROL. 3910 */ 3911 ret = i915_gem_object_set_cache_level(obj, 3912 HAS_WT(to_i915(obj->base.dev)) ? 3913 I915_CACHE_WT : I915_CACHE_NONE); 3914 if (ret) { 3915 vma = ERR_PTR(ret); 3916 goto err_unpin_global; 3917 } 3918 3919 /* As the user may map the buffer once pinned in the display plane 3920 * (e.g. libkms for the bootup splash), we have to ensure that we 3921 * always use map_and_fenceable for all scanout buffers. However, 3922 * it may simply be too big to fit into mappable, in which case 3923 * put it anyway and hope that userspace can cope (but always first 3924 * try to preserve the existing ABI). 3925 */ 3926 vma = ERR_PTR(-ENOSPC); 3927 if (!view || view->type == I915_GGTT_VIEW_NORMAL) 3928 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 3929 PIN_MAPPABLE | PIN_NONBLOCK); 3930 if (IS_ERR(vma)) { 3931 struct drm_i915_private *i915 = to_i915(obj->base.dev); 3932 unsigned int flags; 3933 3934 /* Valleyview is definitely limited to scanning out the first 3935 * 512MiB. Lets presume this behaviour was inherited from the 3936 * g4x display engine and that all earlier gen are similarly 3937 * limited. Testing suggests that it is a little more 3938 * complicated than this. For example, Cherryview appears quite 3939 * happy to scanout from anywhere within its global aperture. 
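	 * Hence the fallback pin below only insists on PIN_MAPPABLE for
	 * GMCH display hardware (HAS_GMCH_DISPLAY) and otherwise allows
	 * the object to be placed anywhere in the global GTT.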
3940 */ 3941 flags = 0; 3942 if (HAS_GMCH_DISPLAY(i915)) 3943 flags = PIN_MAPPABLE; 3944 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); 3945 } 3946 if (IS_ERR(vma)) 3947 goto err_unpin_global; 3948 3949 vma->display_alignment = max_t(u64, vma->display_alignment, alignment); 3950 3951 /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */ 3952 __i915_gem_object_flush_for_display(obj); 3953 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); 3954 3955 /* It should now be out of any other write domains, and we can update 3956 * the domain values for our changes. 3957 */ 3958 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3959 3960 return vma; 3961 3962 err_unpin_global: 3963 obj->pin_global--; 3964 return vma; 3965 } 3966 3967 void 3968 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) 3969 { 3970 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 3971 3972 if (WARN_ON(vma->obj->pin_global == 0)) 3973 return; 3974 3975 if (--vma->obj->pin_global == 0) 3976 vma->display_alignment = I915_GTT_MIN_ALIGNMENT; 3977 3978 /* Bump the LRU to try and avoid premature eviction whilst flipping */ 3979 i915_gem_object_bump_inactive_ggtt(vma->obj); 3980 3981 i915_vma_unpin(vma); 3982 } 3983 3984 /** 3985 * Moves a single object to the CPU read, and possibly write domain. 3986 * @obj: object to act on 3987 * @write: requesting write or read-only access 3988 * 3989 * This function returns when the move is complete, including waiting on 3990 * flushes to occur. 3991 */ 3992 int 3993 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) 3994 { 3995 int ret; 3996 3997 lockdep_assert_held(&obj->base.dev->struct_mutex); 3998 3999 ret = i915_gem_object_wait(obj, 4000 I915_WAIT_INTERRUPTIBLE | 4001 I915_WAIT_LOCKED | 4002 (write ? I915_WAIT_ALL : 0), 4003 MAX_SCHEDULE_TIMEOUT, 4004 NULL); 4005 if (ret) 4006 return ret; 4007 4008 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 4009 4010 /* Flush the CPU cache if it's still invalid. */ 4011 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { 4012 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); 4013 obj->base.read_domains |= I915_GEM_DOMAIN_CPU; 4014 } 4015 4016 /* It should now be out of any other write domains, and we can update 4017 * the domain values for our changes. 4018 */ 4019 GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); 4020 4021 /* If we're writing through the CPU, then the GPU read domains will 4022 * need to be invalidated at next use. 4023 */ 4024 if (write) 4025 __start_cpu_write(obj); 4026 4027 return 0; 4028 } 4029 4030 /* Throttle our rendering by waiting until the ring has completed our requests 4031 * emitted over 20 msec ago. 4032 * 4033 * Note that if we were to use the current jiffies each time around the loop, 4034 * we wouldn't escape the function with any frames outstanding if the time to 4035 * render a frame was over 20ms. 4036 * 4037 * This should get us reasonable parallelism between CPU and GPU but also 4038 * relatively low latency when blocking on a particular request to finish. 
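 * In practice we walk the client's request list below, pick the most recent
 * request emitted more than DRM_I915_THROTTLE_JIFFIES (20ms) ago, and block
 * on it with i915_wait_request().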
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct drm_i915_gem_request *request, *target = NULL;
	long ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		if (target) {
			list_del(&target->client_link);
			target->file_priv = NULL;
		}

		target = request;
	}
	if (target)
		i915_gem_request_get(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = i915_wait_request(target,
				I915_WAIT_INTERRUPTIBLE,
				MAX_SCHEDULE_TIMEOUT);
	i915_gem_request_put(target);

	return ret < 0 ? ret : 0;
}

struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!view && flags & PIN_MAPPABLE) {
		/* If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > dev_priv->ggtt.mappable_end)
			return ERR_PTR(-E2BIG);

		/* If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to do a search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
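		 * As implemented below, a PIN_NONBLOCK request for an object
		 * larger than half the mappable aperture is simply refused
		 * with -ENOSPC, leaving the caller to use its fallback path.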
4119 */ 4120 if (flags & PIN_NONBLOCK && 4121 obj->base.size > dev_priv->ggtt.mappable_end / 2) 4122 return ERR_PTR(-ENOSPC); 4123 } 4124 4125 vma = i915_vma_instance(obj, vm, view); 4126 if (unlikely(IS_ERR(vma))) 4127 return vma; 4128 4129 if (i915_vma_misplaced(vma, size, alignment, flags)) { 4130 if (flags & PIN_NONBLOCK) { 4131 if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) 4132 return ERR_PTR(-ENOSPC); 4133 4134 if (flags & PIN_MAPPABLE && 4135 vma->fence_size > dev_priv->ggtt.mappable_end / 2) 4136 return ERR_PTR(-ENOSPC); 4137 } 4138 4139 WARN(i915_vma_is_pinned(vma), 4140 "bo is already pinned in ggtt with incorrect alignment:" 4141 " offset=%08x, req.alignment=%llx," 4142 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n", 4143 i915_ggtt_offset(vma), alignment, 4144 !!(flags & PIN_MAPPABLE), 4145 i915_vma_is_map_and_fenceable(vma)); 4146 ret = i915_vma_unbind(vma); 4147 if (ret) 4148 return ERR_PTR(ret); 4149 } 4150 4151 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); 4152 if (ret) 4153 return ERR_PTR(ret); 4154 4155 return vma; 4156 } 4157 4158 static __always_inline unsigned int __busy_read_flag(unsigned int id) 4159 { 4160 /* Note that we could alias engines in the execbuf API, but 4161 * that would be very unwise as it prevents userspace from 4162 * fine control over engine selection. Ahem. 4163 * 4164 * This should be something like EXEC_MAX_ENGINE instead of 4165 * I915_NUM_ENGINES. 4166 */ 4167 BUILD_BUG_ON(I915_NUM_ENGINES > 16); 4168 return 0x10000 << id; 4169 } 4170 4171 static __always_inline unsigned int __busy_write_id(unsigned int id) 4172 { 4173 /* The uABI guarantees an active writer is also amongst the read 4174 * engines. This would be true if we accessed the activity tracking 4175 * under the lock, but as we perform the lookup of the object and 4176 * its activity locklessly we can not guarantee that the last_write 4177 * being active implies that we have set the same engine flag from 4178 * last_read - hence we always set both read and write busy for 4179 * last_write. 4180 */ 4181 return id | __busy_read_flag(id); 4182 } 4183 4184 static __always_inline unsigned int 4185 __busy_set_if_active(const struct dma_fence *fence, 4186 unsigned int (*flag)(unsigned int id)) 4187 { 4188 struct drm_i915_gem_request *rq; 4189 4190 /* We have to check the current hw status of the fence as the uABI 4191 * guarantees forward progress. We could rely on the idle worker 4192 * to eventually flush us, but to minimise latency just ask the 4193 * hardware. 4194 * 4195 * Note we only report on the status of native fences. 
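 * The resulting busy word packs the single writer's engine id into the low
 * 16 bits (via __busy_write_id) and a bitmask of reading engines into the
 * upper 16 bits (via __busy_read_flag).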
4196 */ 4197 if (!dma_fence_is_i915(fence)) 4198 return 0; 4199 4200 /* opencode to_request() in order to avoid const warnings */ 4201 rq = container_of(fence, struct drm_i915_gem_request, fence); 4202 if (i915_gem_request_completed(rq)) 4203 return 0; 4204 4205 return flag(rq->engine->uabi_id); 4206 } 4207 4208 static __always_inline unsigned int 4209 busy_check_reader(const struct dma_fence *fence) 4210 { 4211 return __busy_set_if_active(fence, __busy_read_flag); 4212 } 4213 4214 static __always_inline unsigned int 4215 busy_check_writer(const struct dma_fence *fence) 4216 { 4217 if (!fence) 4218 return 0; 4219 4220 return __busy_set_if_active(fence, __busy_write_id); 4221 } 4222 4223 int 4224 i915_gem_busy_ioctl(struct drm_device *dev, void *data, 4225 struct drm_file *file) 4226 { 4227 struct drm_i915_gem_busy *args = data; 4228 struct drm_i915_gem_object *obj; 4229 struct reservation_object_list *list; 4230 unsigned int seq; 4231 int err; 4232 4233 err = -ENOENT; 4234 rcu_read_lock(); 4235 obj = i915_gem_object_lookup_rcu(file, args->handle); 4236 if (!obj) 4237 goto out; 4238 4239 /* A discrepancy here is that we do not report the status of 4240 * non-i915 fences, i.e. even though we may report the object as idle, 4241 * a call to set-domain may still stall waiting for foreign rendering. 4242 * This also means that wait-ioctl may report an object as busy, 4243 * where busy-ioctl considers it idle. 4244 * 4245 * We trade the ability to warn of foreign fences to report on which 4246 * i915 engines are active for the object. 4247 * 4248 * Alternatively, we can trade that extra information on read/write 4249 * activity with 4250 * args->busy = 4251 * !reservation_object_test_signaled_rcu(obj->resv, true); 4252 * to report the overall busyness. This is what the wait-ioctl does. 
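	 * The lookup below is lockless: we sample the reservation object's
	 * seqcount, translate the exclusive and shared fences into busy
	 * flags, and retry if the fences changed underneath us.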
4253 * 4254 */ 4255 retry: 4256 seq = raw_read_seqcount(&obj->resv->seq); 4257 4258 /* Translate the exclusive fence to the READ *and* WRITE engine */ 4259 args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl)); 4260 4261 /* Translate shared fences to READ set of engines */ 4262 list = rcu_dereference(obj->resv->fence); 4263 if (list) { 4264 unsigned int shared_count = list->shared_count, i; 4265 4266 for (i = 0; i < shared_count; ++i) { 4267 struct dma_fence *fence = 4268 rcu_dereference(list->shared[i]); 4269 4270 args->busy |= busy_check_reader(fence); 4271 } 4272 } 4273 4274 if (args->busy && read_seqcount_retry(&obj->resv->seq, seq)) 4275 goto retry; 4276 4277 err = 0; 4278 out: 4279 rcu_read_unlock(); 4280 return err; 4281 } 4282 4283 int 4284 i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 4285 struct drm_file *file_priv) 4286 { 4287 return i915_gem_ring_throttle(dev, file_priv); 4288 } 4289 4290 int 4291 i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 4292 struct drm_file *file_priv) 4293 { 4294 struct drm_i915_private *dev_priv = to_i915(dev); 4295 struct drm_i915_gem_madvise *args = data; 4296 struct drm_i915_gem_object *obj; 4297 int err; 4298 4299 switch (args->madv) { 4300 case I915_MADV_DONTNEED: 4301 case I915_MADV_WILLNEED: 4302 break; 4303 default: 4304 return -EINVAL; 4305 } 4306 4307 obj = i915_gem_object_lookup(file_priv, args->handle); 4308 if (!obj) 4309 return -ENOENT; 4310 4311 err = mutex_lock_interruptible(&obj->mm.lock); 4312 if (err) 4313 goto out; 4314 4315 if (i915_gem_object_has_pages(obj) && 4316 i915_gem_object_is_tiled(obj) && 4317 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 4318 if (obj->mm.madv == I915_MADV_WILLNEED) { 4319 GEM_BUG_ON(!obj->mm.quirked); 4320 __i915_gem_object_unpin_pages(obj); 4321 obj->mm.quirked = false; 4322 } 4323 if (args->madv == I915_MADV_WILLNEED) { 4324 GEM_BUG_ON(obj->mm.quirked); 4325 __i915_gem_object_pin_pages(obj); 4326 obj->mm.quirked = true; 4327 } 4328 } 4329 4330 if (obj->mm.madv != __I915_MADV_PURGED) 4331 obj->mm.madv = args->madv; 4332 4333 /* if the object is no longer attached, discard its backing storage */ 4334 if (obj->mm.madv == I915_MADV_DONTNEED && 4335 !i915_gem_object_has_pages(obj)) 4336 i915_gem_object_truncate(obj); 4337 4338 args->retained = obj->mm.madv != __I915_MADV_PURGED; 4339 mutex_unlock(&obj->mm.lock); 4340 4341 out: 4342 i915_gem_object_put(obj); 4343 return err; 4344 } 4345 4346 static void 4347 frontbuffer_retire(struct i915_gem_active *active, 4348 struct drm_i915_gem_request *request) 4349 { 4350 struct drm_i915_gem_object *obj = 4351 container_of(active, typeof(*obj), frontbuffer_write); 4352 4353 intel_fb_obj_flush(obj, ORIGIN_CS); 4354 } 4355 4356 void i915_gem_object_init(struct drm_i915_gem_object *obj, 4357 const struct drm_i915_gem_object_ops *ops) 4358 { 4359 mutex_init(&obj->mm.lock); 4360 4361 INIT_LIST_HEAD(&obj->vma_list); 4362 INIT_LIST_HEAD(&obj->lut_list); 4363 INIT_LIST_HEAD(&obj->batch_pool_link); 4364 4365 obj->ops = ops; 4366 4367 reservation_object_init(&obj->__builtin_resv); 4368 obj->resv = &obj->__builtin_resv; 4369 4370 obj->frontbuffer_ggtt_origin = ORIGIN_GTT; 4371 init_request_active(&obj->frontbuffer_write, frontbuffer_retire); 4372 4373 obj->mm.madv = I915_MADV_WILLNEED; 4374 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); 4375 mutex_init(&obj->mm.get_page.lock); 4376 4377 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); 4378 } 4379 4380 static const struct drm_i915_gem_object_ops 
i915_gem_object_ops = { 4381 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | 4382 I915_GEM_OBJECT_IS_SHRINKABLE, 4383 4384 .get_pages = i915_gem_object_get_pages_gtt, 4385 .put_pages = i915_gem_object_put_pages_gtt, 4386 4387 .pwrite = i915_gem_object_pwrite_gtt, 4388 }; 4389 4390 static int i915_gem_object_create_shmem(struct drm_device *dev, 4391 struct drm_gem_object *obj, 4392 size_t size) 4393 { 4394 struct drm_i915_private *i915 = to_i915(dev); 4395 unsigned long flags = VM_NORESERVE; 4396 struct file *filp; 4397 4398 drm_gem_private_object_init(dev, obj, size); 4399 4400 if (i915->mm.gemfs) 4401 filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size, 4402 flags); 4403 else 4404 filp = shmem_file_setup("i915", size, flags); 4405 4406 if (IS_ERR(filp)) 4407 return PTR_ERR(filp); 4408 4409 obj->filp = filp; 4410 4411 return 0; 4412 } 4413 4414 struct drm_i915_gem_object * 4415 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) 4416 { 4417 struct drm_i915_gem_object *obj; 4418 struct address_space *mapping; 4419 unsigned int cache_level; 4420 gfp_t mask; 4421 int ret; 4422 4423 /* There is a prevalence of the assumption that we fit the object's 4424 * page count inside a 32bit _signed_ variable. Let's document this and 4425 * catch if we ever need to fix it. In the meantime, if you do spot 4426 * such a local variable, please consider fixing! 4427 */ 4428 if (size >> PAGE_SHIFT > INT_MAX) 4429 return ERR_PTR(-E2BIG); 4430 4431 if (overflows_type(size, obj->base.size)) 4432 return ERR_PTR(-E2BIG); 4433 4434 obj = i915_gem_object_alloc(dev_priv); 4435 if (obj == NULL) 4436 return ERR_PTR(-ENOMEM); 4437 4438 ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size); 4439 if (ret) 4440 goto fail; 4441 4442 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; 4443 if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) { 4444 /* 965gm cannot relocate objects above 4GiB. */ 4445 mask &= ~__GFP_HIGHMEM; 4446 mask |= __GFP_DMA32; 4447 } 4448 4449 mapping = obj->base.filp->f_mapping; 4450 mapping_set_gfp_mask(mapping, mask); 4451 GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); 4452 4453 i915_gem_object_init(obj, &i915_gem_object_ops); 4454 4455 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 4456 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 4457 4458 if (HAS_LLC(dev_priv)) 4459 /* On some devices, we can have the GPU use the LLC (the CPU 4460 * cache) for about a 10% performance improvement 4461 * compared to uncached. Graphics requests other than 4462 * display scanout are coherent with the CPU in 4463 * accessing this cache. This means in this mode we 4464 * don't need to clflush on the CPU side, and on the 4465 * GPU side we only need to flush internal caches to 4466 * get data visible to the CPU. 4467 * 4468 * However, we maintain the display planes as UC, and so 4469 * need to rebind when first used as such. 4470 */ 4471 cache_level = I915_CACHE_LLC; 4472 else 4473 cache_level = I915_CACHE_NONE; 4474 4475 i915_gem_object_set_cache_coherency(obj, cache_level); 4476 4477 trace_i915_gem_object_create(obj); 4478 4479 return obj; 4480 4481 fail: 4482 i915_gem_object_free(obj); 4483 return ERR_PTR(ret); 4484 } 4485 4486 static bool discard_backing_storage(struct drm_i915_gem_object *obj) 4487 { 4488 /* If we are the last user of the backing storage (be it shmemfs 4489 * pages or stolen etc), we know that the pages are going to be 4490 * immediately released. In this case, we can then skip copying 4491 * back the contents from the GPU. 
4492 */ 4493 4494 if (obj->mm.madv != I915_MADV_WILLNEED) 4495 return false; 4496 4497 if (obj->base.filp == NULL) 4498 return true; 4499 4500 /* At first glance, this looks racy, but then again so would be 4501 * userspace racing mmap against close. However, the first external 4502 * reference to the filp can only be obtained through the 4503 * i915_gem_mmap_ioctl() which safeguards us against the user 4504 * acquiring such a reference whilst we are in the middle of 4505 * freeing the object. 4506 */ 4507 return atomic_long_read(&obj->base.filp->f_count) == 1; 4508 } 4509 4510 static void __i915_gem_free_objects(struct drm_i915_private *i915, 4511 struct llist_node *freed) 4512 { 4513 struct drm_i915_gem_object *obj, *on; 4514 4515 intel_runtime_pm_get(i915); 4516 llist_for_each_entry_safe(obj, on, freed, freed) { 4517 struct i915_vma *vma, *vn; 4518 4519 trace_i915_gem_object_destroy(obj); 4520 4521 mutex_lock(&i915->drm.struct_mutex); 4522 4523 GEM_BUG_ON(i915_gem_object_is_active(obj)); 4524 list_for_each_entry_safe(vma, vn, 4525 &obj->vma_list, obj_link) { 4526 GEM_BUG_ON(i915_vma_is_active(vma)); 4527 vma->flags &= ~I915_VMA_PIN_MASK; 4528 i915_vma_close(vma); 4529 } 4530 GEM_BUG_ON(!list_empty(&obj->vma_list)); 4531 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree)); 4532 4533 /* This serializes freeing with the shrinker. Since the free 4534 * is delayed, first by RCU then by the workqueue, we want the 4535 * shrinker to be able to free pages of unreferenced objects, 4536 * or else we may oom whilst there are plenty of deferred 4537 * freed objects. 4538 */ 4539 if (i915_gem_object_has_pages(obj)) { 4540 spin_lock(&i915->mm.obj_lock); 4541 list_del_init(&obj->mm.link); 4542 spin_unlock(&i915->mm.obj_lock); 4543 } 4544 4545 mutex_unlock(&i915->drm.struct_mutex); 4546 4547 GEM_BUG_ON(obj->bind_count); 4548 GEM_BUG_ON(obj->userfault_count); 4549 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits)); 4550 GEM_BUG_ON(!list_empty(&obj->lut_list)); 4551 4552 if (obj->ops->release) 4553 obj->ops->release(obj); 4554 4555 if (WARN_ON(i915_gem_object_has_pinned_pages(obj))) 4556 atomic_set(&obj->mm.pages_pin_count, 0); 4557 __i915_gem_object_put_pages(obj, I915_MM_NORMAL); 4558 GEM_BUG_ON(i915_gem_object_has_pages(obj)); 4559 4560 if (obj->base.import_attach) 4561 drm_prime_gem_destroy(&obj->base, NULL); 4562 4563 reservation_object_fini(&obj->__builtin_resv); 4564 drm_gem_object_release(&obj->base); 4565 i915_gem_info_remove_obj(i915, obj->base.size); 4566 4567 kfree(obj->bit_17); 4568 i915_gem_object_free(obj); 4569 4570 if (on) 4571 cond_resched(); 4572 } 4573 intel_runtime_pm_put(i915); 4574 } 4575 4576 static void i915_gem_flush_free_objects(struct drm_i915_private *i915) 4577 { 4578 struct llist_node *freed; 4579 4580 /* Free the oldest, most stale object to keep the free_list short */ 4581 freed = NULL; 4582 if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */ 4583 /* Only one consumer of llist_del_first() allowed */ 4584 spin_lock(&i915->mm.free_lock); 4585 freed = llist_del_first(&i915->mm.free_list); 4586 spin_unlock(&i915->mm.free_lock); 4587 } 4588 if (unlikely(freed)) { 4589 freed->next = NULL; 4590 __i915_gem_free_objects(i915, freed); 4591 } 4592 } 4593 4594 static void __i915_gem_free_work(struct work_struct *work) 4595 { 4596 struct drm_i915_private *i915 = 4597 container_of(work, struct drm_i915_private, mm.free_work); 4598 struct llist_node *freed; 4599 4600 /* All file-owned VMA should have been released by this point through 4601 * i915_gem_close_object(), or earlier by 
i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * be unbound now.
	 */

	spin_lock(&i915->mm.free_lock);
	while ((freed = llist_del_all(&i915->mm.free_list))) {
		spin_unlock(&i915->mm.free_lock);

		__i915_gem_free_objects(i915, freed);
		if (need_resched())
			return;

		spin_lock(&i915->mm.free_lock);
	}
	spin_unlock(&i915->mm.free_lock);
}

static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* We can't simply use call_rcu() from i915_gem_free_object()
	 * as we need to block whilst unbinding, and the call_rcu
	 * task may be called from softirq context. So we take a
	 * detour through a worker.
	 */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		schedule_work(&i915->mm.free_work);
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	if (obj->mm.quirked)
		__i915_gem_object_unpin_pages(obj);

	if (discard_backing_storage(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!i915_gem_object_has_active_reference(obj) &&
	    i915_gem_object_is_active(obj))
		i915_gem_object_set_active_reference(obj);
	else
		i915_gem_object_put(obj);
}

static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		GEM_BUG_ON(engine->last_retired_context &&
			   !i915_gem_context_is_kernel(engine->last_retired_context));
}

void i915_gem_sanitize(struct drm_i915_private *i915)
{
	if (i915_terminally_wedged(&i915->gpu_error)) {
		mutex_lock(&i915->drm.struct_mutex);
		i915_gem_unset_wedged(i915);
		mutex_unlock(&i915->drm.struct_mutex);
	}

	/*
	 * If we inherit context state from the BIOS or earlier occupants
	 * of the GPU, the GPU may be in an inconsistent state when we
	 * try to take over. The only way to remove the earlier state
	 * is by resetting. However, resetting on earlier gen is tricky as
	 * it may impact the display and we are uncertain about the stability
	 * of the reset, so we hold off on applying this to even earlier gen.
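	 * In practice the reset below is only attempted on gen5 and later,
	 * and a missing reset method (-ENODEV) is tolerated.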
	 */
	if (INTEL_GEN(i915) >= 5) {
		int reset = intel_gpu_reset(i915, ALL_ENGINES);
		WARN_ON(reset && reset != -ENODEV);
	}
}

int i915_gem_suspend(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	intel_runtime_pm_get(dev_priv);
	intel_suspend_gt_powersave(dev_priv);

	mutex_lock(&dev->struct_mutex);

	/* We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the last
	 * context image is coherent, we have to switch away from it. That
	 * leaves the dev_priv->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
		ret = i915_gem_switch_to_kernel_context(dev_priv);
		if (ret)
			goto err_unlock;

		ret = i915_gem_wait_for_idle(dev_priv,
					     I915_WAIT_INTERRUPTIBLE |
					     I915_WAIT_LOCKED);
		if (ret && ret != -EIO)
			goto err_unlock;

		assert_kernel_context_is_current(dev_priv);
	}
	i915_gem_contexts_lost(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	intel_guc_suspend(dev_priv);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);

	/* As the idle_work rearms itself if it detects a race, play safe and
	 * repeat the flush until it is definitely idle.
	 */
	drain_delayed_work(&dev_priv->gt.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->gt.awake);
	if (WARN_ON(!intel_engines_are_idle(dev_priv)))
		i915_gem_set_wedged(dev_priv); /* no hope, discard everything */

	/*
	 * Neither the BIOS, ourselves nor any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */
	i915_gem_sanitize(dev_priv);

	intel_runtime_pm_put(dev_priv);
	return 0;

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	intel_runtime_pm_put(dev_priv);
	return ret;
}

void i915_gem_resume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	WARN_ON(dev_priv->gt.awake);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev_priv);
	i915_gem_restore_fences(dev_priv);

	/* As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete.
So let's just reset 4790 * it and start again. 4791 */ 4792 dev_priv->gt.resume(dev_priv); 4793 4794 mutex_unlock(&dev->struct_mutex); 4795 } 4796 4797 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv) 4798 { 4799 if (INTEL_GEN(dev_priv) < 5 || 4800 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) 4801 return; 4802 4803 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 4804 DISP_TILE_SURFACE_SWIZZLING); 4805 4806 if (IS_GEN5(dev_priv)) 4807 return; 4808 4809 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); 4810 if (IS_GEN6(dev_priv)) 4811 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); 4812 else if (IS_GEN7(dev_priv)) 4813 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); 4814 else if (IS_GEN8(dev_priv)) 4815 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); 4816 else 4817 BUG(); 4818 } 4819 4820 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base) 4821 { 4822 I915_WRITE(RING_CTL(base), 0); 4823 I915_WRITE(RING_HEAD(base), 0); 4824 I915_WRITE(RING_TAIL(base), 0); 4825 I915_WRITE(RING_START(base), 0); 4826 } 4827 4828 static void init_unused_rings(struct drm_i915_private *dev_priv) 4829 { 4830 if (IS_I830(dev_priv)) { 4831 init_unused_ring(dev_priv, PRB1_BASE); 4832 init_unused_ring(dev_priv, SRB0_BASE); 4833 init_unused_ring(dev_priv, SRB1_BASE); 4834 init_unused_ring(dev_priv, SRB2_BASE); 4835 init_unused_ring(dev_priv, SRB3_BASE); 4836 } else if (IS_GEN2(dev_priv)) { 4837 init_unused_ring(dev_priv, SRB0_BASE); 4838 init_unused_ring(dev_priv, SRB1_BASE); 4839 } else if (IS_GEN3(dev_priv)) { 4840 init_unused_ring(dev_priv, PRB1_BASE); 4841 init_unused_ring(dev_priv, PRB2_BASE); 4842 } 4843 } 4844 4845 static int __i915_gem_restart_engines(void *data) 4846 { 4847 struct drm_i915_private *i915 = data; 4848 struct intel_engine_cs *engine; 4849 enum intel_engine_id id; 4850 int err; 4851 4852 for_each_engine(engine, i915, id) { 4853 err = engine->init_hw(engine); 4854 if (err) 4855 return err; 4856 } 4857 4858 return 0; 4859 } 4860 4861 int i915_gem_init_hw(struct drm_i915_private *dev_priv) 4862 { 4863 int ret; 4864 4865 dev_priv->gt.last_init_time = ktime_get(); 4866 4867 /* Double layer security blanket, see i915_gem_init() */ 4868 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4869 4870 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9) 4871 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); 4872 4873 if (IS_HASWELL(dev_priv)) 4874 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? 4875 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); 4876 4877 if (HAS_PCH_NOP(dev_priv)) { 4878 if (IS_IVYBRIDGE(dev_priv)) { 4879 u32 temp = I915_READ(GEN7_MSG_CTL); 4880 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); 4881 I915_WRITE(GEN7_MSG_CTL, temp); 4882 } else if (INTEL_GEN(dev_priv) >= 7) { 4883 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT); 4884 temp &= ~RESET_PCH_HANDSHAKE_ENABLE; 4885 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp); 4886 } 4887 } 4888 4889 i915_gem_init_swizzling(dev_priv); 4890 4891 /* 4892 * At least 830 can leave some of the unused rings 4893 * "active" (ie. head != tail) after resume which 4894 * will prevent c3 entry. Makes sure all unused rings 4895 * are totally idle. 
4896 */ 4897 init_unused_rings(dev_priv); 4898 4899 BUG_ON(!dev_priv->kernel_context); 4900 if (i915_terminally_wedged(&dev_priv->gpu_error)) { 4901 ret = -EIO; 4902 goto out; 4903 } 4904 4905 ret = i915_ppgtt_init_hw(dev_priv); 4906 if (ret) { 4907 DRM_ERROR("PPGTT enable HW failed %d\n", ret); 4908 goto out; 4909 } 4910 4911 /* Need to do basic initialisation of all rings first: */ 4912 ret = __i915_gem_restart_engines(dev_priv); 4913 if (ret) 4914 goto out; 4915 4916 intel_mocs_init_l3cc_table(dev_priv); 4917 4918 /* We can't enable contexts until all firmware is loaded */ 4919 ret = intel_uc_init_hw(dev_priv); 4920 if (ret) 4921 goto out; 4922 4923 out: 4924 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4925 return ret; 4926 } 4927 4928 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value) 4929 { 4930 if (INTEL_INFO(dev_priv)->gen < 6) 4931 return false; 4932 4933 /* TODO: make semaphores and Execlists play nicely together */ 4934 if (i915_modparams.enable_execlists) 4935 return false; 4936 4937 if (value >= 0) 4938 return value; 4939 4940 /* Enable semaphores on SNB when IO remapping is off */ 4941 if (IS_GEN6(dev_priv) && intel_vtd_active()) 4942 return false; 4943 4944 return true; 4945 } 4946 4947 int i915_gem_init(struct drm_i915_private *dev_priv) 4948 { 4949 int ret; 4950 4951 /* 4952 * We need to fallback to 4K pages since gvt gtt handling doesn't 4953 * support huge page entries - we will need to check either hypervisor 4954 * mm can support huge guest page or just do emulation in gvt. 4955 */ 4956 if (intel_vgpu_active(dev_priv)) 4957 mkwrite_device_info(dev_priv)->page_sizes = 4958 I915_GTT_PAGE_SIZE_4K; 4959 4960 dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1); 4961 4962 if (!i915_modparams.enable_execlists) { 4963 dev_priv->gt.resume = intel_legacy_submission_resume; 4964 dev_priv->gt.cleanup_engine = intel_engine_cleanup; 4965 } else { 4966 dev_priv->gt.resume = intel_lr_context_resume; 4967 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup; 4968 } 4969 4970 ret = i915_gem_init_userptr(dev_priv); 4971 if (ret) 4972 return ret; 4973 4974 /* This is just a security blanket to placate dragons. 4975 * On some systems, we very sporadically observe that the first TLBs 4976 * used by the CS may be stale, despite us poking the TLB reset. If 4977 * we hold the forcewake during initialisation these problems 4978 * just magically go away. 4979 */ 4980 mutex_lock(&dev_priv->drm.struct_mutex); 4981 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4982 4983 ret = i915_gem_init_ggtt(dev_priv); 4984 if (ret) 4985 goto out_unlock; 4986 4987 ret = i915_gem_contexts_init(dev_priv); 4988 if (ret) 4989 goto out_unlock; 4990 4991 ret = intel_engines_init(dev_priv); 4992 if (ret) 4993 goto out_unlock; 4994 4995 ret = i915_gem_init_hw(dev_priv); 4996 if (ret == -EIO) { 4997 /* Allow engine initialisation to fail by marking the GPU as 4998 * wedged. But we only want to do this where the GPU is angry, 4999 * for all other failure, such as an allocation failure, bail. 
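		 * Hence only -EIO is swallowed below: the GPU is marked
		 * wedged and init reports success, while any other error is
		 * propagated to the caller.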
5000 */ 5001 if (!i915_terminally_wedged(&dev_priv->gpu_error)) { 5002 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n"); 5003 i915_gem_set_wedged(dev_priv); 5004 } 5005 ret = 0; 5006 } 5007 5008 out_unlock: 5009 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5010 mutex_unlock(&dev_priv->drm.struct_mutex); 5011 5012 return ret; 5013 } 5014 5015 void i915_gem_init_mmio(struct drm_i915_private *i915) 5016 { 5017 i915_gem_sanitize(i915); 5018 } 5019 5020 void 5021 i915_gem_cleanup_engines(struct drm_i915_private *dev_priv) 5022 { 5023 struct intel_engine_cs *engine; 5024 enum intel_engine_id id; 5025 5026 for_each_engine(engine, dev_priv, id) 5027 dev_priv->gt.cleanup_engine(engine); 5028 } 5029 5030 void 5031 i915_gem_load_init_fences(struct drm_i915_private *dev_priv) 5032 { 5033 int i; 5034 5035 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && 5036 !IS_CHERRYVIEW(dev_priv)) 5037 dev_priv->num_fence_regs = 32; 5038 else if (INTEL_INFO(dev_priv)->gen >= 4 || 5039 IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 5040 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) 5041 dev_priv->num_fence_regs = 16; 5042 else 5043 dev_priv->num_fence_regs = 8; 5044 5045 if (intel_vgpu_active(dev_priv)) 5046 dev_priv->num_fence_regs = 5047 I915_READ(vgtif_reg(avail_rs.fence_num)); 5048 5049 /* Initialize fence registers to zero */ 5050 for (i = 0; i < dev_priv->num_fence_regs; i++) { 5051 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; 5052 5053 fence->i915 = dev_priv; 5054 fence->id = i; 5055 list_add_tail(&fence->link, &dev_priv->mm.fence_list); 5056 } 5057 i915_gem_restore_fences(dev_priv); 5058 5059 i915_gem_detect_bit_6_swizzle(dev_priv); 5060 } 5061 5062 int 5063 i915_gem_load_init(struct drm_i915_private *dev_priv) 5064 { 5065 int err = -ENOMEM; 5066 5067 dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); 5068 if (!dev_priv->objects) 5069 goto err_out; 5070 5071 dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); 5072 if (!dev_priv->vmas) 5073 goto err_objects; 5074 5075 dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0); 5076 if (!dev_priv->luts) 5077 goto err_vmas; 5078 5079 dev_priv->requests = KMEM_CACHE(drm_i915_gem_request, 5080 SLAB_HWCACHE_ALIGN | 5081 SLAB_RECLAIM_ACCOUNT | 5082 SLAB_TYPESAFE_BY_RCU); 5083 if (!dev_priv->requests) 5084 goto err_luts; 5085 5086 dev_priv->dependencies = KMEM_CACHE(i915_dependency, 5087 SLAB_HWCACHE_ALIGN | 5088 SLAB_RECLAIM_ACCOUNT); 5089 if (!dev_priv->dependencies) 5090 goto err_requests; 5091 5092 dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN); 5093 if (!dev_priv->priorities) 5094 goto err_dependencies; 5095 5096 mutex_lock(&dev_priv->drm.struct_mutex); 5097 INIT_LIST_HEAD(&dev_priv->gt.timelines); 5098 err = i915_gem_timeline_init__global(dev_priv); 5099 mutex_unlock(&dev_priv->drm.struct_mutex); 5100 if (err) 5101 goto err_priorities; 5102 5103 INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work); 5104 5105 spin_lock_init(&dev_priv->mm.obj_lock); 5106 spin_lock_init(&dev_priv->mm.free_lock); 5107 init_llist_head(&dev_priv->mm.free_list); 5108 INIT_LIST_HEAD(&dev_priv->mm.unbound_list); 5109 INIT_LIST_HEAD(&dev_priv->mm.bound_list); 5110 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 5111 INIT_LIST_HEAD(&dev_priv->mm.userfault_list); 5112 5113 INIT_DELAYED_WORK(&dev_priv->gt.retire_work, 5114 i915_gem_retire_work_handler); 5115 INIT_DELAYED_WORK(&dev_priv->gt.idle_work, 5116 i915_gem_idle_work_handler); 5117 init_waitqueue_head(&dev_priv->gpu_error.wait_queue); 5118 
init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 5119 5120 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0); 5121 5122 spin_lock_init(&dev_priv->fb_tracking.lock); 5123 5124 err = i915_gemfs_init(dev_priv); 5125 if (err) 5126 DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err); 5127 5128 return 0; 5129 5130 err_priorities: 5131 kmem_cache_destroy(dev_priv->priorities); 5132 err_dependencies: 5133 kmem_cache_destroy(dev_priv->dependencies); 5134 err_requests: 5135 kmem_cache_destroy(dev_priv->requests); 5136 err_luts: 5137 kmem_cache_destroy(dev_priv->luts); 5138 err_vmas: 5139 kmem_cache_destroy(dev_priv->vmas); 5140 err_objects: 5141 kmem_cache_destroy(dev_priv->objects); 5142 err_out: 5143 return err; 5144 } 5145 5146 void i915_gem_load_cleanup(struct drm_i915_private *dev_priv) 5147 { 5148 i915_gem_drain_freed_objects(dev_priv); 5149 WARN_ON(!llist_empty(&dev_priv->mm.free_list)); 5150 WARN_ON(dev_priv->mm.object_count); 5151 5152 mutex_lock(&dev_priv->drm.struct_mutex); 5153 i915_gem_timeline_fini(&dev_priv->gt.global_timeline); 5154 WARN_ON(!list_empty(&dev_priv->gt.timelines)); 5155 mutex_unlock(&dev_priv->drm.struct_mutex); 5156 5157 kmem_cache_destroy(dev_priv->priorities); 5158 kmem_cache_destroy(dev_priv->dependencies); 5159 kmem_cache_destroy(dev_priv->requests); 5160 kmem_cache_destroy(dev_priv->luts); 5161 kmem_cache_destroy(dev_priv->vmas); 5162 kmem_cache_destroy(dev_priv->objects); 5163 5164 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */ 5165 rcu_barrier(); 5166 5167 i915_gemfs_fini(dev_priv); 5168 } 5169 5170 int i915_gem_freeze(struct drm_i915_private *dev_priv) 5171 { 5172 /* Discard all purgeable objects, let userspace recover those as 5173 * required after resuming. 5174 */ 5175 i915_gem_shrink_all(dev_priv); 5176 5177 return 0; 5178 } 5179 5180 int i915_gem_freeze_late(struct drm_i915_private *dev_priv) 5181 { 5182 struct drm_i915_gem_object *obj; 5183 struct list_head *phases[] = { 5184 &dev_priv->mm.unbound_list, 5185 &dev_priv->mm.bound_list, 5186 NULL 5187 }, **p; 5188 5189 /* Called just before we write the hibernation image. 5190 * 5191 * We need to update the domain tracking to reflect that the CPU 5192 * will be accessing all the pages to create and restore from the 5193 * hibernation, and so upon restoration those pages will be in the 5194 * CPU domain. 5195 * 5196 * To make sure the hibernation image contains the latest state, 5197 * we update that state just before writing out the image. 5198 * 5199 * To try and reduce the hibernation image, we manually shrink 5200 * the objects as well, see i915_gem_freeze() 5201 */ 5202 5203 i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND); 5204 i915_gem_drain_freed_objects(dev_priv); 5205 5206 spin_lock(&dev_priv->mm.obj_lock); 5207 for (p = phases; *p; p++) { 5208 list_for_each_entry(obj, *p, mm.link) 5209 __start_cpu_write(obj); 5210 } 5211 spin_unlock(&dev_priv->mm.obj_lock); 5212 5213 return 0; 5214 } 5215 5216 void i915_gem_release(struct drm_device *dev, struct drm_file *file) 5217 { 5218 struct drm_i915_file_private *file_priv = file->driver_priv; 5219 struct drm_i915_gem_request *request; 5220 5221 /* Clean up our request list when the client is going away, so that 5222 * later retire_requests won't dereference our soon-to-be-gone 5223 * file_priv. 
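	 * Note we only clear each request's file_priv back-pointer under
	 * the client's mm.lock; the requests themselves are left to retire
	 * normally.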
5224 */ 5225 spin_lock(&file_priv->mm.lock); 5226 list_for_each_entry(request, &file_priv->mm.request_list, client_link) 5227 request->file_priv = NULL; 5228 spin_unlock(&file_priv->mm.lock); 5229 } 5230 5231 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) 5232 { 5233 struct drm_i915_file_private *file_priv; 5234 int ret; 5235 5236 DRM_DEBUG("\n"); 5237 5238 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); 5239 if (!file_priv) 5240 return -ENOMEM; 5241 5242 file->driver_priv = file_priv; 5243 file_priv->dev_priv = i915; 5244 file_priv->file = file; 5245 5246 spin_lock_init(&file_priv->mm.lock); 5247 INIT_LIST_HEAD(&file_priv->mm.request_list); 5248 5249 file_priv->bsd_engine = -1; 5250 5251 ret = i915_gem_context_open(i915, file); 5252 if (ret) 5253 kfree(file_priv); 5254 5255 return ret; 5256 } 5257 5258 /** 5259 * i915_gem_track_fb - update frontbuffer tracking 5260 * @old: current GEM buffer for the frontbuffer slots 5261 * @new: new GEM buffer for the frontbuffer slots 5262 * @frontbuffer_bits: bitmask of frontbuffer slots 5263 * 5264 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them 5265 * from @old and setting them in @new. Both @old and @new can be NULL. 5266 */ 5267 void i915_gem_track_fb(struct drm_i915_gem_object *old, 5268 struct drm_i915_gem_object *new, 5269 unsigned frontbuffer_bits) 5270 { 5271 /* Control of individual bits within the mask are guarded by 5272 * the owning plane->mutex, i.e. we can never see concurrent 5273 * manipulation of individual bits. But since the bitfield as a whole 5274 * is updated using RMW, we need to use atomics in order to update 5275 * the bits. 5276 */ 5277 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 5278 sizeof(atomic_t) * BITS_PER_BYTE); 5279 5280 if (old) { 5281 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); 5282 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits); 5283 } 5284 5285 if (new) { 5286 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits); 5287 atomic_or(frontbuffer_bits, &new->frontbuffer_bits); 5288 } 5289 } 5290 5291 /* Allocate a new GEM object and fill it with the supplied data */ 5292 struct drm_i915_gem_object * 5293 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, 5294 const void *data, size_t size) 5295 { 5296 struct drm_i915_gem_object *obj; 5297 struct file *file; 5298 size_t offset; 5299 int err; 5300 5301 obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE)); 5302 if (IS_ERR(obj)) 5303 return obj; 5304 5305 GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU); 5306 5307 file = obj->base.filp; 5308 offset = 0; 5309 do { 5310 unsigned int len = min_t(typeof(size), size, PAGE_SIZE); 5311 struct page *page; 5312 void *pgdata, *vaddr; 5313 5314 err = pagecache_write_begin(file, file->f_mapping, 5315 offset, len, 0, 5316 &page, &pgdata); 5317 if (err < 0) 5318 goto fail; 5319 5320 vaddr = kmap(page); 5321 memcpy(vaddr, data, len); 5322 kunmap(page); 5323 5324 err = pagecache_write_end(file, file->f_mapping, 5325 offset, len, len, 5326 page, pgdata); 5327 if (err < 0) 5328 goto fail; 5329 5330 size -= len; 5331 data += len; 5332 offset += len; 5333 } while (size); 5334 5335 return obj; 5336 5337 fail: 5338 i915_gem_object_put(obj); 5339 return ERR_PTR(err); 5340 } 5341 5342 struct scatterlist * 5343 i915_gem_object_get_sg(struct drm_i915_gem_object *obj, 5344 unsigned int n, 5345 unsigned int *offset) 5346 { 5347 struct i915_gem_object_page_iter *iter = &obj->mm.get_page; 
5348 struct scatterlist *sg; 5349 unsigned int idx, count; 5350 5351 might_sleep(); 5352 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT); 5353 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 5354 5355 /* As we iterate forward through the sg, we record each entry in a 5356 * radixtree for quick repeated (backwards) lookups. If we have seen 5357 * this index previously, we will have an entry for it. 5358 * 5359 * Initial lookup is O(N), but this is amortized to O(1) for 5360 * sequential page access (where each new request is consecutive 5361 * to the previous one). Repeated lookups are O(lg(obj->base.size)), 5362 * i.e. O(1) with a large constant! 5363 */ 5364 if (n < READ_ONCE(iter->sg_idx)) 5365 goto lookup; 5366 5367 mutex_lock(&iter->lock); 5368 5369 /* We prefer to reuse the last sg so that repeated lookup of this 5370 * (or the subsequent) sg are fast - comparing against the last 5371 * sg is faster than going through the radixtree. 5372 */ 5373 5374 sg = iter->sg_pos; 5375 idx = iter->sg_idx; 5376 count = __sg_page_count(sg); 5377 5378 while (idx + count <= n) { 5379 unsigned long exception, i; 5380 int ret; 5381 5382 /* If we cannot allocate and insert this entry, or the 5383 * individual pages from this range, cancel updating the 5384 * sg_idx so that on this lookup we are forced to linearly 5385 * scan onwards, but on future lookups we will try the 5386 * insertion again (in which case we need to be careful of 5387 * the error return reporting that we have already inserted 5388 * this index). 5389 */ 5390 ret = radix_tree_insert(&iter->radix, idx, sg); 5391 if (ret && ret != -EEXIST) 5392 goto scan; 5393 5394 exception = 5395 RADIX_TREE_EXCEPTIONAL_ENTRY | 5396 idx << RADIX_TREE_EXCEPTIONAL_SHIFT; 5397 for (i = 1; i < count; i++) { 5398 ret = radix_tree_insert(&iter->radix, idx + i, 5399 (void *)exception); 5400 if (ret && ret != -EEXIST) 5401 goto scan; 5402 } 5403 5404 idx += count; 5405 sg = ____sg_next(sg); 5406 count = __sg_page_count(sg); 5407 } 5408 5409 scan: 5410 iter->sg_pos = sg; 5411 iter->sg_idx = idx; 5412 5413 mutex_unlock(&iter->lock); 5414 5415 if (unlikely(n < idx)) /* insertion completed by another thread */ 5416 goto lookup; 5417 5418 /* In case we failed to insert the entry into the radixtree, we need 5419 * to look beyond the current sg. 5420 */ 5421 while (idx + count <= n) { 5422 idx += count; 5423 sg = ____sg_next(sg); 5424 count = __sg_page_count(sg); 5425 } 5426 5427 *offset = n - idx; 5428 return sg; 5429 5430 lookup: 5431 rcu_read_lock(); 5432 5433 sg = radix_tree_lookup(&iter->radix, n); 5434 GEM_BUG_ON(!sg); 5435 5436 /* If this index is in the middle of multi-page sg entry, 5437 * the radixtree will contain an exceptional entry that points 5438 * to the start of that range. We will return the pointer to 5439 * the base page and the offset of this page within the 5440 * sg entry's range. 
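	 * The exceptional entry stores that base index; below it is
	 * recovered by shifting out RADIX_TREE_EXCEPTIONAL_SHIFT before a
	 * second lookup fetches the real sg.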
5441 */ 5442 *offset = 0; 5443 if (unlikely(radix_tree_exception(sg))) { 5444 unsigned long base = 5445 (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT; 5446 5447 sg = radix_tree_lookup(&iter->radix, base); 5448 GEM_BUG_ON(!sg); 5449 5450 *offset = n - base; 5451 } 5452 5453 rcu_read_unlock(); 5454 5455 return sg; 5456 } 5457 5458 struct page * 5459 i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n) 5460 { 5461 struct scatterlist *sg; 5462 unsigned int offset; 5463 5464 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); 5465 5466 sg = i915_gem_object_get_sg(obj, n, &offset); 5467 return nth_page(sg_page(sg), offset); 5468 } 5469 5470 /* Like i915_gem_object_get_page(), but mark the returned page dirty */ 5471 struct page * 5472 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, 5473 unsigned int n) 5474 { 5475 struct page *page; 5476 5477 page = i915_gem_object_get_page(obj, n); 5478 if (!obj->mm.dirty) 5479 set_page_dirty(page); 5480 5481 return page; 5482 } 5483 5484 dma_addr_t 5485 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, 5486 unsigned long n) 5487 { 5488 struct scatterlist *sg; 5489 unsigned int offset; 5490 5491 sg = i915_gem_object_get_sg(obj, n, &offset); 5492 return sg_dma_address(sg) + (offset << PAGE_SHIFT); 5493 } 5494 5495 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) 5496 { 5497 struct sg_table *pages; 5498 int err; 5499 5500 if (align > obj->base.size) 5501 return -EINVAL; 5502 5503 if (obj->ops == &i915_gem_phys_ops) 5504 return 0; 5505 5506 if (obj->ops != &i915_gem_object_ops) 5507 return -EINVAL; 5508 5509 err = i915_gem_object_unbind(obj); 5510 if (err) 5511 return err; 5512 5513 mutex_lock(&obj->mm.lock); 5514 5515 if (obj->mm.madv != I915_MADV_WILLNEED) { 5516 err = -EFAULT; 5517 goto err_unlock; 5518 } 5519 5520 if (obj->mm.quirked) { 5521 err = -EFAULT; 5522 goto err_unlock; 5523 } 5524 5525 if (obj->mm.mapping) { 5526 err = -EBUSY; 5527 goto err_unlock; 5528 } 5529 5530 pages = fetch_and_zero(&obj->mm.pages); 5531 if (pages) { 5532 struct drm_i915_private *i915 = to_i915(obj->base.dev); 5533 5534 __i915_gem_object_reset_page_iter(obj); 5535 5536 spin_lock(&i915->mm.obj_lock); 5537 list_del(&obj->mm.link); 5538 spin_unlock(&i915->mm.obj_lock); 5539 } 5540 5541 obj->ops = &i915_gem_phys_ops; 5542 5543 err = ____i915_gem_object_get_pages(obj); 5544 if (err) 5545 goto err_xfer; 5546 5547 /* Perma-pin (until release) the physical set of pages */ 5548 __i915_gem_object_pin_pages(obj); 5549 5550 if (!IS_ERR_OR_NULL(pages)) 5551 i915_gem_object_ops.put_pages(obj, pages); 5552 mutex_unlock(&obj->mm.lock); 5553 return 0; 5554 5555 err_xfer: 5556 obj->ops = &i915_gem_object_ops; 5557 obj->mm.pages = pages; 5558 err_unlock: 5559 mutex_unlock(&obj->mm.lock); 5560 return err; 5561 } 5562 5563 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 5564 #include "selftests/scatterlist.c" 5565 #include "selftests/mock_gem_device.c" 5566 #include "selftests/huge_gem_object.c" 5567 #include "selftests/huge_pages.c" 5568 #include "selftests/i915_gem_object.c" 5569 #include "selftests/i915_gem_coherency.c" 5570 #endif 5571