/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
                                                   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly);
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable);

static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
                                             struct shrink_control *sc);
static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
                                            struct shrink_control *sc);
static int i915_gem_shrinker_oom(struct notifier_block *nb,
                                 unsigned long event,
                                 void *ptr);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);

static bool cpu_cache_is_coherent(struct drm_device *dev,
                                  enum i915_cache_level level)
{
        return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                return true;

        return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
        if (obj->tiling_mode)
                i915_gem_release_mmap(obj);

        /* As we do not have an associated fence register, we will force
         * a tiling change if we ever need to acquire one.
         */
        obj->fence_dirty = false;
        obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
{
        spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count++;
        dev_priv->mm.object_memory += size;
        spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
                                     size_t size)
{
        spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count--;
        dev_priv->mm.object_memory -= size;
        spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
        int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
                   i915_terminally_wedged(error))
        if (EXIT_COND)
                return 0;

        /*
         * Only wait 10 seconds for the gpu reset to complete to avoid hanging
         * userspace. If it takes that long something really bad is going on and
         * we should simply try to bail out and fail as gracefully as possible.
         */
        ret = wait_event_interruptible_timeout(error->reset_queue,
                                               EXIT_COND,
                                               10*HZ);
        if (ret == 0) {
                DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
                return -EIO;
        } else if (ret < 0) {
                return ret;
        }
#undef EXIT_COND

        return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
        if (ret)
                return ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        WARN_ON(i915_verify_lists(dev));
        return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
        return i915_gem_obj_bound_any(obj) && !obj->active;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_get_aperture *args = data;
        struct drm_i915_gem_object *obj;
        size_t pinned;

        pinned = 0;
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
                if (i915_gem_obj_is_pinned(obj))
                        pinned += i915_gem_obj_ggtt_size(obj);
        mutex_unlock(&dev->struct_mutex);

        args->aper_size = dev_priv->gtt.base.total;
        args->aper_available_size = args->aper_size - pinned;

        return 0;
}

static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
        struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
        char *vaddr = obj->phys_handle->vaddr;
        struct sg_table *st;
        struct scatterlist *sg;
        int i;

        if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
                return -EINVAL;

        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                struct page *page;
                char *src;

                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page))
                        return PTR_ERR(page);

                src = kmap_atomic(page);
                memcpy(vaddr, src, PAGE_SIZE);
                drm_clflush_virt_range(vaddr, PAGE_SIZE);
                kunmap_atomic(src);

                page_cache_release(page);
                vaddr += PAGE_SIZE;
        }

        i915_gem_chipset_flush(obj->base.dev);

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return -ENOMEM;

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return -ENOMEM;
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = obj->base.size;

        sg_dma_address(sg) = obj->phys_handle->busaddr;
        sg_dma_len(sg) = obj->base.size;

        obj->pages = st;
        obj->has_dma_mapping = true;
        return 0;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
        int ret;

        BUG_ON(obj->madv == __I915_MADV_PURGED);

        ret = i915_gem_object_set_to_cpu_domain(obj, true);
        if (ret) {
                /* In the event of a disaster, abandon all caches and
                 * hope for the best.
                 */
                WARN_ON(ret != -EIO);
                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }

        if (obj->madv == I915_MADV_DONTNEED)
                obj->dirty = 0;

        if (obj->dirty) {
                struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
                char *vaddr = obj->phys_handle->vaddr;
                int i;

                for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
                        struct page *page;
                        char *dst;

                        page = shmem_read_mapping_page(mapping, i);
                        if (IS_ERR(page))
                                continue;

                        dst = kmap_atomic(page);
                        drm_clflush_virt_range(vaddr, PAGE_SIZE);
                        memcpy(dst, vaddr, PAGE_SIZE);
                        kunmap_atomic(dst);

                        set_page_dirty(page);
                        if (obj->madv == I915_MADV_WILLNEED)
                                mark_page_accessed(page);
                        page_cache_release(page);
                        vaddr += PAGE_SIZE;
                }
                obj->dirty = 0;
        }

        sg_free_table(obj->pages);
        kfree(obj->pages);

        obj->has_dma_mapping = false;
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
        drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
        .get_pages = i915_gem_object_get_pages_phys,
        .put_pages = i915_gem_object_put_pages_phys,
        .release = i915_gem_object_release_phys,
};
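
/*
 * Unbind every VMA and drop the backing pages of the object, holding a
 * temporary reference so that waiting for (and retiring) outstanding
 * rendering while unbinding cannot free the object under us.
 */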
static int
drop_pages(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma, *next;
        int ret;

        drm_gem_object_reference(&obj->base);
        list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
                if (i915_vma_unbind(vma))
                        break;

        ret = i915_gem_object_put_pages(obj);
        drm_gem_object_unreference(&obj->base);

        return ret;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                            int align)
{
        drm_dma_handle_t *phys;
        int ret;

        if (obj->phys_handle) {
                if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
                        return -EBUSY;

                return 0;
        }

        if (obj->madv != I915_MADV_WILLNEED)
                return -EFAULT;

        if (obj->base.filp == NULL)
                return -EINVAL;

        ret = drop_pages(obj);
        if (ret)
                return ret;

        /* create a new object */
        phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
        if (!phys)
                return -ENOMEM;

        obj->phys_handle = phys;
        obj->ops = &i915_gem_phys_ops;

        return i915_gem_object_get_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file_priv)
{
        struct drm_device *dev = obj->base.dev;
        void *vaddr = obj->phys_handle->vaddr + args->offset;
        char __user *user_data = to_user_ptr(args->data_ptr);
        int ret;

        /* We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
         */
        ret = i915_gem_object_wait_rendering(obj, false);
        if (ret)
                return ret;

        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
                unsigned long unwritten;

                /* The physical object once assigned is fixed for the lifetime
                 * of the obj, so we can safely drop the lock and continue
                 * to access vaddr.
                 */
                mutex_unlock(&dev->struct_mutex);
                unwritten = copy_from_user(vaddr, user_data, args->size);
                mutex_lock(&dev->struct_mutex);
                if (unwritten)
                        return -EFAULT;
        }

        drm_clflush_virt_range(vaddr, args->size);
        i915_gem_chipset_flush(dev);
        return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
                struct drm_device *dev,
                uint64_t size,
                bool dumb,
                uint32_t *handle_p)
{
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        size = roundup(size, PAGE_SIZE);
        if (size == 0)
                return -EINVAL;

        /* Allocate the new object */
        obj = i915_gem_alloc_object(dev, size);
        if (obj == NULL)
                return -ENOMEM;

        obj->base.dumb = dumb;
        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&obj->base);
        if (ret)
                return ret;

        *handle_p = handle;
        return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, dev,
                               args->size, true, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_gem_create *args = data;

        return i915_gem_create(file, dev,
                               args->size, false, &args->handle);
}
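
/*
 * Bit-17 swizzling: on some chipsets the memory controller folds bit 17
 * of a page's physical address into bit 6 of tiled addresses, which swaps
 * the two 64-byte cachelines of each 128-byte pair on affected pages.
 * Userspace cannot compensate because it does not know the physical
 * placement of its pages, so the helpers below do it here: they copy at
 * most one cacheline per iteration and access the GPU data at
 * gpu_offset ^ 64 (e.g. a byte logically at offset 0x40 of an affected
 * page actually lives at offset 0x00). Callers invoke these helpers only
 * for pages whose physical address has bit 17 set.
 */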
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
                        const char *gpu_vaddr, int gpu_offset,
                        int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_to_user(cpu_vaddr + cpu_offset,
                                     gpu_vaddr + swizzled_gpu_offset,
                                     this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
                          const char __user *cpu_vaddr,
                          int length)
{
        int ret, cpu_offset = 0;

        while (length > 0) {
                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
                                       cpu_vaddr + cpu_offset,
                                       this_length);
                if (ret)
                        return ret + length;

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
        }

        return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
                                    int *needs_clflush)
{
        int ret;

        *needs_clflush = 0;

        if (!obj->base.filp)
                return -EINVAL;

        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
                /* If we're not in the cpu read domain, set ourself into the gtt
                 * read domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will dirty the data
                 * anyway again before the next pread happens. */
                *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
                                                        obj->cache_level);
                ret = i915_gem_object_wait_rendering(obj, true);
                if (ret)
                        return ret;

                i915_gem_object_retire(obj);
        }

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ret;

        i915_gem_object_pin_pages(obj);

        return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
        char *vaddr;
        int ret;

        if (unlikely(page_do_bit17_swizzling))
                return -EINVAL;

        vaddr = kmap_atomic(page);
        if (needs_clflush)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        ret = __copy_to_user_inatomic(user_data,
                                      vaddr + shmem_page_offset,
                                      page_length);
        kunmap_atomic(vaddr);

        return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
                             bool swizzled)
{
        if (unlikely(swizzled)) {
                unsigned long start = (unsigned long) addr;
                unsigned long end = (unsigned long) addr + length;

                /* For swizzling simply ensure that we always flush both
                 * channels. Lame, but simple and it works. Swizzled
                 * pwrite/pread is far from a hotpath - current userspace
                 * doesn't use it at all. */
                start = round_down(start, 128);
                end = round_up(end, 128);

                drm_clflush_virt_range((void *)start, end - start);
        } else {
                drm_clflush_virt_range(addr, length);
        }
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);
        if (needs_clflush)
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);

        if (page_do_bit17_swizzling)
                ret = __copy_to_user_swizzled(user_data,
                                              vaddr, shmem_page_offset,
                                              page_length);
        else
                ret = __copy_to_user(user_data,
                                     vaddr + shmem_page_offset,
                                     page_length);
        kunmap(page);

        return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
                     struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args,
                     struct drm_file *file)
{
        char __user *user_data;
        ssize_t remain;
        loff_t offset;
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int prefaulted = 0;
        int needs_clflush = 0;
        struct sg_page_iter sg_iter;

        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;

        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
        if (ret)
                return ret;

        offset = args->offset;

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
                         offset >> PAGE_SHIFT) {
                struct page *page = sg_page_iter_page(&sg_iter);

                if (remain <= 0)
                        break;

                /* Operation in this page
                 *
                 * shmem_page_offset = offset within page in shmem file
                 * page_length = bytes to copy for this page
                 */
                shmem_page_offset = offset_in_page(offset);
                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;

                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;

                ret = shmem_pread_fast(page, shmem_page_offset, page_length,
                                       user_data, page_do_bit17_swizzling,
                                       needs_clflush);
                if (ret == 0)
                        goto next_page;

                mutex_unlock(&dev->struct_mutex);

                if (likely(!i915.prefault_disable) && !prefaulted) {
                        ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
                         * data up to the first fault. Hence ignore any errors
                         * and just continue. */
                        (void)ret;
                        prefaulted = 1;
                }

                ret = shmem_pread_slow(page, shmem_page_offset, page_length,
                                       user_data, page_do_bit17_swizzling,
                                       needs_clflush);

                mutex_lock(&dev->struct_mutex);

                if (ret)
                        goto out;

next_page:
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out:
        i915_gem_object_unpin_pages(obj);

        return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_i915_gem_object *obj;
        int ret = 0;

        if (args->size == 0)
                return 0;

        if (!access_ok(VERIFY_WRITE,
                       to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Bounds check source. */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
                goto out;
        }

        /* prime objects have no backing filp to GEM pread/pwrite
         * pages from.
         */
        if (!obj->base.filp) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pread(obj, args->offset, args->size);

        ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        void __iomem *vaddr_atomic;
        void *vaddr;
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        /* We can use the cpu mem copy function because this is X86. */
        vaddr = (void __force*)vaddr_atomic + page_offset;
        unwritten = __copy_from_user_inatomic_nocache(vaddr,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic);
        return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                         struct drm_i915_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length, ret;

        ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
        if (ret)
                goto out;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                goto out_unpin;

        ret = i915_gem_object_put_fence(obj);
        if (ret)
                goto out_unpin;

        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;

        offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = offset & PAGE_MASK;
                page_offset = offset_in_page(offset);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available. Return the error and we'll
                 * retry in the slow path.
                 */
                if (fast_user_write(dev_priv->gtt.mappable, page_base,
                                    page_offset, user_data, page_length)) {
                        ret = -EFAULT;
                        goto out_unpin;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out_unpin:
        i915_gem_object_ggtt_unpin(obj);
out:
        return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
                  bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        if (unlikely(page_do_bit17_swizzling))
                return -EINVAL;

        vaddr = kmap_atomic(page);
        if (needs_clflush_before)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
                                        user_data, page_length);
        if (needs_clflush_after)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
                                       page_length);
        kunmap_atomic(vaddr);

        return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
                  bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);
        if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);
        if (page_do_bit17_swizzling)
                ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
                                                user_data,
                                                page_length);
        else
                ret = __copy_from_user(vaddr + shmem_page_offset,
                                       user_data,
                                       page_length);
        if (needs_clflush_after)
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_length,
                                             page_do_bit17_swizzling);
        kunmap(page);

        return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
                      struct drm_i915_gem_object *obj,
                      struct drm_i915_gem_pwrite *args,
                      struct drm_file *file)
{
        ssize_t remain;
        loff_t offset;
        char __user *user_data;
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int hit_slowpath = 0;
        int needs_clflush_after = 0;
        int needs_clflush_before = 0;
        struct sg_page_iter sg_iter;

        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;

        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                /* If we're not in the cpu write domain, set ourself into the gtt
                 * write domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will use the data
                 * right away and we therefore have to clflush anyway. */
                needs_clflush_after = cpu_write_needs_clflush(obj);
                ret = i915_gem_object_wait_rendering(obj, false);
                if (ret)
                        return ret;

                i915_gem_object_retire(obj);
        }
        /* Same trick applies to invalidate partially written cachelines read
         * before writing. */
        if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
                needs_clflush_before =
                        !cpu_cache_is_coherent(dev, obj->cache_level);

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ret;

        i915_gem_object_pin_pages(obj);

        offset = args->offset;
        obj->dirty = 1;

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
                         offset >> PAGE_SHIFT) {
                struct page *page = sg_page_iter_page(&sg_iter);
                int partial_cacheline_write;

                if (remain <= 0)
                        break;

                /* Operation in this page
                 *
                 * shmem_page_offset = offset within page in shmem file
                 * page_length = bytes to copy for this page
                 */
                shmem_page_offset = offset_in_page(offset);

                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;

                /* If we don't overwrite a cacheline completely we need to be
                 * careful to have up-to-date data by first clflushing. Don't
                 * overcomplicate things and flush the entire page.
                 */
                partial_cacheline_write = needs_clflush_before &&
                        ((shmem_page_offset | page_length)
                                & (boot_cpu_data.x86_clflush_size - 1));

                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;

                ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
                                        user_data, page_do_bit17_swizzling,
                                        partial_cacheline_write,
                                        needs_clflush_after);
                if (ret == 0)
                        goto next_page;

                hit_slowpath = 1;
                mutex_unlock(&dev->struct_mutex);
                ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
                                        user_data, page_do_bit17_swizzling,
                                        partial_cacheline_write,
                                        needs_clflush_after);

                mutex_lock(&dev->struct_mutex);

                if (ret)
                        goto out;

next_page:
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

out:
        i915_gem_object_unpin_pages(obj);

        if (hit_slowpath) {
                /*
                 * Fixup: Flush cpu caches in case we didn't flush the dirty
                 * cachelines in-line while writing and the object moved
                 * out of the cpu write domain while we've dropped the lock.
                 */
                if (!needs_clflush_after &&
                    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                        if (i915_gem_clflush_object(obj, obj->pin_display))
                                i915_gem_chipset_flush(dev);
                }
        }

        if (needs_clflush_after)
                i915_gem_chipset_flush(dev);

        return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        if (args->size == 0)
                return 0;

        if (!access_ok(VERIFY_READ,
                       to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        if (likely(!i915.prefault_disable)) {
                ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
                                                   args->size);
                if (ret)
                        return -EFAULT;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Bounds check destination. */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
                goto out;
        }

        /* prime objects have no backing filp to GEM pread/pwrite
         * pages from.
         */
        if (!obj->base.filp) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pwrite(obj, args->offset, args->size);

        ret = -EFAULT;
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (obj->tiling_mode == I915_TILING_NONE &&
            obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
            cpu_write_needs_clflush(obj)) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fall back to the shmem path in that case. */
        }

        if (ret == -EFAULT || ret == -ENOSPC) {
                if (obj->phys_handle)
                        ret = i915_gem_phys_pwrite(obj, args, file);
                else
                        ret = i915_gem_shmem_pwrite(dev, obj, args, file);
        }

out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
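
/*
 * Translate the current GPU reset state into an errno: -EIO if the
 * caller cannot cope with -EAGAIN or the GPU is terminally wedged,
 * -EAGAIN while a reset is still pending (unless we are re-initialising
 * the rings from within the reset handler itself, which is what the
 * reload_in_reset flag signals), and 0 when there is nothing to do.
 */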
int
i915_gem_check_wedge(struct i915_gpu_error *error,
                     bool interruptible)
{
        if (i915_reset_in_progress(error)) {
                /* Non-interruptible callers can't handle -EAGAIN, hence return
                 * -EIO unconditionally for these. */
                if (!interruptible)
                        return -EIO;

                /* Recovery complete, but the reset failed ... */
                if (i915_terminally_wedged(error))
                        return -EIO;

                /*
                 * Check if GPU Reset is in progress - we need intel_ring_begin
                 * to work properly to reinit the hw state while the gpu is
                 * still marked as reset-in-progress. Handle this with a flag.
                 */
                if (!error->reload_in_reset)
                        return -EAGAIN;
        }

        return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
int
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
        int ret;

        BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

        ret = 0;
        if (seqno == ring->outstanding_lazy_seqno)
                ret = i915_add_request(ring, NULL);

        return ret;
}
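
/*
 * Interrupt delivery is known to go missing on some rings. When
 * __i915_wait_seqno() sees this (via the missed_irq_rings mask) it arms
 * a one-jiffy timer whose handler simply wakes the waiting task, so the
 * wait degrades to polling instead of sleeping forever on an interrupt
 * that never arrives.
 */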
static void fake_irq(unsigned long data)
{
        wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
                       struct intel_engine_cs *ring)
{
        return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
        if (file_priv == NULL)
                return true;

        return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __i915_wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                      unsigned reset_counter,
                      bool interruptible,
                      s64 *timeout,
                      struct drm_i915_file_private *file_priv)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const bool irq_test_in_progress =
                ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
        DEFINE_WAIT(wait);
        unsigned long timeout_expire;
        s64 before, now;
        int ret;

        WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");

        if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
                return 0;

        timeout_expire = timeout ?
                jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;

        if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
                gen6_rps_boost(dev_priv);
                if (file_priv)
                        mod_delayed_work(dev_priv->wq,
                                         &file_priv->mm.idle_work,
                                         msecs_to_jiffies(100));
        }

        if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
                return -ENODEV;

        /* Record current time in case interrupted by signal, or wedged */
        trace_i915_gem_request_wait_begin(ring, seqno);
        before = ktime_get_raw_ns();
        for (;;) {
                struct timer_list timer;

                prepare_to_wait(&ring->irq_queue, &wait,
                                interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

                /* We need to check whether any gpu reset happened in between
                 * the caller grabbing the seqno and now ... */
                if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
                        /* ... but upgrade the -EAGAIN to an -EIO if the gpu
                         * is truly gone. */
                        ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
                        if (ret == 0)
                                ret = -EAGAIN;
                        break;
                }

                if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
                        ret = 0;
                        break;
                }

                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                if (timeout && time_after_eq(jiffies, timeout_expire)) {
                        ret = -ETIME;
                        break;
                }

                timer.function = NULL;
                if (timeout || missed_irq(dev_priv, ring)) {
                        unsigned long expire;

                        setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
                        expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
                        mod_timer(&timer, expire);
                }

                io_schedule();

                if (timer.function) {
                        del_singleshot_timer_sync(&timer);
                        destroy_timer_on_stack(&timer);
                }
        }
        now = ktime_get_raw_ns();
        trace_i915_gem_request_wait_end(ring, seqno);

        if (!irq_test_in_progress)
                ring->irq_put(ring);

        finish_wait(&ring->irq_queue, &wait);

        if (timeout) {
                s64 tres = *timeout - (now - before);

                *timeout = tres < 0 ? 0 : tres;

                /*
                 * Apparently ktime isn't accurate enough and occasionally has a
                 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
                 * things up to make the test happy. We allow up to 1 jiffy.
                 *
                 * This is a regression from the timespec->ktime conversion.
                 */
                if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
                        *timeout = 0;
        }

        return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool interruptible = dev_priv->mm.interruptible;
        unsigned reset_counter;
        int ret;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(seqno == 0);

        ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
        if (ret)
                return ret;

        ret = i915_gem_check_olr(ring, seqno);
        if (ret)
                return ret;

        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
                                 NULL, NULL);
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
{
        if (!obj->active)
                return 0;

        /* Manually manage the write flush as we may have not yet
         * retired the buffer.
         *
         * Note that the last_write_seqno is always the earlier of
         * the two (read/write) seqno, so if we have successfully waited,
         * we know we have passed the last write.
         */
        obj->last_write_seqno = 0;

        return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly)
{
        struct intel_engine_cs *ring = obj->ring;
        u32 seqno;
        int ret;

        seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
        if (seqno == 0)
                return 0;

        ret = i915_wait_seqno(ring, seqno);
        if (ret)
                return ret;

        return i915_gem_object_wait_rendering__tail(obj);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                                            struct drm_i915_file_private *file_priv,
                                            bool readonly)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = obj->ring;
        unsigned reset_counter;
        u32 seqno;
        int ret;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(!dev_priv->mm.interruptible);

        seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
        if (seqno == 0)
                return 0;

        ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
        if (ret)
                return ret;

        ret = i915_gem_check_olr(ring, seqno);
        if (ret)
                return ret;

        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        mutex_unlock(&dev->struct_mutex);
        ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
                                file_priv);
        mutex_lock(&dev->struct_mutex);
        if (ret)
                return ret;

        return i915_gem_object_wait_rendering__tail(obj);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file)
{
        struct drm_i915_gem_set_domain *args = data;
        struct drm_i915_gem_object *obj;
        uint32_t read_domains = args->read_domains;
        uint32_t write_domain = args->write_domain;
        int ret;

        /* Only handle setting domains to types used by the CPU. */
        if (write_domain & I915_GEM_GPU_DOMAINS)
                return -EINVAL;

        if (read_domains & I915_GEM_GPU_DOMAINS)
                return -EINVAL;

        /* Having something in the write domain implies it's in the read
         * domain, and only that read domain. Enforce that in the request.
         */
        if (write_domain != 0 && read_domains != write_domain)
                return -EINVAL;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Try to flush the object off the GPU without holding the lock.
         * We will repeat the flush holding the lock in the normal manner
         * to catch cases where we are gazumped.
         */
        ret = i915_gem_object_wait_rendering__nonblocking(obj,
                                                          file->driver_priv,
                                                          !write_domain);
        if (ret)
                goto unref;

        if (read_domains & I915_GEM_DOMAIN_GTT) {
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

                /* Silently promote "you're not bound, there was nothing to do"
                 * to success, since the client was just asking us to
                 * make sure everything was done.
                 */
                if (ret == -EINVAL)
                        ret = 0;
        } else {
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
        }

unref:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_i915_gem_object *obj;
        int ret = 0;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Pinned buffers may be scanout, so flush the cache */
        if (obj->pin_display)
                i915_gem_object_flush_cpu_write_domain(obj, true);

        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;
        unsigned long addr;

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (obj == NULL)
                return -ENOENT;

        /* prime objects have no backing filp to GEM mmap
         * pages from.
         */
        if (!obj->filp) {
                drm_gem_object_unreference_unlocked(obj);
                return -EINVAL;
        }

        addr = vm_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
        drm_gem_object_unreference_unlocked(obj);
        if (IS_ERR((void *)addr))
                return addr;

        args->addr_ptr = (uint64_t) addr;

        return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        pgoff_t page_offset;
        unsigned long pfn;
        int ret = 0;
        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

        intel_runtime_pm_get(dev_priv);

        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
                PAGE_SHIFT;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto out;

        trace_i915_gem_object_fault(obj, page_offset, true, write);

        /* Try to flush the object off the GPU first without holding the lock.
         * Upon reacquiring the lock, we will perform our sanity checks and then
         * repeat the flush holding the lock in the normal manner to catch cases
         * where we are gazumped.
         */
        ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
        if (ret)
                goto unlock;

        /* Access to snoopable pages through the GTT is incoherent. */
        if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
                ret = -EFAULT;
                goto unlock;
        }

        /* Now bind it into the GTT if needed */
        ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
        if (ret)
                goto unlock;

        ret = i915_gem_object_set_to_gtt_domain(obj, write);
        if (ret)
                goto unpin;

        ret = i915_gem_object_get_fence(obj);
        if (ret)
                goto unpin;

        /* Finally, remap it using the new GTT offset */
        pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
        pfn >>= PAGE_SHIFT;

        if (!obj->fault_mappable) {
                unsigned long size = min_t(unsigned long,
                                           vma->vm_end - vma->vm_start,
                                           obj->base.size);
                int i;

                for (i = 0; i < size >> PAGE_SHIFT; i++) {
                        ret = vm_insert_pfn(vma,
                                            (unsigned long)vma->vm_start + i * PAGE_SIZE,
                                            pfn + i);
                        if (ret)
                                break;
                }

                obj->fault_mappable = true;
        } else
                ret = vm_insert_pfn(vma,
                                    (unsigned long)vmf->virtual_address,
                                    pfn + page_offset);
unpin:
        i915_gem_object_ggtt_unpin(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
out:
        switch (ret) {
        case -EIO:
                /*
                 * We eat errors when the gpu is terminally wedged to avoid
                 * userspace unduly crashing (gl has no provisions for mmaps to
                 * fail). But any other -EIO isn't ours (e.g. swap in failure)
                 * and so needs to be reported.
                 */
                if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
                        ret = VM_FAULT_SIGBUS;
                        break;
                }
        case -EAGAIN:
                /*
                 * EAGAIN means the gpu is hung and we'll wait for the error
                 * handler to reset everything when re-faulting in
                 * i915_mutex_lock_interruptible.
                 */
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
                /*
                 * EBUSY is ok: this just means that another thread
                 * already did the job.
                 */
                ret = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                ret = VM_FAULT_OOM;
                break;
        case -ENOSPC:
        case -EFAULT:
                ret = VM_FAULT_SIGBUS;
                break;
        default:
                WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
                ret = VM_FAULT_SIGBUS;
                break;
        }

        intel_runtime_pm_put(dev_priv);
        return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
        if (!obj->fault_mappable)
                return;

        drm_vma_node_unmap(&obj->base.vma_node,
                           obj->base.dev->anon_inode->i_mapping);
        obj->fault_mappable = false;
}
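
/*
 * Revoke the userspace mappings of every bound object in one go, rather
 * than a single object as i915_gem_release_mmap() above does; presumably
 * for use when the whole aperture mapping becomes invalid at once (e.g.
 * across a suspend/resume cycle).
 */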
void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
        struct drm_i915_gem_object *obj;

        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
                i915_gem_release_mmap(obj);
}
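
/**
 * i915_gem_get_gtt_size - return the size a fenced region must have
 * @dev: DRM device
 * @size: object size
 * @tiling_mode: tiling mode of the object
 *
 * Gen4+ hardware fences at page granularity, so the object size can be
 * used directly. Older chips need a power-of-two fence region with a
 * minimum of 1MiB on gen3 and 512KiB before that, so round up to that.
 */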
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
        uint32_t gtt_size;

        if (INTEL_INFO(dev)->gen >= 4 ||
            tiling_mode == I915_TILING_NONE)
                return size;

        /* Previous chips need a power-of-two fence region when tiling */
        if (INTEL_INFO(dev)->gen == 3)
                gtt_size = 1024*1024;
        else
                gtt_size = 512*1024;

        while (gtt_size < size)
                gtt_size <<= 1;

        return gtt_size;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
                           int tiling_mode, bool fenced)
{
        /*
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
        if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
            tiling_mode == I915_TILING_NONE)
                return 4096;

        /*
         * Previous chips need to be aligned to the size of the smallest
         * fence register that can contain the object.
         */
        return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        int ret;

        if (drm_vma_node_has_offset(&obj->base.vma_node))
                return 0;

        dev_priv->mm.shrinker_no_lock_stealing = true;

        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
                goto out;

        /* Badly fragmented mmap space? The only way we can recover
         * space is by destroying unwanted objects. We can't randomly release
         * mmap_offsets as userspace expects them to be persistent for the
         * lifetime of the objects. The closest we can do is to release the
         * offsets on purgeable objects by truncating the object and marking
         * it purged, which prevents userspace from ever using that object
         * again.
         */
        i915_gem_shrink(dev_priv,
                        obj->base.size >> PAGE_SHIFT,
                        I915_SHRINK_BOUND |
                        I915_SHRINK_UNBOUND |
                        I915_SHRINK_PURGEABLE);
        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
                goto out;

        i915_gem_shrink_all(dev_priv);
        ret = drm_gem_create_mmap_offset(&obj->base);
out:
        dev_priv->mm.shrinker_no_lock_stealing = false;

        return ret;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
        drm_gem_free_mmap_offset(&obj->base);
}

static int
i915_gem_mmap_gtt(struct drm_file *file,
                  struct drm_device *dev,
                  uint32_t handle, bool dumb,
                  uint64_t *offset)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /*
         * We don't allow dumb mmaps on objects created using another
         * interface.
         */
        WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
                  "Illegal dumb map of accelerated buffer.\n");

        if (obj->base.size > dev_priv->gtt.mappable_end) {
                ret = -E2BIG;
                goto out;
        }

        if (obj->madv != I915_MADV_WILLNEED) {
                DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
                ret = -EFAULT;
                goto out;
        }

        ret = i915_gem_object_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->base.vma_node);

out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

int
i915_gem_dumb_map_offset(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle,
                         uint64_t *offset)
{
        return i915_gem_mmap_gtt(file, dev, handle, true, offset);
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file)
{
        struct drm_i915_gem_mmap_gtt *args = data;

        return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
        return obj->madv == I915_MADV_DONTNEED;
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
        i915_gem_object_free_mmap_offset(obj);

        if (obj->base.filp == NULL)
                return;

        /* Our goal here is to return as much of the memory as
         * possible back to the system as we are called from OOM.
         * To do this we must instruct the shmfs to drop all of its
         * backing pages, *now*.
         */
        shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
        obj->madv = __I915_MADV_PURGED;
}

/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
        struct address_space *mapping;

        switch (obj->madv) {
        case I915_MADV_DONTNEED:
                i915_gem_object_truncate(obj);
        case __I915_MADV_PURGED:
                return;
        }

        if (obj->base.filp == NULL)
                return;

        mapping = file_inode(obj->base.filp)->i_mapping;
        invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
        struct sg_page_iter sg_iter;
        int ret;

        BUG_ON(obj->madv == __I915_MADV_PURGED);

        ret = i915_gem_object_set_to_cpu_domain(obj, true);
        if (ret) {
                /* In the event of a disaster, abandon all caches and
                 * hope for the best.
                 */
                WARN_ON(ret != -EIO);
                i915_gem_clflush_object(obj, true);
                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }

        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_save_bit_17_swizzle(obj);

        if (obj->madv == I915_MADV_DONTNEED)
                obj->dirty = 0;

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);

                if (obj->dirty)
                        set_page_dirty(page);

                if (obj->madv == I915_MADV_WILLNEED)
                        mark_page_accessed(page);

                page_cache_release(page);
        }
        obj->dirty = 0;

        sg_free_table(obj->pages);
        kfree(obj->pages);
}

int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
        const struct drm_i915_gem_object_ops *ops = obj->ops;

        if (obj->pages == NULL)
                return 0;

        if (obj->pages_pin_count)
                return -EBUSY;

        BUG_ON(i915_gem_obj_bound_any(obj));

        /* ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect them from being reaped by removing them from gtt
         * lists early. */
        list_del(&obj->global_list);

        ops->put_pages(obj);
        obj->pages = NULL;

        i915_gem_object_invalidate(obj);

        return 0;
}
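
/**
 * i915_gem_shrink - release backing pages until @target pages are freed
 * @dev_priv: i915 device
 * @target: number of pages wanted back
 * @flags: which object lists to scan (I915_SHRINK_BOUND and/or
 *         I915_SHRINK_UNBOUND), optionally restricted to purgeable
 *         objects (I915_SHRINK_PURGEABLE)
 *
 * Unbound objects are reaped first since that only drops their pages;
 * bound objects additionally have to be unbound from the GTT. Returns
 * the number of pages actually released.
 */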
2064 * 2065 * Also note that although these lists do not hold a reference to 2066 * the object we can safely grab one here: The final object 2067 * unreferencing and the bound_list are both protected by the 2068 * dev->struct_mutex and so we won't ever be able to observe an 2069 * object on the bound_list with a reference count equals 0. 2070 */ 2071 for (phase = phases; phase->list; phase++) { 2072 struct list_head still_in_list; 2073 2074 if ((flags & phase->bit) == 0) 2075 continue; 2076 2077 INIT_LIST_HEAD(&still_in_list); 2078 while (count < target && !list_empty(phase->list)) { 2079 struct drm_i915_gem_object *obj; 2080 struct i915_vma *vma, *v; 2081 2082 obj = list_first_entry(phase->list, 2083 typeof(*obj), global_list); 2084 list_move_tail(&obj->global_list, &still_in_list); 2085 2086 if (flags & I915_SHRINK_PURGEABLE && 2087 !i915_gem_object_is_purgeable(obj)) 2088 continue; 2089 2090 drm_gem_object_reference(&obj->base); 2091 2092 /* For the unbound phase, this should be a no-op! */ 2093 list_for_each_entry_safe(vma, v, 2094 &obj->vma_list, vma_link) 2095 if (i915_vma_unbind(vma)) 2096 break; 2097 2098 if (i915_gem_object_put_pages(obj) == 0) 2099 count += obj->base.size >> PAGE_SHIFT; 2100 2101 drm_gem_object_unreference(&obj->base); 2102 } 2103 list_splice(&still_in_list, phase->list); 2104 } 2105 2106 return count; 2107 } 2108 2109 static unsigned long 2110 i915_gem_shrink_all(struct drm_i915_private *dev_priv) 2111 { 2112 i915_gem_evict_everything(dev_priv->dev); 2113 return i915_gem_shrink(dev_priv, LONG_MAX, 2114 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); 2115 } 2116 2117 static int 2118 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) 2119 { 2120 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2121 int page_count, i; 2122 struct address_space *mapping; 2123 struct sg_table *st; 2124 struct scatterlist *sg; 2125 struct sg_page_iter sg_iter; 2126 struct page *page; 2127 unsigned long last_pfn = 0; /* suppress gcc warning */ 2128 gfp_t gfp; 2129 2130 /* Assert that the object is not currently in any GPU domain. As it 2131 * wasn't in the GTT, there shouldn't be any way it could have been in 2132 * a GPU cache 2133 */ 2134 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); 2135 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); 2136 2137 st = kmalloc(sizeof(*st), GFP_KERNEL); 2138 if (st == NULL) 2139 return -ENOMEM; 2140 2141 page_count = obj->base.size / PAGE_SIZE; 2142 if (sg_alloc_table(st, page_count, GFP_KERNEL)) { 2143 kfree(st); 2144 return -ENOMEM; 2145 } 2146 2147 /* Get the list of pages out of our struct file. They'll be pinned 2148 * at this point until we release them. 2149 * 2150 * Fail silently without starting the shrinker 2151 */ 2152 mapping = file_inode(obj->base.filp)->i_mapping; 2153 gfp = mapping_gfp_mask(mapping); 2154 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; 2155 gfp &= ~(__GFP_IO | __GFP_WAIT); 2156 sg = st->sgl; 2157 st->nents = 0; 2158 for (i = 0; i < page_count; i++) { 2159 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2160 if (IS_ERR(page)) { 2161 i915_gem_shrink(dev_priv, 2162 page_count, 2163 I915_SHRINK_BOUND | 2164 I915_SHRINK_UNBOUND | 2165 I915_SHRINK_PURGEABLE); 2166 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2167 } 2168 if (IS_ERR(page)) { 2169 /* We've tried hard to allocate the memory by reaping 2170 * our own buffer, now let the real VM do its job and 2171 * go down in flames if truly OOM. 
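 * (The escalation mirrors the gfp dance above: the first two attempts
 * use the inode's mapping gfp with __GFP_NORETRY | __GFP_NOWARN added
 * and __GFP_IO | __GFP_WAIT cleared, so shmemfs fails fast rather than
 * entering direct reclaim; only this final attempt retries with the
 * unmodified mapping gfp via shmem_read_mapping_page().)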
2172 */ 2173 i915_gem_shrink_all(dev_priv); 2174 page = shmem_read_mapping_page(mapping, i); 2175 if (IS_ERR(page)) 2176 goto err_pages; 2177 } 2178 #ifdef CONFIG_SWIOTLB 2179 if (swiotlb_nr_tbl()) { 2180 st->nents++; 2181 sg_set_page(sg, page, PAGE_SIZE, 0); 2182 sg = sg_next(sg); 2183 continue; 2184 } 2185 #endif 2186 if (!i || page_to_pfn(page) != last_pfn + 1) { 2187 if (i) 2188 sg = sg_next(sg); 2189 st->nents++; 2190 sg_set_page(sg, page, PAGE_SIZE, 0); 2191 } else { 2192 sg->length += PAGE_SIZE; 2193 } 2194 last_pfn = page_to_pfn(page); 2195 2196 /* Check that the i965g/gm workaround works. */ 2197 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); 2198 } 2199 #ifdef CONFIG_SWIOTLB 2200 if (!swiotlb_nr_tbl()) 2201 #endif 2202 sg_mark_end(sg); 2203 obj->pages = st; 2204 2205 if (i915_gem_object_needs_bit17_swizzle(obj)) 2206 i915_gem_object_do_bit_17_swizzle(obj); 2207 2208 if (obj->tiling_mode != I915_TILING_NONE && 2209 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 2210 i915_gem_object_pin_pages(obj); 2211 2212 return 0; 2213 2214 err_pages: 2215 sg_mark_end(sg); 2216 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) 2217 page_cache_release(sg_page_iter_page(&sg_iter)); 2218 sg_free_table(st); 2219 kfree(st); 2220 2221 /* shmemfs first checks if there is enough memory to allocate the page 2222 * and reports ENOSPC should there be insufficient, along with the usual 2223 * ENOMEM for a genuine allocation failure. 2224 * 2225 * We use ENOSPC in our driver to mean that we have run out of aperture 2226 * space and so want to translate the error from shmemfs back to our 2227 * usual understanding of ENOMEM. 2228 */ 2229 if (PTR_ERR(page) == -ENOSPC) 2230 return -ENOMEM; 2231 else 2232 return PTR_ERR(page); 2233 } 2234 2235 /* Ensure that the associated pages are gathered from the backing storage 2236 * and pinned into our object. i915_gem_object_get_pages() may be called 2237 * multiple times before they are released by a single call to 2238 * i915_gem_object_put_pages() - once the pages are no longer referenced 2239 * either as a result of memory pressure (reaping pages under the shrinker) 2240 * or as the object is itself released. 2241 */ 2242 int 2243 i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2244 { 2245 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2246 const struct drm_i915_gem_object_ops *ops = obj->ops; 2247 int ret; 2248 2249 if (obj->pages) 2250 return 0; 2251 2252 if (obj->madv != I915_MADV_WILLNEED) { 2253 DRM_DEBUG("Attempting to obtain a purgeable object\n"); 2254 return -EFAULT; 2255 } 2256 2257 BUG_ON(obj->pages_pin_count); 2258 2259 ret = ops->get_pages(obj); 2260 if (ret) 2261 return ret; 2262 2263 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list); 2264 return 0; 2265 } 2266 2267 static void 2268 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 2269 struct intel_engine_cs *ring) 2270 { 2271 u32 seqno = intel_ring_get_seqno(ring); 2272 2273 BUG_ON(ring == NULL); 2274 if (obj->ring != ring && obj->last_write_seqno) { 2275 /* Keep the seqno relative to the current ring */ 2276 obj->last_write_seqno = seqno; 2277 } 2278 obj->ring = ring; 2279 2280 /* Add a reference if we're newly entering the active list. 
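 * The reference taken here is dropped again in
 * i915_gem_object_move_to_inactive() once the object's last read
 * request has been retired, so an active object can never be freed
 * beneath the GPU.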
*/ 2281 if (!obj->active) { 2282 drm_gem_object_reference(&obj->base); 2283 obj->active = 1; 2284 } 2285 2286 list_move_tail(&obj->ring_list, &ring->active_list); 2287 2288 obj->last_read_seqno = seqno; 2289 } 2290 2291 void i915_vma_move_to_active(struct i915_vma *vma, 2292 struct intel_engine_cs *ring) 2293 { 2294 list_move_tail(&vma->mm_list, &vma->vm->active_list); 2295 return i915_gem_object_move_to_active(vma->obj, ring); 2296 } 2297 2298 static void 2299 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) 2300 { 2301 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2302 struct i915_address_space *vm; 2303 struct i915_vma *vma; 2304 2305 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); 2306 BUG_ON(!obj->active); 2307 2308 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 2309 vma = i915_gem_obj_to_vma(obj, vm); 2310 if (vma && !list_empty(&vma->mm_list)) 2311 list_move_tail(&vma->mm_list, &vm->inactive_list); 2312 } 2313 2314 intel_fb_obj_flush(obj, true); 2315 2316 list_del_init(&obj->ring_list); 2317 obj->ring = NULL; 2318 2319 obj->last_read_seqno = 0; 2320 obj->last_write_seqno = 0; 2321 obj->base.write_domain = 0; 2322 2323 obj->last_fenced_seqno = 0; 2324 2325 obj->active = 0; 2326 drm_gem_object_unreference(&obj->base); 2327 2328 WARN_ON(i915_verify_lists(dev)); 2329 } 2330 2331 static void 2332 i915_gem_object_retire(struct drm_i915_gem_object *obj) 2333 { 2334 struct intel_engine_cs *ring = obj->ring; 2335 2336 if (ring == NULL) 2337 return; 2338 2339 if (i915_seqno_passed(ring->get_seqno(ring, true), 2340 obj->last_read_seqno)) 2341 i915_gem_object_move_to_inactive(obj); 2342 } 2343 2344 static int 2345 i915_gem_init_seqno(struct drm_device *dev, u32 seqno) 2346 { 2347 struct drm_i915_private *dev_priv = dev->dev_private; 2348 struct intel_engine_cs *ring; 2349 int ret, i, j; 2350 2351 /* Carefully retire all requests without writing to the rings */ 2352 for_each_ring(ring, dev_priv, i) { 2353 ret = intel_ring_idle(ring); 2354 if (ret) 2355 return ret; 2356 } 2357 i915_gem_retire_requests(dev); 2358 2359 /* Finally reset hw state */ 2360 for_each_ring(ring, dev_priv, i) { 2361 intel_ring_init_seqno(ring, seqno); 2362 2363 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++) 2364 ring->semaphore.sync_seqno[j] = 0; 2365 } 2366 2367 return 0; 2368 } 2369 2370 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) 2371 { 2372 struct drm_i915_private *dev_priv = dev->dev_private; 2373 int ret; 2374 2375 if (seqno == 0) 2376 return -EINVAL; 2377 2378 /* The seqno in the HWS page needs to be set to a value 2379 * lower than the one we will inject into the ring 2380 */ 2381 ret = i915_gem_init_seqno(dev, seqno - 1); 2382 if (ret) 2383 return ret; 2384 2385 /* Carefully set the last_seqno value so that wrap 2386 * detection still works 2387 */ 2388 dev_priv->next_seqno = seqno; 2389 dev_priv->last_seqno = seqno - 1; 2390 if (dev_priv->last_seqno == 0) 2391 dev_priv->last_seqno--; 2392 2393 return 0; 2394 } 2395 2396 int 2397 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) 2398 { 2399 struct drm_i915_private *dev_priv = dev->dev_private; 2400 2401 /* reserve 0 for non-seqno */ 2402 if (dev_priv->next_seqno == 0) { 2403 int ret = i915_gem_init_seqno(dev, 0); 2404 if (ret) 2405 return ret; 2406 2407 dev_priv->next_seqno = 1; 2408 } 2409 2410 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++; 2411 return 0; 2412 } 2413 2414 int __i915_add_request(struct intel_engine_cs *ring, 2415 struct drm_file *file, 2416 struct drm_i915_gem_object *obj, 2417 u32
*out_seqno) 2418 { 2419 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2420 struct drm_i915_gem_request *request; 2421 struct intel_ringbuffer *ringbuf; 2422 u32 request_ring_position, request_start; 2423 int ret; 2424 2425 request = ring->preallocated_lazy_request; 2426 if (WARN_ON(request == NULL)) 2427 return -ENOMEM; 2428 2429 if (i915.enable_execlists) { 2430 struct intel_context *ctx = request->ctx; 2431 ringbuf = ctx->engine[ring->id].ringbuf; 2432 } else 2433 ringbuf = ring->buffer; 2434 2435 request_start = intel_ring_get_tail(ringbuf); 2436 /* 2437 * Emit any outstanding flushes - execbuf can fail to emit the flush 2438 * after having emitted the batchbuffer command. Hence we need to fix 2439 * things up similarly to emitting the lazy request. The difference here 2440 * is that the flush _must_ happen before the next request, no matter 2441 * what. 2442 */ 2443 if (i915.enable_execlists) { 2444 ret = logical_ring_flush_all_caches(ringbuf); 2445 if (ret) 2446 return ret; 2447 } else { 2448 ret = intel_ring_flush_all_caches(ring); 2449 if (ret) 2450 return ret; 2451 } 2452 2453 /* Record the position of the start of the request so that 2454 * should we detect the updated seqno part-way through the 2455 * GPU processing the request, we never over-estimate the 2456 * position of the head. 2457 */ 2458 request_ring_position = intel_ring_get_tail(ringbuf); 2459 2460 if (i915.enable_execlists) { 2461 ret = ring->emit_request(ringbuf); 2462 if (ret) 2463 return ret; 2464 } else { 2465 ret = ring->add_request(ring); 2466 if (ret) 2467 return ret; 2468 } 2469 2470 request->seqno = intel_ring_get_seqno(ring); 2471 request->ring = ring; 2472 request->head = request_start; 2473 request->tail = request_ring_position; 2474 2475 /* Whilst this request exists, batch_obj will be on the 2476 * active_list, and so will hold the active reference. Only when this 2477 * request is retired will the batch_obj be moved onto the 2478 * inactive_list and lose its active reference. Hence we do not need 2479 * to explicitly hold another reference here. 2480 */ 2481 request->batch_obj = obj; 2482 2483 if (!i915.enable_execlists) { 2484 /* Hold a reference to the current context so that we can inspect 2485 * it later in case a hangcheck error event fires.
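 * The context reference taken here is dropped in
 * i915_gem_free_request(), either when the request is retired or when
 * the ring is cleaned up after a reset.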
2486 */ 2487 request->ctx = ring->last_context; 2488 if (request->ctx) 2489 i915_gem_context_reference(request->ctx); 2490 } 2491 2492 request->emitted_jiffies = jiffies; 2493 list_add_tail(&request->list, &ring->request_list); 2494 request->file_priv = NULL; 2495 2496 if (file) { 2497 struct drm_i915_file_private *file_priv = file->driver_priv; 2498 2499 spin_lock(&file_priv->mm.lock); 2500 request->file_priv = file_priv; 2501 list_add_tail(&request->client_list, 2502 &file_priv->mm.request_list); 2503 spin_unlock(&file_priv->mm.lock); 2504 } 2505 2506 trace_i915_gem_request_add(ring, request->seqno); 2507 ring->outstanding_lazy_seqno = 0; 2508 ring->preallocated_lazy_request = NULL; 2509 2510 i915_queue_hangcheck(ring->dev); 2511 2512 cancel_delayed_work_sync(&dev_priv->mm.idle_work); 2513 queue_delayed_work(dev_priv->wq, 2514 &dev_priv->mm.retire_work, 2515 round_jiffies_up_relative(HZ)); 2516 intel_mark_busy(dev_priv->dev); 2517 2518 if (out_seqno) 2519 *out_seqno = request->seqno; 2520 return 0; 2521 } 2522 2523 static inline void 2524 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) 2525 { 2526 struct drm_i915_file_private *file_priv = request->file_priv; 2527 2528 if (!file_priv) 2529 return; 2530 2531 spin_lock(&file_priv->mm.lock); 2532 list_del(&request->client_list); 2533 request->file_priv = NULL; 2534 spin_unlock(&file_priv->mm.lock); 2535 } 2536 2537 static bool i915_context_is_banned(struct drm_i915_private *dev_priv, 2538 const struct intel_context *ctx) 2539 { 2540 unsigned long elapsed; 2541 2542 elapsed = get_seconds() - ctx->hang_stats.guilty_ts; 2543 2544 if (ctx->hang_stats.banned) 2545 return true; 2546 2547 if (elapsed <= DRM_I915_CTX_BAN_PERIOD) { 2548 if (!i915_gem_context_is_default(ctx)) { 2549 DRM_DEBUG("context hanging too fast, banning!\n"); 2550 return true; 2551 } else if (i915_stop_ring_allow_ban(dev_priv)) { 2552 if (i915_stop_ring_allow_warn(dev_priv)) 2553 DRM_ERROR("gpu hanging too fast, banning!\n"); 2554 return true; 2555 } 2556 } 2557 2558 return false; 2559 } 2560 2561 static void i915_set_reset_status(struct drm_i915_private *dev_priv, 2562 struct intel_context *ctx, 2563 const bool guilty) 2564 { 2565 struct i915_ctx_hang_stats *hs; 2566 2567 if (WARN_ON(!ctx)) 2568 return; 2569 2570 hs = &ctx->hang_stats; 2571 2572 if (guilty) { 2573 hs->banned = i915_context_is_banned(dev_priv, ctx); 2574 hs->batch_active++; 2575 hs->guilty_ts = get_seconds(); 2576 } else { 2577 hs->batch_pending++; 2578 } 2579 } 2580 2581 static void i915_gem_free_request(struct drm_i915_gem_request *request) 2582 { 2583 struct intel_context *ctx = request->ctx; 2584 2585 list_del(&request->list); 2586 i915_gem_request_remove_from_client(request); 2587 2588 if (ctx) { 2589 if (i915.enable_execlists) { 2590 struct intel_engine_cs *ring = request->ring; 2591 2592 if (ctx != ring->default_context) 2593 intel_lr_context_unpin(ring, ctx); 2594 } 2595 i915_gem_context_unreference(ctx); 2596 } 2597 kfree(request); 2598 } 2599 2600 struct drm_i915_gem_request * 2601 i915_gem_find_active_request(struct intel_engine_cs *ring) 2602 { 2603 struct drm_i915_gem_request *request; 2604 u32 completed_seqno; 2605 2606 completed_seqno = ring->get_seqno(ring, false); 2607 2608 list_for_each_entry(request, &ring->request_list, list) { 2609 if (i915_seqno_passed(completed_seqno, request->seqno)) 2610 continue; 2611 2612 return request; 2613 } 2614 2615 return NULL; 2616 } 2617 2618 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, 2619 struct 
intel_engine_cs *ring) 2620 { 2621 struct drm_i915_gem_request *request; 2622 bool ring_hung; 2623 2624 request = i915_gem_find_active_request(ring); 2625 2626 if (request == NULL) 2627 return; 2628 2629 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; 2630 2631 i915_set_reset_status(dev_priv, request->ctx, ring_hung); 2632 2633 list_for_each_entry_continue(request, &ring->request_list, list) 2634 i915_set_reset_status(dev_priv, request->ctx, false); 2635 } 2636 2637 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, 2638 struct intel_engine_cs *ring) 2639 { 2640 while (!list_empty(&ring->active_list)) { 2641 struct drm_i915_gem_object *obj; 2642 2643 obj = list_first_entry(&ring->active_list, 2644 struct drm_i915_gem_object, 2645 ring_list); 2646 2647 i915_gem_object_move_to_inactive(obj); 2648 } 2649 2650 /* 2651 * Clear up the execlists queue before freeing the requests, as those 2652 * are the ones that keep the context and ringbuffer backing objects 2653 * pinned in place. 2654 */ 2655 while (!list_empty(&ring->execlist_queue)) { 2656 struct intel_ctx_submit_request *submit_req; 2657 2658 submit_req = list_first_entry(&ring->execlist_queue, 2659 struct intel_ctx_submit_request, 2660 execlist_link); 2661 list_del(&submit_req->execlist_link); 2662 intel_runtime_pm_put(dev_priv); 2663 i915_gem_context_unreference(submit_req->ctx); 2664 kfree(submit_req); 2665 } 2666 2667 /* 2668 * We must free the requests after all the corresponding objects have 2669 * been moved off the active lists, which is the same order the normal 2670 * retire_requests path uses. This is important if objects hold 2671 * implicit references on things like e.g. ppgtt address spaces through 2672 * the request. 2673 */ 2674 while (!list_empty(&ring->request_list)) { 2675 struct drm_i915_gem_request *request; 2676 2677 request = list_first_entry(&ring->request_list, 2678 struct drm_i915_gem_request, 2679 list); 2680 2681 i915_gem_free_request(request); 2682 } 2683 2684 /* These may not have been flushed before the reset, so do so now */ 2685 kfree(ring->preallocated_lazy_request); 2686 ring->preallocated_lazy_request = NULL; 2687 ring->outstanding_lazy_seqno = 0; 2688 } 2689 2690 void i915_gem_restore_fences(struct drm_device *dev) 2691 { 2692 struct drm_i915_private *dev_priv = dev->dev_private; 2693 int i; 2694 2695 for (i = 0; i < dev_priv->num_fence_regs; i++) { 2696 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 2697 2698 /* 2699 * Commit delayed tiling changes if we have an object still 2700 * attached to the fence, otherwise just clear the fence. 2701 */ 2702 if (reg->obj) { 2703 i915_gem_object_update_fence(reg->obj, reg, 2704 reg->obj->tiling_mode); 2705 } else { 2706 i915_gem_write_fence(dev, i, NULL); 2707 } 2708 } 2709 } 2710 2711 void i915_gem_reset(struct drm_device *dev) 2712 { 2713 struct drm_i915_private *dev_priv = dev->dev_private; 2714 struct intel_engine_cs *ring; 2715 int i; 2716 2717 /* 2718 * Before we free the objects from the requests, we need to inspect 2719 * them to find the guilty party. As the requests only borrow 2720 * their reference to the objects, the inspection must be done first.
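 * Hence the two separate passes below: the status pass walks the
 * still-intact request lists first, and only then does the cleanup
 * pass retire the objects and free the requests.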
*/ 2722 for_each_ring(ring, dev_priv, i) 2723 i915_gem_reset_ring_status(dev_priv, ring); 2724 2725 for_each_ring(ring, dev_priv, i) 2726 i915_gem_reset_ring_cleanup(dev_priv, ring); 2727 2728 i915_gem_context_reset(dev); 2729 2730 i915_gem_restore_fences(dev); 2731 } 2732 2733 /** 2734 * This function clears the request list as sequence numbers are passed. 2735 */ 2736 void 2737 i915_gem_retire_requests_ring(struct intel_engine_cs *ring) 2738 { 2739 uint32_t seqno; 2740 2741 if (list_empty(&ring->request_list)) 2742 return; 2743 2744 WARN_ON(i915_verify_lists(ring->dev)); 2745 2746 seqno = ring->get_seqno(ring, true); 2747 2748 /* Move any buffers on the active list that are no longer referenced 2749 * by the ringbuffer to the flushing/inactive lists as appropriate, 2750 * before we free the context associated with the requests. 2751 */ 2752 while (!list_empty(&ring->active_list)) { 2753 struct drm_i915_gem_object *obj; 2754 2755 obj = list_first_entry(&ring->active_list, 2756 struct drm_i915_gem_object, 2757 ring_list); 2758 2759 if (!i915_seqno_passed(seqno, obj->last_read_seqno)) 2760 break; 2761 2762 i915_gem_object_move_to_inactive(obj); 2763 } 2764 2765 2766 while (!list_empty(&ring->request_list)) { 2767 struct drm_i915_gem_request *request; 2768 struct intel_ringbuffer *ringbuf; 2769 2770 request = list_first_entry(&ring->request_list, 2771 struct drm_i915_gem_request, 2772 list); 2773 2774 if (!i915_seqno_passed(seqno, request->seqno)) 2775 break; 2776 2777 trace_i915_gem_request_retire(ring, request->seqno); 2778 2779 /* This is one of the few common intersection points 2780 * between legacy ringbuffer submission and execlists: 2781 * we need to tell them apart in order to find the correct 2782 * ringbuffer to which the request belongs. 2783 */ 2784 if (i915.enable_execlists) { 2785 struct intel_context *ctx = request->ctx; 2786 ringbuf = ctx->engine[ring->id].ringbuf; 2787 } else 2788 ringbuf = ring->buffer; 2789 2790 /* We know the GPU must have read the request to have 2791 * sent us the seqno + interrupt, so use the position 2792 * of the tail of the request to update the last known position 2793 * of the GPU head.
*/ 2795 ringbuf->last_retired_head = request->tail; 2796 2797 i915_gem_free_request(request); 2798 } 2799 2800 if (unlikely(ring->trace_irq_seqno && 2801 i915_seqno_passed(seqno, ring->trace_irq_seqno))) { 2802 ring->irq_put(ring); 2803 ring->trace_irq_seqno = 0; 2804 } 2805 2806 WARN_ON(i915_verify_lists(ring->dev)); 2807 } 2808 2809 bool 2810 i915_gem_retire_requests(struct drm_device *dev) 2811 { 2812 struct drm_i915_private *dev_priv = dev->dev_private; 2813 struct intel_engine_cs *ring; 2814 bool idle = true; 2815 int i; 2816 2817 for_each_ring(ring, dev_priv, i) { 2818 i915_gem_retire_requests_ring(ring); 2819 idle &= list_empty(&ring->request_list); 2820 if (i915.enable_execlists) { 2821 unsigned long flags; 2822 2823 spin_lock_irqsave(&ring->execlist_lock, flags); 2824 idle &= list_empty(&ring->execlist_queue); 2825 spin_unlock_irqrestore(&ring->execlist_lock, flags); 2826 2827 intel_execlists_retire_requests(ring); 2828 } 2829 } 2830 2831 if (idle) 2832 mod_delayed_work(dev_priv->wq, 2833 &dev_priv->mm.idle_work, 2834 msecs_to_jiffies(100)); 2835 2836 return idle; 2837 } 2838 2839 static void 2840 i915_gem_retire_work_handler(struct work_struct *work) 2841 { 2842 struct drm_i915_private *dev_priv = 2843 container_of(work, typeof(*dev_priv), mm.retire_work.work); 2844 struct drm_device *dev = dev_priv->dev; 2845 bool idle; 2846 2847 /* Come back later if the device is busy... */ 2848 idle = false; 2849 if (mutex_trylock(&dev->struct_mutex)) { 2850 idle = i915_gem_retire_requests(dev); 2851 mutex_unlock(&dev->struct_mutex); 2852 } 2853 if (!idle) 2854 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 2855 round_jiffies_up_relative(HZ)); 2856 } 2857 2858 static void 2859 i915_gem_idle_work_handler(struct work_struct *work) 2860 { 2861 struct drm_i915_private *dev_priv = 2862 container_of(work, typeof(*dev_priv), mm.idle_work.work); 2863 2864 intel_mark_idle(dev_priv->dev); 2865 } 2866 2867 /** 2868 * Ensures that an object will eventually get non-busy by flushing any required 2869 * write domains, emitting any outstanding lazy request and retiring any 2870 * completed requests. 2871 */ 2872 static int 2873 i915_gem_object_flush_active(struct drm_i915_gem_object *obj) 2874 { 2875 int ret; 2876 2877 if (obj->active) { 2878 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno); 2879 if (ret) 2880 return ret; 2881 2882 i915_gem_retire_requests_ring(obj->ring); 2883 } 2884 2885 return 0; 2886 } 2887 2888 /** 2889 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT 2890 * @DRM_IOCTL_ARGS: standard ioctl arguments 2891 * 2892 * Returns 0 if successful, else an error is returned with the remaining time in 2893 * the timeout parameter. 2894 * -ETIME: object is still busy after timeout 2895 * -ERESTARTSYS: signal interrupted the wait 2896 * -ENOENT: object doesn't exist 2897 * Also possible, but rare: 2898 * -EAGAIN: GPU wedged 2899 * -ENOMEM: damn 2900 * -ENODEV: Internal IRQ fail 2901 * -E?: The add request failed 2902 * 2903 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any 2904 * non-zero timeout parameter the wait ioctl will wait for the given number of 2905 * nanoseconds on an object becoming unbusy. Since the wait itself does so 2906 * without holding struct_mutex the object may become re-busied before this 2907 * function completes.
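 * As a purely illustrative userspace sketch (not part of this driver),
 * polling with busy-ioctl semantics might look like:
 *
 *	bool busy = false;
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,	// hypothetical GEM handle
 *		.timeout_ns = 0,	// 0 == just query busyness
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
 *		busy = (errno == ETIME);
 *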
A similar but shorter race condition exists in the busy 2908 * ioctl. 2909 */ 2910 int 2911 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 2912 { 2913 struct drm_i915_private *dev_priv = dev->dev_private; 2914 struct drm_i915_gem_wait *args = data; 2915 struct drm_i915_gem_object *obj; 2916 struct intel_engine_cs *ring = NULL; 2917 unsigned reset_counter; 2918 u32 seqno = 0; 2919 int ret = 0; 2920 2921 if (args->flags != 0) 2922 return -EINVAL; 2923 2924 ret = i915_mutex_lock_interruptible(dev); 2925 if (ret) 2926 return ret; 2927 2928 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle)); 2929 if (&obj->base == NULL) { 2930 mutex_unlock(&dev->struct_mutex); 2931 return -ENOENT; 2932 } 2933 2934 /* Need to make sure the object gets inactive eventually. */ 2935 ret = i915_gem_object_flush_active(obj); 2936 if (ret) 2937 goto out; 2938 2939 if (obj->active) { 2940 seqno = obj->last_read_seqno; 2941 ring = obj->ring; 2942 } 2943 2944 if (seqno == 0) 2945 goto out; 2946 2947 /* Do this after OLR check to make sure we make forward progress polling 2948 * on this IOCTL with a timeout <=0 (like busy ioctl) 2949 */ 2950 if (args->timeout_ns <= 0) { 2951 ret = -ETIME; 2952 goto out; 2953 } 2954 2955 drm_gem_object_unreference(&obj->base); 2956 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 2957 mutex_unlock(&dev->struct_mutex); 2958 2959 return __i915_wait_seqno(ring, seqno, reset_counter, true, 2960 &args->timeout_ns, file->driver_priv); 2961 2962 out: 2963 drm_gem_object_unreference(&obj->base); 2964 mutex_unlock(&dev->struct_mutex); 2965 return ret; 2966 } 2967 2968 /** 2969 * i915_gem_object_sync - sync an object to a ring. 2970 * 2971 * @obj: object which may be in use on another ring. 2972 * @to: ring we wish to use the object on. May be NULL. 2973 * 2974 * This code is meant to abstract object synchronization with the GPU. 2975 * Calling with NULL implies synchronizing the object with the CPU 2976 * rather than a particular GPU ring. 2977 * 2978 * Returns 0 if successful, else propagates up the lower layer error. 2979 */ 2980 int 2981 i915_gem_object_sync(struct drm_i915_gem_object *obj, 2982 struct intel_engine_cs *to) 2983 { 2984 struct intel_engine_cs *from = obj->ring; 2985 u32 seqno; 2986 int ret, idx; 2987 2988 if (from == NULL || to == from) 2989 return 0; 2990 2991 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev)) 2992 return i915_gem_object_wait_rendering(obj, false); 2993 2994 idx = intel_ring_sync_index(from, to); 2995 2996 seqno = obj->last_read_seqno; 2997 /* Optimization: Avoid semaphore sync when we are sure we already 2998 * waited for an object with a higher seqno */ 2999 if (seqno <= from->semaphore.sync_seqno[idx]) 3000 return 0; 3001 3002 ret = i915_gem_check_olr(obj->ring, seqno); 3003 if (ret) 3004 return ret; 3005 3006 trace_i915_gem_ring_sync_to(from, to, seqno); 3007 ret = to->semaphore.sync_to(to, from, seqno); 3008 if (!ret) 3009 /* We use last_read_seqno because sync_to() 3010 * might have just caused seqno wrap under 3011 * the radar.
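 * (Note the bookkeeping lives on the waited-upon ring:
 * from->semaphore.sync_seqno[] is indexed via
 * intel_ring_sync_index(from, to), matching the check above.)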
3012 */ 3013 from->semaphore.sync_seqno[idx] = obj->last_read_seqno; 3014 3015 return ret; 3016 } 3017 3018 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) 3019 { 3020 u32 old_write_domain, old_read_domains; 3021 3022 /* Force a pagefault for domain tracking on next user access */ 3023 i915_gem_release_mmap(obj); 3024 3025 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 3026 return; 3027 3028 /* Wait for any direct GTT access to complete */ 3029 mb(); 3030 3031 old_read_domains = obj->base.read_domains; 3032 old_write_domain = obj->base.write_domain; 3033 3034 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT; 3035 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT; 3036 3037 trace_i915_gem_object_change_domain(obj, 3038 old_read_domains, 3039 old_write_domain); 3040 } 3041 3042 int i915_vma_unbind(struct i915_vma *vma) 3043 { 3044 struct drm_i915_gem_object *obj = vma->obj; 3045 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3046 int ret; 3047 3048 if (list_empty(&vma->vma_link)) 3049 return 0; 3050 3051 if (!drm_mm_node_allocated(&vma->node)) { 3052 i915_gem_vma_destroy(vma); 3053 return 0; 3054 } 3055 3056 if (vma->pin_count) 3057 return -EBUSY; 3058 3059 BUG_ON(obj->pages == NULL); 3060 3061 ret = i915_gem_object_finish_gpu(obj); 3062 if (ret) 3063 return ret; 3064 /* Continue on if we fail due to EIO, the GPU is hung so we 3065 * should be safe and we need to cleanup or else we might 3066 * cause memory corruption through use-after-free. 3067 */ 3068 3069 /* Throw away the active reference before moving to the unbound list */ 3070 i915_gem_object_retire(obj); 3071 3072 if (i915_is_ggtt(vma->vm)) { 3073 i915_gem_object_finish_gtt(obj); 3074 3075 /* release the fence reg _after_ flushing */ 3076 ret = i915_gem_object_put_fence(obj); 3077 if (ret) 3078 return ret; 3079 } 3080 3081 trace_i915_vma_unbind(vma); 3082 3083 vma->unbind_vma(vma); 3084 3085 list_del_init(&vma->mm_list); 3086 if (i915_is_ggtt(vma->vm)) 3087 obj->map_and_fenceable = false; 3088 3089 drm_mm_remove_node(&vma->node); 3090 i915_gem_vma_destroy(vma); 3091 3092 /* Since the unbound list is global, only move to that list if 3093 * no more VMAs exist. */ 3094 if (list_empty(&obj->vma_list)) { 3095 i915_gem_gtt_finish_object(obj); 3096 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); 3097 } 3098 3099 /* And finally now the object is completely decoupled from this vma, 3100 * we can drop its hold on the backing storage and allow it to be 3101 * reaped by the shrinker. 3102 */ 3103 i915_gem_object_unpin_pages(obj); 3104 3105 return 0; 3106 } 3107 3108 int i915_gpu_idle(struct drm_device *dev) 3109 { 3110 struct drm_i915_private *dev_priv = dev->dev_private; 3111 struct intel_engine_cs *ring; 3112 int ret, i; 3113 3114 /* Flush everything onto the inactive list. 
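 * (i.e. switch each ring back to the default context where needed,
 * then wait for every ring to drain).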
*/ 3115 for_each_ring(ring, dev_priv, i) { 3116 if (!i915.enable_execlists) { 3117 ret = i915_switch_context(ring, ring->default_context); 3118 if (ret) 3119 return ret; 3120 } 3121 3122 ret = intel_ring_idle(ring); 3123 if (ret) 3124 return ret; 3125 } 3126 3127 return 0; 3128 } 3129 3130 static void i965_write_fence_reg(struct drm_device *dev, int reg, 3131 struct drm_i915_gem_object *obj) 3132 { 3133 struct drm_i915_private *dev_priv = dev->dev_private; 3134 int fence_reg; 3135 int fence_pitch_shift; 3136 3137 if (INTEL_INFO(dev)->gen >= 6) { 3138 fence_reg = FENCE_REG_SANDYBRIDGE_0; 3139 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT; 3140 } else { 3141 fence_reg = FENCE_REG_965_0; 3142 fence_pitch_shift = I965_FENCE_PITCH_SHIFT; 3143 } 3144 3145 fence_reg += reg * 8; 3146 3147 /* To w/a incoherency with non-atomic 64-bit register updates, 3148 * we split the 64-bit update into two 32-bit writes. In order 3149 * for a partial fence not to be evaluated between writes, we 3150 * precede the update with a write to turn off the fence register, 3151 * and only enable the fence as the last step. 3152 * 3153 * For extra levels of paranoia, we make sure each step lands 3154 * before applying the next step. 3155 */ 3156 I915_WRITE(fence_reg, 0); 3157 POSTING_READ(fence_reg); 3158 3159 if (obj) { 3160 u32 size = i915_gem_obj_ggtt_size(obj); 3161 uint64_t val; 3162 3163 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & 3164 0xfffff000) << 32; 3165 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; 3166 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; 3167 if (obj->tiling_mode == I915_TILING_Y) 3168 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 3169 val |= I965_FENCE_REG_VALID; 3170 3171 I915_WRITE(fence_reg + 4, val >> 32); 3172 POSTING_READ(fence_reg + 4); 3173 3174 I915_WRITE(fence_reg + 0, val); 3175 POSTING_READ(fence_reg); 3176 } else { 3177 I915_WRITE(fence_reg + 4, 0); 3178 POSTING_READ(fence_reg + 4); 3179 } 3180 } 3181 3182 static void i915_write_fence_reg(struct drm_device *dev, int reg, 3183 struct drm_i915_gem_object *obj) 3184 { 3185 struct drm_i915_private *dev_priv = dev->dev_private; 3186 u32 val; 3187 3188 if (obj) { 3189 u32 size = i915_gem_obj_ggtt_size(obj); 3190 int pitch_val; 3191 int tile_width; 3192 3193 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) || 3194 (size & -size) != size || 3195 (i915_gem_obj_ggtt_offset(obj) & (size - 1)), 3196 "object 0x%08lx [fenceable?
%d] not 1M or pot-size (0x%08x) aligned\n", 3197 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size); 3198 3199 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) 3200 tile_width = 128; 3201 else 3202 tile_width = 512; 3203 3204 /* Note: pitch better be a power of two tile widths */ 3205 pitch_val = obj->stride / tile_width; 3206 pitch_val = ffs(pitch_val) - 1; 3207 3208 val = i915_gem_obj_ggtt_offset(obj); 3209 if (obj->tiling_mode == I915_TILING_Y) 3210 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 3211 val |= I915_FENCE_SIZE_BITS(size); 3212 val |= pitch_val << I830_FENCE_PITCH_SHIFT; 3213 val |= I830_FENCE_REG_VALID; 3214 } else 3215 val = 0; 3216 3217 if (reg < 8) 3218 reg = FENCE_REG_830_0 + reg * 4; 3219 else 3220 reg = FENCE_REG_945_8 + (reg - 8) * 4; 3221 3222 I915_WRITE(reg, val); 3223 POSTING_READ(reg); 3224 } 3225 3226 static void i830_write_fence_reg(struct drm_device *dev, int reg, 3227 struct drm_i915_gem_object *obj) 3228 { 3229 struct drm_i915_private *dev_priv = dev->dev_private; 3230 uint32_t val; 3231 3232 if (obj) { 3233 u32 size = i915_gem_obj_ggtt_size(obj); 3234 uint32_t pitch_val; 3235 3236 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) || 3237 (size & -size) != size || 3238 (i915_gem_obj_ggtt_offset(obj) & (size - 1)), 3239 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n", 3240 i915_gem_obj_ggtt_offset(obj), size); 3241 3242 pitch_val = obj->stride / 128; 3243 pitch_val = ffs(pitch_val) - 1; 3244 3245 val = i915_gem_obj_ggtt_offset(obj); 3246 if (obj->tiling_mode == I915_TILING_Y) 3247 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 3248 val |= I830_FENCE_SIZE_BITS(size); 3249 val |= pitch_val << I830_FENCE_PITCH_SHIFT; 3250 val |= I830_FENCE_REG_VALID; 3251 } else 3252 val = 0; 3253 3254 I915_WRITE(FENCE_REG_830_0 + reg * 4, val); 3255 POSTING_READ(FENCE_REG_830_0 + reg * 4); 3256 } 3257 3258 inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj) 3259 { 3260 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT; 3261 } 3262 3263 static void i915_gem_write_fence(struct drm_device *dev, int reg, 3264 struct drm_i915_gem_object *obj) 3265 { 3266 struct drm_i915_private *dev_priv = dev->dev_private; 3267 3268 /* Ensure that all CPU reads are completed before installing a fence 3269 * and all writes before removing the fence. 3270 */ 3271 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) 3272 mb(); 3273 3274 WARN(obj && (!obj->stride || !obj->tiling_mode), 3275 "bogus fence setup with stride: 0x%x, tiling mode: %i\n", 3276 obj->stride, obj->tiling_mode); 3277 3278 switch (INTEL_INFO(dev)->gen) { 3279 case 9: 3280 case 8: 3281 case 7: 3282 case 6: 3283 case 5: 3284 case 4: i965_write_fence_reg(dev, reg, obj); break; 3285 case 3: i915_write_fence_reg(dev, reg, obj); break; 3286 case 2: i830_write_fence_reg(dev, reg, obj); break; 3287 default: BUG(); 3288 } 3289 3290 /* And similarly be paranoid that no direct access to this region 3291 * is reordered to before the fence is installed. 
3292 */ 3293 if (i915_gem_object_needs_mb(obj)) 3294 mb(); 3295 } 3296 3297 static inline int fence_number(struct drm_i915_private *dev_priv, 3298 struct drm_i915_fence_reg *fence) 3299 { 3300 return fence - dev_priv->fence_regs; 3301 } 3302 3303 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, 3304 struct drm_i915_fence_reg *fence, 3305 bool enable) 3306 { 3307 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3308 int reg = fence_number(dev_priv, fence); 3309 3310 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL); 3311 3312 if (enable) { 3313 obj->fence_reg = reg; 3314 fence->obj = obj; 3315 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); 3316 } else { 3317 obj->fence_reg = I915_FENCE_REG_NONE; 3318 fence->obj = NULL; 3319 list_del_init(&fence->lru_list); 3320 } 3321 obj->fence_dirty = false; 3322 } 3323 3324 static int 3325 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) 3326 { 3327 if (obj->last_fenced_seqno) { 3328 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); 3329 if (ret) 3330 return ret; 3331 3332 obj->last_fenced_seqno = 0; 3333 } 3334 3335 return 0; 3336 } 3337 3338 int 3339 i915_gem_object_put_fence(struct drm_i915_gem_object *obj) 3340 { 3341 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3342 struct drm_i915_fence_reg *fence; 3343 int ret; 3344 3345 ret = i915_gem_object_wait_fence(obj); 3346 if (ret) 3347 return ret; 3348 3349 if (obj->fence_reg == I915_FENCE_REG_NONE) 3350 return 0; 3351 3352 fence = &dev_priv->fence_regs[obj->fence_reg]; 3353 3354 if (WARN_ON(fence->pin_count)) 3355 return -EBUSY; 3356 3357 i915_gem_object_fence_lost(obj); 3358 i915_gem_object_update_fence(obj, fence, false); 3359 3360 return 0; 3361 } 3362 3363 static struct drm_i915_fence_reg * 3364 i915_find_fence_reg(struct drm_device *dev) 3365 { 3366 struct drm_i915_private *dev_priv = dev->dev_private; 3367 struct drm_i915_fence_reg *reg, *avail; 3368 int i; 3369 3370 /* First try to find a free reg */ 3371 avail = NULL; 3372 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { 3373 reg = &dev_priv->fence_regs[i]; 3374 if (!reg->obj) 3375 return reg; 3376 3377 if (!reg->pin_count) 3378 avail = reg; 3379 } 3380 3381 if (avail == NULL) 3382 goto deadlock; 3383 3384 /* None available, try to steal one or wait for a user to finish */ 3385 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { 3386 if (reg->pin_count) 3387 continue; 3388 3389 return reg; 3390 } 3391 3392 deadlock: 3393 /* Wait for completion of pending flips which consume fences */ 3394 if (intel_has_pending_fb_unpin(dev)) 3395 return ERR_PTR(-EAGAIN); 3396 3397 return ERR_PTR(-EDEADLK); 3398 } 3399 3400 /** 3401 * i915_gem_object_get_fence - set up fencing for an object 3402 * @obj: object to map through a fence reg 3403 * 3404 * When mapping objects through the GTT, userspace wants to be able to write 3405 * to them without having to worry about swizzling if the object is tiled. 3406 * This function walks the fence regs looking for a free one for @obj, 3407 * stealing one if it can't find any. 3408 * 3409 * It then sets up the reg based on the object's properties: address, pitch 3410 * and tiling format. 3411 * 3412 * For an untiled surface, this removes any existing fence. 
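 *
 * The caller must hold struct_mutex, and a tiled object must already be
 * bound map_and_fenceable in the global GTT (the WARN below returns
 * -EINVAL otherwise). A typical (hypothetical) caller therefore pins
 * first, roughly:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
 *	if (ret == 0)
 *		ret = i915_gem_object_get_fence(obj);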
3413 */ 3414 int 3415 i915_gem_object_get_fence(struct drm_i915_gem_object *obj) 3416 { 3417 struct drm_device *dev = obj->base.dev; 3418 struct drm_i915_private *dev_priv = dev->dev_private; 3419 bool enable = obj->tiling_mode != I915_TILING_NONE; 3420 struct drm_i915_fence_reg *reg; 3421 int ret; 3422 3423 /* Have we updated the tiling parameters upon the object and so 3424 * will need to serialise the write to the associated fence register? 3425 */ 3426 if (obj->fence_dirty) { 3427 ret = i915_gem_object_wait_fence(obj); 3428 if (ret) 3429 return ret; 3430 } 3431 3432 /* Just update our place in the LRU if our fence is getting reused. */ 3433 if (obj->fence_reg != I915_FENCE_REG_NONE) { 3434 reg = &dev_priv->fence_regs[obj->fence_reg]; 3435 if (!obj->fence_dirty) { 3436 list_move_tail(&reg->lru_list, 3437 &dev_priv->mm.fence_list); 3438 return 0; 3439 } 3440 } else if (enable) { 3441 if (WARN_ON(!obj->map_and_fenceable)) 3442 return -EINVAL; 3443 3444 reg = i915_find_fence_reg(dev); 3445 if (IS_ERR(reg)) 3446 return PTR_ERR(reg); 3447 3448 if (reg->obj) { 3449 struct drm_i915_gem_object *old = reg->obj; 3450 3451 ret = i915_gem_object_wait_fence(old); 3452 if (ret) 3453 return ret; 3454 3455 i915_gem_object_fence_lost(old); 3456 } 3457 } else 3458 return 0; 3459 3460 i915_gem_object_update_fence(obj, reg, enable); 3461 3462 return 0; 3463 } 3464 3465 static bool i915_gem_valid_gtt_space(struct i915_vma *vma, 3466 unsigned long cache_level) 3467 { 3468 struct drm_mm_node *gtt_space = &vma->node; 3469 struct drm_mm_node *other; 3470 3471 /* 3472 * On some machines we have to be careful when putting differing types 3473 * of snoopable memory together to avoid the prefetcher crossing memory 3474 * domains and dying. During vm initialisation, we decide whether or not 3475 * these constraints apply and set the drm_mm.color_adjust 3476 * appropriately. 3477 */ 3478 if (vma->vm->mm.color_adjust == NULL) 3479 return true; 3480 3481 if (!drm_mm_node_allocated(gtt_space)) 3482 return true; 3483 3484 if (list_empty(&gtt_space->node_list)) 3485 return true; 3486 3487 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list); 3488 if (other->allocated && !other->hole_follows && other->color != cache_level) 3489 return false; 3490 3491 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list); 3492 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level) 3493 return false; 3494 3495 return true; 3496 } 3497 3498 /** 3499 * Finds free space in the GTT aperture and binds the object there. 3500 */ 3501 static struct i915_vma * 3502 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, 3503 struct i915_address_space *vm, 3504 unsigned alignment, 3505 uint64_t flags) 3506 { 3507 struct drm_device *dev = obj->base.dev; 3508 struct drm_i915_private *dev_priv = dev->dev_private; 3509 u32 size, fence_size, fence_alignment, unfenced_alignment; 3510 unsigned long start = 3511 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; 3512 unsigned long end = 3513 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; 3514 struct i915_vma *vma; 3515 int ret; 3516 3517 fence_size = i915_gem_get_gtt_size(dev, 3518 obj->base.size, 3519 obj->tiling_mode); 3520 fence_alignment = i915_gem_get_gtt_alignment(dev, 3521 obj->base.size, 3522 obj->tiling_mode, true); 3523 unfenced_alignment = 3524 i915_gem_get_gtt_alignment(dev, 3525 obj->base.size, 3526 obj->tiling_mode, false); 3527 3528 if (alignment == 0) 3529 alignment = flags & PIN_MAPPABLE ?
fence_alignment : 3530 unfenced_alignment; 3531 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) { 3532 DRM_DEBUG("Invalid object alignment requested %u\n", alignment); 3533 return ERR_PTR(-EINVAL); 3534 } 3535 3536 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size; 3537 3538 /* If the object is bigger than the entire aperture, reject it early 3539 * before evicting everything in a vain attempt to find space. 3540 */ 3541 if (obj->base.size > end) { 3542 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n", 3543 obj->base.size, 3544 flags & PIN_MAPPABLE ? "mappable" : "total", 3545 end); 3546 return ERR_PTR(-E2BIG); 3547 } 3548 3549 ret = i915_gem_object_get_pages(obj); 3550 if (ret) 3551 return ERR_PTR(ret); 3552 3553 i915_gem_object_pin_pages(obj); 3554 3555 vma = i915_gem_obj_lookup_or_create_vma(obj, vm); 3556 if (IS_ERR(vma)) 3557 goto err_unpin; 3558 3559 search_free: 3560 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, 3561 size, alignment, 3562 obj->cache_level, 3563 start, end, 3564 DRM_MM_SEARCH_DEFAULT, 3565 DRM_MM_CREATE_DEFAULT); 3566 if (ret) { 3567 ret = i915_gem_evict_something(dev, vm, size, alignment, 3568 obj->cache_level, 3569 start, end, 3570 flags); 3571 if (ret == 0) 3572 goto search_free; 3573 3574 goto err_free_vma; 3575 } 3576 if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) { 3577 ret = -EINVAL; 3578 goto err_remove_node; 3579 } 3580 3581 ret = i915_gem_gtt_prepare_object(obj); 3582 if (ret) 3583 goto err_remove_node; 3584 3585 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); 3586 list_add_tail(&vma->mm_list, &vm->inactive_list); 3587 3588 trace_i915_vma_bind(vma, flags); 3589 vma->bind_vma(vma, obj->cache_level, 3590 flags & PIN_GLOBAL ? GLOBAL_BIND : 0); 3591 3592 return vma; 3593 3594 err_remove_node: 3595 drm_mm_remove_node(&vma->node); 3596 err_free_vma: 3597 i915_gem_vma_destroy(vma); 3598 vma = ERR_PTR(ret); 3599 err_unpin: 3600 i915_gem_object_unpin_pages(obj); 3601 return vma; 3602 } 3603 3604 bool 3605 i915_gem_clflush_object(struct drm_i915_gem_object *obj, 3606 bool force) 3607 { 3608 /* If we don't have a page list set up, then we're not pinned 3609 * to GPU, and we can ignore the cache flush because it'll happen 3610 * again at bind time. 3611 */ 3612 if (obj->pages == NULL) 3613 return false; 3614 3615 /* 3616 * Stolen memory is always coherent with the GPU as it is explicitly 3617 * marked as wc by the system, or the system is cache-coherent. 3618 */ 3619 if (obj->stolen || obj->phys_handle) 3620 return false; 3621 3622 /* If the GPU is snooping the contents of the CPU cache, 3623 * we do not need to manually clear the CPU cache lines. However, 3624 * the caches are only snooped when the render cache is 3625 * flushed/invalidated. As we always have to emit invalidations 3626 * and flushes when moving into and out of the RENDER domain, correct 3627 * snooping behaviour occurs naturally as the result of our domain 3628 * tracking. 3629 */ 3630 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) 3631 return false; 3632 3633 trace_i915_gem_object_clflush(obj); 3634 drm_clflush_sg(obj->pages); 3635 3636 return true; 3637 } 3638 3639 /** Flushes the GTT write domain for the object if it's dirty. 
*/ 3640 static void 3641 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) 3642 { 3643 uint32_t old_write_domain; 3644 3645 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) 3646 return; 3647 3648 /* No actual flushing is required for the GTT write domain. Writes 3649 * to it immediately go to main memory as far as we know, so there's 3650 * no chipset flush. It also doesn't land in render cache. 3651 * 3652 * However, we do have to enforce the order so that all writes through 3653 * the GTT land before any writes to the device, such as updates to 3654 * the GATT itself. 3655 */ 3656 wmb(); 3657 3658 old_write_domain = obj->base.write_domain; 3659 obj->base.write_domain = 0; 3660 3661 intel_fb_obj_flush(obj, false); 3662 3663 trace_i915_gem_object_change_domain(obj, 3664 obj->base.read_domains, 3665 old_write_domain); 3666 } 3667 3668 /** Flushes the CPU write domain for the object if it's dirty. */ 3669 static void 3670 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, 3671 bool force) 3672 { 3673 uint32_t old_write_domain; 3674 3675 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) 3676 return; 3677 3678 if (i915_gem_clflush_object(obj, force)) 3679 i915_gem_chipset_flush(obj->base.dev); 3680 3681 old_write_domain = obj->base.write_domain; 3682 obj->base.write_domain = 0; 3683 3684 intel_fb_obj_flush(obj, false); 3685 3686 trace_i915_gem_object_change_domain(obj, 3687 obj->base.read_domains, 3688 old_write_domain); 3689 } 3690 3691 /** 3692 * Moves a single object to the GTT read, and possibly write domain. 3693 * 3694 * This function returns when the move is complete, including waiting on 3695 * flushes to occur. 3696 */ 3697 int 3698 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 3699 { 3700 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3701 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj); 3702 uint32_t old_write_domain, old_read_domains; 3703 int ret; 3704 3705 /* Not valid to be called on unbound objects. */ 3706 if (vma == NULL) 3707 return -EINVAL; 3708 3709 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3710 return 0; 3711 3712 ret = i915_gem_object_wait_rendering(obj, !write); 3713 if (ret) 3714 return ret; 3715 3716 i915_gem_object_retire(obj); 3717 i915_gem_object_flush_cpu_write_domain(obj, false); 3718 3719 /* Serialise direct access to this object with the barriers for 3720 * coherent writes from the GPU, by effectively invalidating the 3721 * GTT domain upon first access. 3722 */ 3723 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 3724 mb(); 3725 3726 old_write_domain = obj->base.write_domain; 3727 old_read_domains = obj->base.read_domains; 3728 3729 /* It should now be out of any other write domains, and we can update 3730 * the domain values for our changes. 
3731 */ 3732 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 3733 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3734 if (write) { 3735 obj->base.read_domains = I915_GEM_DOMAIN_GTT; 3736 obj->base.write_domain = I915_GEM_DOMAIN_GTT; 3737 obj->dirty = 1; 3738 } 3739 3740 if (write) 3741 intel_fb_obj_invalidate(obj, NULL); 3742 3743 trace_i915_gem_object_change_domain(obj, 3744 old_read_domains, 3745 old_write_domain); 3746 3747 /* And bump the LRU for this access */ 3748 if (i915_gem_object_is_inactive(obj)) 3749 list_move_tail(&vma->mm_list, 3750 &dev_priv->gtt.base.inactive_list); 3751 3752 return 0; 3753 } 3754 3755 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3756 enum i915_cache_level cache_level) 3757 { 3758 struct drm_device *dev = obj->base.dev; 3759 struct i915_vma *vma, *next; 3760 int ret; 3761 3762 if (obj->cache_level == cache_level) 3763 return 0; 3764 3765 if (i915_gem_obj_is_pinned(obj)) { 3766 DRM_DEBUG("can not change the cache level of pinned objects\n"); 3767 return -EBUSY; 3768 } 3769 3770 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { 3771 if (!i915_gem_valid_gtt_space(vma, cache_level)) { 3772 ret = i915_vma_unbind(vma); 3773 if (ret) 3774 return ret; 3775 } 3776 } 3777 3778 if (i915_gem_obj_bound_any(obj)) { 3779 ret = i915_gem_object_finish_gpu(obj); 3780 if (ret) 3781 return ret; 3782 3783 i915_gem_object_finish_gtt(obj); 3784 3785 /* Before SandyBridge, you could not use tiling or fence 3786 * registers with snooped memory, so relinquish any fences 3787 * currently pointing to our region in the aperture. 3788 */ 3789 if (INTEL_INFO(dev)->gen < 6) { 3790 ret = i915_gem_object_put_fence(obj); 3791 if (ret) 3792 return ret; 3793 } 3794 3795 list_for_each_entry(vma, &obj->vma_list, vma_link) 3796 if (drm_mm_node_allocated(&vma->node)) 3797 vma->bind_vma(vma, cache_level, 3798 vma->bound & GLOBAL_BIND); 3799 } 3800 3801 list_for_each_entry(vma, &obj->vma_list, vma_link) 3802 vma->node.color = cache_level; 3803 obj->cache_level = cache_level; 3804 3805 if (cpu_write_needs_clflush(obj)) { 3806 u32 old_read_domains, old_write_domain; 3807 3808 /* If we're coming from LLC cached, then we haven't 3809 * actually been tracking whether the data is in the 3810 * CPU cache or not, since we only allow one bit set 3811 * in obj->write_domain and have been skipping the clflushes. 3812 * Just set it to the CPU cache for now. 
3813 */ 3814 i915_gem_object_retire(obj); 3815 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); 3816 3817 old_read_domains = obj->base.read_domains; 3818 old_write_domain = obj->base.write_domain; 3819 3820 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3821 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3822 3823 trace_i915_gem_object_change_domain(obj, 3824 old_read_domains, 3825 old_write_domain); 3826 } 3827 3828 return 0; 3829 } 3830 3831 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 3832 struct drm_file *file) 3833 { 3834 struct drm_i915_gem_caching *args = data; 3835 struct drm_i915_gem_object *obj; 3836 int ret; 3837 3838 ret = i915_mutex_lock_interruptible(dev); 3839 if (ret) 3840 return ret; 3841 3842 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 3843 if (&obj->base == NULL) { 3844 ret = -ENOENT; 3845 goto unlock; 3846 } 3847 3848 switch (obj->cache_level) { 3849 case I915_CACHE_LLC: 3850 case I915_CACHE_L3_LLC: 3851 args->caching = I915_CACHING_CACHED; 3852 break; 3853 3854 case I915_CACHE_WT: 3855 args->caching = I915_CACHING_DISPLAY; 3856 break; 3857 3858 default: 3859 args->caching = I915_CACHING_NONE; 3860 break; 3861 } 3862 3863 drm_gem_object_unreference(&obj->base); 3864 unlock: 3865 mutex_unlock(&dev->struct_mutex); 3866 return ret; 3867 } 3868 3869 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 3870 struct drm_file *file) 3871 { 3872 struct drm_i915_gem_caching *args = data; 3873 struct drm_i915_gem_object *obj; 3874 enum i915_cache_level level; 3875 int ret; 3876 3877 switch (args->caching) { 3878 case I915_CACHING_NONE: 3879 level = I915_CACHE_NONE; 3880 break; 3881 case I915_CACHING_CACHED: 3882 level = I915_CACHE_LLC; 3883 break; 3884 case I915_CACHING_DISPLAY: 3885 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE; 3886 break; 3887 default: 3888 return -EINVAL; 3889 } 3890 3891 ret = i915_mutex_lock_interruptible(dev); 3892 if (ret) 3893 return ret; 3894 3895 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 3896 if (&obj->base == NULL) { 3897 ret = -ENOENT; 3898 goto unlock; 3899 } 3900 3901 ret = i915_gem_object_set_cache_level(obj, level); 3902 3903 drm_gem_object_unreference(&obj->base); 3904 unlock: 3905 mutex_unlock(&dev->struct_mutex); 3906 return ret; 3907 } 3908 3909 static bool is_pin_display(struct drm_i915_gem_object *obj) 3910 { 3911 struct i915_vma *vma; 3912 3913 vma = i915_gem_obj_to_ggtt(obj); 3914 if (!vma) 3915 return false; 3916 3917 /* There are 3 sources that pin objects: 3918 * 1. The display engine (scanouts, sprites, cursors); 3919 * 2. Reservations for execbuffer; 3920 * 3. The user. 3921 * 3922 * We can ignore reservations as we hold the struct_mutex and 3923 * are only called outside of the reservation path. The user 3924 * can only increment pin_count once, and so if after 3925 * subtracting the potential reference by the user, any pin_count 3926 * remains, it must be due to another use by the display engine. 3927 */ 3928 return vma->pin_count - !!obj->user_pin_count; 3929 } 3930 3931 /* 3932 * Prepare buffer for display plane (scanout, cursors, etc). 3933 * Can be called from an uninterruptible phase (modesetting) and allows 3934 * any flushes to be pipelined (for pageflips). 
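 * Callers are expected to pair this with
 * i915_gem_object_unpin_from_display_plane() once the scanout no
 * longer uses the object, along the (sketched, hypothetical) lines of:
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, align, pipelined);
 *	if (ret == 0) {
 *		... scan out ...
 *		i915_gem_object_unpin_from_display_plane(obj);
 *	}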
3935 */ 3936 int 3937 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3938 u32 alignment, 3939 struct intel_engine_cs *pipelined) 3940 { 3941 u32 old_read_domains, old_write_domain; 3942 bool was_pin_display; 3943 int ret; 3944 3945 if (pipelined != obj->ring) { 3946 ret = i915_gem_object_sync(obj, pipelined); 3947 if (ret) 3948 return ret; 3949 } 3950 3951 /* Mark the pin_display early so that we account for the 3952 * display coherency whilst setting up the cache domains. 3953 */ 3954 was_pin_display = obj->pin_display; 3955 obj->pin_display = true; 3956 3957 /* The display engine is not coherent with the LLC cache on gen6. As 3958 * a result, we make sure that the pinning that is about to occur is 3959 * done with uncached PTEs. This is the lowest common denominator for all 3960 * chipsets. 3961 * 3962 * However for gen6+, we could do better by using the GFDT bit instead 3963 * of uncaching, which would allow us to flush all the LLC-cached data 3964 * with that bit in the PTE to main memory with just one PIPE_CONTROL. 3965 */ 3966 ret = i915_gem_object_set_cache_level(obj, 3967 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE); 3968 if (ret) 3969 goto err_unpin_display; 3970 3971 /* As the user may map the buffer once pinned in the display plane 3972 * (e.g. libkms for the bootup splash), we have to ensure that we 3973 * always use map_and_fenceable for all scanout buffers. 3974 */ 3975 ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE); 3976 if (ret) 3977 goto err_unpin_display; 3978 3979 i915_gem_object_flush_cpu_write_domain(obj, true); 3980 3981 old_write_domain = obj->base.write_domain; 3982 old_read_domains = obj->base.read_domains; 3983 3984 /* It should now be out of any other write domains, and we can update 3985 * the domain values for our changes. 3986 */ 3987 obj->base.write_domain = 0; 3988 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3989 3990 trace_i915_gem_object_change_domain(obj, 3991 old_read_domains, 3992 old_write_domain); 3993 3994 return 0; 3995 3996 err_unpin_display: 3997 WARN_ON(was_pin_display != is_pin_display(obj)); 3998 obj->pin_display = was_pin_display; 3999 return ret; 4000 } 4001 4002 void 4003 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj) 4004 { 4005 i915_gem_object_ggtt_unpin(obj); 4006 obj->pin_display = is_pin_display(obj); 4007 } 4008 4009 int 4010 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) 4011 { 4012 int ret; 4013 4014 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) 4015 return 0; 4016 4017 ret = i915_gem_object_wait_rendering(obj, false); 4018 if (ret) 4019 return ret; 4020 4021 /* Ensure that we invalidate the GPU's caches and TLBs. */ 4022 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; 4023 return 0; 4024 } 4025 4026 /** 4027 * Moves a single object to the CPU read, and possibly write domain. 4028 * 4029 * This function returns when the move is complete, including waiting on 4030 * flushes to occur.
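 * (The shrinker path relies on this: i915_gem_object_put_pages_gtt()
 * above moves the object to the CPU domain before releasing its
 * backing pages.)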
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
        uint32_t old_write_domain, old_read_domains;
        int ret;

        if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
                return 0;

        ret = i915_gem_object_wait_rendering(obj, !write);
        if (ret)
                return ret;

        i915_gem_object_retire(obj);
        i915_gem_object_flush_gtt_write_domain(obj);

        old_write_domain = obj->base.write_domain;
        old_read_domains = obj->base.read_domains;

        /* Flush the CPU cache if it's still invalid. */
        if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
                i915_gem_clflush_object(obj, false);

                obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
        }

        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
        BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

        /* If we're writing through the CPU, then the GPU read domains will
         * need to be invalidated at next use.
         */
        if (write) {
                obj->base.read_domains = I915_GEM_DOMAIN_CPU;
                obj->base.write_domain = I915_GEM_DOMAIN_CPU;
                intel_fb_obj_invalidate(obj, NULL);
        }

        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            old_write_domain);

        return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
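 *
 * The 20 ms window is measured against request->emitted_jiffies below,
 * i.e. against when each request was submitted, not when it completed.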
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
        struct drm_i915_gem_request *request;
        struct intel_engine_cs *ring = NULL;
        unsigned reset_counter;
        u32 seqno = 0;
        int ret;

        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
        if (ret)
                return ret;

        ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
        if (ret)
                return ret;

        spin_lock(&file_priv->mm.lock);
        list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;

                ring = request->ring;
                seqno = request->seqno;
        }
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        spin_unlock(&file_priv->mm.lock);

        if (seqno == 0)
                return 0;

        ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

        return ret;
}

static bool
i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
{
        struct drm_i915_gem_object *obj = vma->obj;

        if (alignment &&
            vma->node.start & (alignment - 1))
                return true;

        if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
                return true;

        if (flags & PIN_OFFSET_BIAS &&
            vma->node.start < (flags & PIN_OFFSET_MASK))
                return true;

        return false;
}

int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
                    struct i915_address_space *vm,
                    uint32_t alignment,
                    uint64_t flags)
{
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        struct i915_vma *vma;
        unsigned bound;
        int ret;

        if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
                return -ENODEV;

        if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
                return -EINVAL;

        if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
                return -EINVAL;

        vma = i915_gem_obj_to_vma(obj, vm);
        if (vma) {
                if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                        return -EBUSY;

                if (i915_vma_misplaced(vma, alignment, flags)) {
                        WARN(vma->pin_count,
                             "bo is already pinned with incorrect alignment:"
                             " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
                             i915_gem_obj_offset(obj, vm), alignment,
                             !!(flags & PIN_MAPPABLE),
                             obj->map_and_fenceable);
                        ret = i915_vma_unbind(vma);
                        if (ret)
                                return ret;

                        vma = NULL;
                }
        }

        bound = vma ?
                vma->bound : 0;
        if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
                vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);
        }

        if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
                vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);

        if ((bound ^ vma->bound) & GLOBAL_BIND) {
                bool mappable, fenceable;
                u32 fence_size, fence_alignment;

                fence_size = i915_gem_get_gtt_size(obj->base.dev,
                                                   obj->base.size,
                                                   obj->tiling_mode);
                fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
                                                             obj->base.size,
                                                             obj->tiling_mode,
                                                             true);

                fenceable = (vma->node.size == fence_size &&
                             (vma->node.start & (fence_alignment - 1)) == 0);

                mappable = (vma->node.start + obj->base.size <=
                            dev_priv->gtt.mappable_end);

                obj->map_and_fenceable = mappable && fenceable;
        }

        WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);

        vma->pin_count++;
        if (flags & PIN_MAPPABLE)
                obj->pin_mappable = true;

        return 0;
}

void
i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);

        BUG_ON(!vma);
        BUG_ON(vma->pin_count == 0);
        BUG_ON(!i915_gem_obj_ggtt_bound(obj));

        if (--vma->pin_count == 0)
                obj->pin_mappable = false;
}

bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
                struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);

                WARN_ON(!ggtt_vma ||
                        dev_priv->fence_regs[obj->fence_reg].pin_count >
                        ggtt_vma->pin_count);
                dev_priv->fence_regs[obj->fence_reg].pin_count++;
                return true;
        } else
                return false;
}

void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
                WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
                dev_priv->fence_regs[obj->fence_reg].pin_count--;
        }
}

int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file)
{
        struct drm_i915_gem_pin *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        if (obj->madv != I915_MADV_WILLNEED) {
                DRM_DEBUG("Attempting to pin a purgeable buffer\n");
                ret = -EFAULT;
                goto out;
        }

        if (obj->pin_filp != NULL && obj->pin_filp != file) {
                DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
                ret = -EINVAL;
                goto out;
        }

        if (obj->user_pin_count == ULONG_MAX) {
                ret = -EBUSY;
                goto out;
        }

        if (obj->user_pin_count == 0) {
                ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
                if (ret)
                        goto out;
        }

        obj->user_pin_count++;
        obj->pin_filp = file;

        args->offset = i915_gem_obj_ggtt_offset(obj);
out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_pin *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        if (obj->pin_filp != file) {
                DRM_DEBUG("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
                          args->handle);
                ret = -EINVAL;
                goto out;
        }
        obj->user_pin_count--;
        if (obj->user_pin_count == 0) {
                obj->pin_filp = NULL;
                i915_gem_object_ggtt_unpin(obj);
        }

out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_gem_busy *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        /* Count all active objects as busy, even if they are currently not
         * used by the gpu. Users of this interface expect objects to
         * eventually become non-busy without any further actions, therefore
         * emit any necessary flushes here.
         */
        ret = i915_gem_object_flush_active(obj);

        args->busy = obj->active;
        if (obj->ring) {
                BUILD_BUG_ON(I915_NUM_RINGS > 16);
                args->busy |= intel_ring_flag(obj->ring) << 16;
        }

        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_madvise *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        switch (args->madv) {
        case I915_MADV_DONTNEED:
        case I915_MADV_WILLNEED:
                break;
        default:
                return -EINVAL;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        }

        if (i915_gem_obj_is_pinned(obj)) {
                ret = -EINVAL;
                goto out;
        }

        if (obj->pages &&
            obj->tiling_mode != I915_TILING_NONE &&
            dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                if (obj->madv == I915_MADV_WILLNEED)
                        i915_gem_object_unpin_pages(obj);
                if (args->madv == I915_MADV_WILLNEED)
                        i915_gem_object_pin_pages(obj);
        }

        if (obj->madv != __I915_MADV_PURGED)
                obj->madv = args->madv;

        /* If the object is no longer attached, discard its backing storage. */
        if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
                i915_gem_object_truncate(obj);

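        /* Report whether the backing storage survived; once the object has
         * been truncated (madv == __I915_MADV_PURGED), its contents are
         * irrecoverably gone.
         */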
        args->retained = obj->madv != __I915_MADV_PURGED;

out:
        drm_gem_object_unreference(&obj->base);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops)
{
        INIT_LIST_HEAD(&obj->global_list);
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->obj_exec_link);
        INIT_LIST_HEAD(&obj->vma_list);

        obj->ops = ops;

        obj->fence_reg = I915_FENCE_REG_NONE;
        obj->madv = I915_MADV_WILLNEED;

        i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
        .get_pages = i915_gem_object_get_pages_gtt,
        .put_pages = i915_gem_object_put_pages_gtt,
};

struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size)
{
        struct drm_i915_gem_object *obj;
        struct address_space *mapping;
        gfp_t mask;

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return NULL;

        if (drm_gem_object_init(dev, &obj->base, size) != 0) {
                i915_gem_object_free(obj);
                return NULL;
        }

        mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
        if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
                /* 965gm cannot relocate objects above 4GiB. */
                mask &= ~__GFP_HIGHMEM;
                mask |= __GFP_DMA32;
        }

        mapping = file_inode(obj->base.filp)->i_mapping;
        mapping_set_gfp_mask(mapping, mask);

        i915_gem_object_init(obj, &i915_gem_object_ops);

        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;

        if (HAS_LLC(dev)) {
                /* On some devices, we can have the GPU use the LLC (the CPU
                 * cache) for about a 10% performance improvement
                 * compared to uncached. Graphics requests other than
                 * display scanout are coherent with the CPU in
                 * accessing this cache. This means in this mode we
                 * don't need to clflush on the CPU side, and on the
                 * GPU side we only need to flush internal caches to
                 * get data visible to the CPU.
                 *
                 * However, we maintain the display planes as UC, and so
                 * need to rebind when first used as such.
                 */
                obj->cache_level = I915_CACHE_LLC;
        } else
                obj->cache_level = I915_CACHE_NONE;

        trace_i915_gem_object_create(obj);

        return obj;
}

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
        /* If we are the last user of the backing storage (be it shmemfs
         * pages or stolen etc), we know that the pages are going to be
         * immediately released. In this case, we can then skip copying
         * back the contents from the GPU.
         */

        if (obj->madv != I915_MADV_WILLNEED)
                return false;

        if (obj->base.filp == NULL)
                return true;

        /* At first glance, this looks racy, but then again so would be
         * userspace racing mmap against close. However, the first external
         * reference to the filp can only be obtained through the
         * i915_gem_mmap_ioctl() which safeguards us against the user
         * acquiring such a reference whilst we are in the middle of
         * freeing the object.
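         *
         * An f_count of exactly one therefore means the GEM object itself
         * holds the only remaining reference to the file.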
         */
        return atomic_long_read(&obj->base.filp->f_count) == 1;
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_vma *vma, *next;

        intel_runtime_pm_get(dev_priv);

        trace_i915_gem_object_destroy(obj);

        list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
                int ret;

                vma->pin_count = 0;
                ret = i915_vma_unbind(vma);
                if (WARN_ON(ret == -ERESTARTSYS)) {
                        bool was_interruptible;

                        was_interruptible = dev_priv->mm.interruptible;
                        dev_priv->mm.interruptible = false;

                        WARN_ON(i915_vma_unbind(vma));

                        dev_priv->mm.interruptible = was_interruptible;
                }
        }

        /* Stolen objects don't hold a ref, but do hold a pin count. Fix that
         * up before progressing. */
        if (obj->stolen)
                i915_gem_object_unpin_pages(obj);

        WARN_ON(obj->frontbuffer_bits);

        if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
            dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
            obj->tiling_mode != I915_TILING_NONE)
                i915_gem_object_unpin_pages(obj);

        if (WARN_ON(obj->pages_pin_count))
                obj->pages_pin_count = 0;
        if (discard_backing_storage(obj))
                obj->madv = I915_MADV_DONTNEED;
        i915_gem_object_put_pages(obj);
        i915_gem_object_free_mmap_offset(obj);

        BUG_ON(obj->pages);

        if (obj->base.import_attach)
                drm_prime_gem_destroy(&obj->base, NULL);

        if (obj->ops->release)
                obj->ops->release(obj);

        drm_gem_object_release(&obj->base);
        i915_gem_info_remove_obj(dev_priv, obj->base.size);

        kfree(obj->bit_17);
        i915_gem_object_free(obj);

        intel_runtime_pm_put(dev_priv);
}

struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
                                     struct i915_address_space *vm)
{
        struct i915_vma *vma;
        list_for_each_entry(vma, &obj->vma_list, vma_link)
                if (vma->vm == vm)
                        return vma;

        return NULL;
}

void i915_gem_vma_destroy(struct i915_vma *vma)
{
        struct i915_address_space *vm = NULL;
        WARN_ON(vma->node.allocated);

        /* Keep the vma as a placeholder in the execbuffer reservation lists */
        if (!list_empty(&vma->exec_list))
                return;

        vm = vma->vm;

        if (!i915_is_ggtt(vm))
                i915_ppgtt_put(i915_vm_to_ppgtt(vm));

        list_del(&vma->vma_link);

        kfree(vma);
}

static void
i915_gem_stop_ringbuffers(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
        int i;

        for_each_ring(ring, dev_priv, i)
                dev_priv->gt.stop_ring(ring);
}

int
i915_gem_suspend(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);
        ret = i915_gpu_idle(dev);
        if (ret)
                goto err;

        i915_gem_retire_requests(dev);

        /* Under UMS, be paranoid and evict.
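         * Under KMS the objects stay bound across suspend and are expected
         * to be restored on resume, so eviction is presumably unnecessary
         * there.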
         */
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_gem_evict_everything(dev);

        i915_gem_stop_ringbuffers(dev);
        mutex_unlock(&dev->struct_mutex);

        del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);
        flush_delayed_work(&dev_priv->mm.idle_work);

        return 0;

err:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
        u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
        int i, ret;

        if (!HAS_L3_DPF(dev) || !remap_info)
                return 0;

        ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
        if (ret)
                return ret;

        /*
         * Note: We do not worry about the concurrent register cacheline hang
         * here because no other code should access these registers other than
         * at initialization time.
         */
        for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit(ring, reg_base + i);
                intel_ring_emit(ring, remap_info[i/4]);
        }

        intel_ring_advance(ring);

        return ret;
}

void i915_gem_init_swizzling(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (INTEL_INFO(dev)->gen < 5 ||
            dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
                return;

        I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
                                 DISP_TILE_SURFACE_SWIZZLING);

        if (IS_GEN5(dev))
                return;

        I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
        if (IS_GEN6(dev))
                I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
        else if (IS_GEN7(dev))
                I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
        else if (IS_GEN8(dev))
                I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
        else
                BUG();
}

static bool
intel_enable_blt(struct drm_device *dev)
{
        if (!HAS_BLT(dev))
                return false;

        /* The blitter was dysfunctional on early prototypes */
        if (IS_GEN6(dev) && dev->pdev->revision < 8) {
                DRM_INFO("BLT not supported on this pre-production hardware;"
                         " graphics performance will be degraded.\n");
                return false;
        }

        return true;
}

static void init_unused_ring(struct drm_device *dev, u32 base)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        I915_WRITE(RING_CTL(base), 0);
        I915_WRITE(RING_HEAD(base), 0);
        I915_WRITE(RING_TAIL(base), 0);
        I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_device *dev)
{
        if (IS_I830(dev)) {
                init_unused_ring(dev, PRB1_BASE);
                init_unused_ring(dev, SRB0_BASE);
                init_unused_ring(dev, SRB1_BASE);
                init_unused_ring(dev, SRB2_BASE);
                init_unused_ring(dev, SRB3_BASE);
        } else if (IS_GEN2(dev)) {
                init_unused_ring(dev, SRB0_BASE);
                init_unused_ring(dev, SRB1_BASE);
        } else if (IS_GEN3(dev)) {
                init_unused_ring(dev, PRB1_BASE);
                init_unused_ring(dev, PRB2_BASE);
        }
}

int i915_gem_init_rings(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        /*
         * At least 830
         * can leave some of the unused rings "active" (i.e. head != tail)
         * after resume, which will prevent C3 entry. Make sure all unused
         * rings are totally idle.
         */
        init_unused_rings(dev);

        ret = intel_init_render_ring_buffer(dev);
        if (ret)
                return ret;

        if (HAS_BSD(dev)) {
                ret = intel_init_bsd_ring_buffer(dev);
                if (ret)
                        goto cleanup_render_ring;
        }

        if (intel_enable_blt(dev)) {
                ret = intel_init_blt_ring_buffer(dev);
                if (ret)
                        goto cleanup_bsd_ring;
        }

        if (HAS_VEBOX(dev)) {
                ret = intel_init_vebox_ring_buffer(dev);
                if (ret)
                        goto cleanup_blt_ring;
        }

        if (HAS_BSD2(dev)) {
                ret = intel_init_bsd2_ring_buffer(dev);
                if (ret)
                        goto cleanup_vebox_ring;
        }

        ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
        if (ret)
                goto cleanup_bsd2_ring;

        return 0;

cleanup_bsd2_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);

        return ret;
}

int
i915_gem_init_hw(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret, i;

        if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
                return -EIO;

        if (dev_priv->ellc_size)
                I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

        if (IS_HASWELL(dev))
                I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
                           LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

        if (HAS_PCH_NOP(dev)) {
                if (IS_IVYBRIDGE(dev)) {
                        u32 temp = I915_READ(GEN7_MSG_CTL);
                        temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
                        I915_WRITE(GEN7_MSG_CTL, temp);
                } else if (INTEL_INFO(dev)->gen >= 7) {
                        u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
                        temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
                        I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
                }
        }

        i915_gem_init_swizzling(dev);

        ret = dev_priv->gt.init_rings(dev);
        if (ret)
                return ret;

        for (i = 0; i < NUM_L3_SLICES(dev); i++)
                i915_gem_l3_remap(&dev_priv->ring[RCS], i);

        /*
         * XXX: Contexts should only be initialized once. Doing a switch to the
         * default context switch however is something we'd like to do after
         * reset or thaw (the latter may not actually be necessary for HW, but
         * goes with our code better). Context switching requires rings (for
         * the do_switch), but before enabling PPGTT. So don't move this.
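         *
         * The resulting order below is therefore: ring init, then context
         * enable, then PPGTT setup.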
         */
        ret = i915_gem_context_enable(dev_priv);
        if (ret && ret != -EIO) {
                DRM_ERROR("Context enable failed %d\n", ret);
                i915_gem_cleanup_ringbuffer(dev);

                return ret;
        }

        ret = i915_ppgtt_init_hw(dev);
        if (ret && ret != -EIO) {
                DRM_ERROR("PPGTT enable failed %d\n", ret);
                i915_gem_cleanup_ringbuffer(dev);
        }

        return ret;
}

int i915_gem_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        i915.enable_execlists = intel_sanitize_enable_execlists(dev,
                        i915.enable_execlists);

        mutex_lock(&dev->struct_mutex);

        if (IS_VALLEYVIEW(dev)) {
                /* VLVA0 (potential hack), BIOS isn't actually waking us */
                I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
                if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
                              VLV_GTLC_ALLOWWAKEACK), 10))
                        DRM_DEBUG_DRIVER("allow wake ack timed out\n");
        }

        if (!i915.enable_execlists) {
                dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
                dev_priv->gt.init_rings = i915_gem_init_rings;
                dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
                dev_priv->gt.stop_ring = intel_stop_ring_buffer;
        } else {
                dev_priv->gt.do_execbuf = intel_execlists_submission;
                dev_priv->gt.init_rings = intel_logical_rings_init;
                dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
                dev_priv->gt.stop_ring = intel_logical_ring_stop;
        }

        ret = i915_gem_init_userptr(dev);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        i915_gem_init_global_gtt(dev);

        ret = i915_gem_context_init(dev);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        ret = i915_gem_init_hw(dev);
        if (ret == -EIO) {
                /* Allow ring initialisation to fail by marking the GPU as
                 * wedged. But we only want to do this where the GPU is angry,
                 * for all other failures, such as an allocation failure, bail.
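                 *
                 * Marking the GPU wedged (and clearing ret to 0 below) lets
                 * the rest of driver load continue with the GPU unusable,
                 * rather than failing the probe outright.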
                 */
                DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
                atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
                ret = 0;
        }
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
        int i;

        for_each_ring(ring, dev_priv, i)
                dev_priv->gt.cleanup_ring(ring);
}

static void
init_ring_lists(struct intel_engine_cs *ring)
{
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
}

void i915_init_vm(struct drm_i915_private *dev_priv,
                  struct i915_address_space *vm)
{
        if (!i915_is_ggtt(vm))
                drm_mm_init(&vm->mm, vm->start, vm->total);
        vm->dev = dev_priv->dev;
        INIT_LIST_HEAD(&vm->active_list);
        INIT_LIST_HEAD(&vm->inactive_list);
        INIT_LIST_HEAD(&vm->global_link);
        list_add_tail(&vm->global_link, &dev_priv->vm_list);
}

void
i915_gem_load(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;

        dev_priv->slab =
                kmem_cache_create("i915_gem_object",
                                  sizeof(struct drm_i915_gem_object), 0,
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);

        INIT_LIST_HEAD(&dev_priv->vm_list);
        i915_init_vm(dev_priv, &dev_priv->gtt.base);

        INIT_LIST_HEAD(&dev_priv->context_list);
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        for (i = 0; i < I915_NUM_RINGS; i++)
                init_ring_lists(&dev_priv->ring[i]);
        for (i = 0; i < I915_MAX_NUM_FENCES; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
        INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
                          i915_gem_idle_work_handler);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

        /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
        if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
                I915_WRITE(MI_ARB_STATE,
                           _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
        }

        dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

        /* Old X drivers will take 0-2 for front, back, depth buffers */
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                dev_priv->fence_reg_start = 3;

        if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
                dev_priv->num_fence_regs = 32;
        else if (INTEL_INFO(dev)->gen >= 4 ||
                 IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                dev_priv->num_fence_regs = 16;
        else
                dev_priv->num_fence_regs = 8;

        /* Initialize fence registers to zero */
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        i915_gem_restore_fences(dev);

        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);

        dev_priv->mm.interruptible = true;

        dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
        dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
        dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&dev_priv->mm.shrinker);

        dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
        register_oom_notifier(&dev_priv->mm.oom_notifier);

        mutex_init(&dev_priv->fb_tracking.lock);
}

void
i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        cancel_delayed_work_sync(&file_priv->mm.idle_work);

        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
        spin_lock(&file_priv->mm.lock);
        while (!list_empty(&file_priv->mm.request_list)) {
                struct drm_i915_gem_request *request;

                request = list_first_entry(&file_priv->mm.request_list,
                                           struct drm_i915_gem_request,
                                           client_list);
                list_del(&request->client_list);
                request->file_priv = NULL;
        }
        spin_unlock(&file_priv->mm.lock);
}

static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
        struct drm_i915_file_private *file_priv =
                container_of(work, typeof(*file_priv), mm.idle_work.work);

        atomic_set(&file_priv->rps_wait_boost, false);
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv;
        int ret;

        DRM_DEBUG_DRIVER("\n");

        file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv)
                return -ENOMEM;

        file->driver_priv = file_priv;
        file_priv->dev_priv = dev->dev_private;
        file_priv->file = file;

        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);
        INIT_DELAYED_WORK(&file_priv->mm.idle_work,
                          i915_gem_file_idle_work_handler);

        ret = i915_gem_context_open(dev, file);
        if (ret)
                kfree(file_priv);

        return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
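 *
 * Must be called with struct_mutex held, as checked by the WARN_ONs below.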
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
                       struct drm_i915_gem_object *new,
                       unsigned frontbuffer_bits)
{
        if (old) {
                WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
                WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
                old->frontbuffer_bits &= ~frontbuffer_bits;
        }

        if (new) {
                WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
                WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
                new->frontbuffer_bits |= frontbuffer_bits;
        }
}

static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
        if (!mutex_is_locked(mutex))
                return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
        return mutex->owner == task;
#else
        /* Since UP may be pre-empted, we cannot assume that we own the lock */
        return false;
#endif
}

static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
        if (!mutex_trylock(&dev->struct_mutex)) {
                if (!mutex_is_locked_by(&dev->struct_mutex, current))
                        return false;

                if (to_i915(dev)->mm.shrinker_no_lock_stealing)
                        return false;

                *unlock = false;
        } else
                *unlock = true;

        return true;
}

static int num_vma_bound(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;
        int count = 0;

        list_for_each_entry(vma, &obj->vma_list, vma_link)
                if (drm_mm_node_allocated(&vma->node))
                        count++;

        return count;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        struct drm_device *dev = dev_priv->dev;
        struct drm_i915_gem_object *obj;
        unsigned long count;
        bool unlock;

        if (!i915_gem_shrinker_lock(dev, &unlock))
                return 0;

        count = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (obj->pages_pin_count == 0)
                        count += obj->base.size >> PAGE_SHIFT;

        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (!i915_gem_obj_is_pinned(obj) &&
                    obj->pages_pin_count == num_vma_bound(obj))
                        count += obj->base.size >> PAGE_SHIFT;
        }

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        return count;
}

/* All the new VM stuff */
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
                                  struct i915_address_space *vm)
{
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;

        WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

        list_for_each_entry(vma, &o->vma_list, vma_link) {
                if (vma->vm == vm)
                        return vma->node.start;
        }

        WARN(1, "%s vma for this object not found.\n",
             i915_is_ggtt(vm) ?
"global" : "ppgtt"); 5252 return -1; 5253 } 5254 5255 bool i915_gem_obj_bound(struct drm_i915_gem_object *o, 5256 struct i915_address_space *vm) 5257 { 5258 struct i915_vma *vma; 5259 5260 list_for_each_entry(vma, &o->vma_list, vma_link) 5261 if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) 5262 return true; 5263 5264 return false; 5265 } 5266 5267 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) 5268 { 5269 struct i915_vma *vma; 5270 5271 list_for_each_entry(vma, &o->vma_list, vma_link) 5272 if (drm_mm_node_allocated(&vma->node)) 5273 return true; 5274 5275 return false; 5276 } 5277 5278 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, 5279 struct i915_address_space *vm) 5280 { 5281 struct drm_i915_private *dev_priv = o->base.dev->dev_private; 5282 struct i915_vma *vma; 5283 5284 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); 5285 5286 BUG_ON(list_empty(&o->vma_list)); 5287 5288 list_for_each_entry(vma, &o->vma_list, vma_link) 5289 if (vma->vm == vm) 5290 return vma->node.size; 5291 5292 return 0; 5293 } 5294 5295 static unsigned long 5296 i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) 5297 { 5298 struct drm_i915_private *dev_priv = 5299 container_of(shrinker, struct drm_i915_private, mm.shrinker); 5300 struct drm_device *dev = dev_priv->dev; 5301 unsigned long freed; 5302 bool unlock; 5303 5304 if (!i915_gem_shrinker_lock(dev, &unlock)) 5305 return SHRINK_STOP; 5306 5307 freed = i915_gem_shrink(dev_priv, 5308 sc->nr_to_scan, 5309 I915_SHRINK_BOUND | 5310 I915_SHRINK_UNBOUND | 5311 I915_SHRINK_PURGEABLE); 5312 if (freed < sc->nr_to_scan) 5313 freed += i915_gem_shrink(dev_priv, 5314 sc->nr_to_scan - freed, 5315 I915_SHRINK_BOUND | 5316 I915_SHRINK_UNBOUND); 5317 if (unlock) 5318 mutex_unlock(&dev->struct_mutex); 5319 5320 return freed; 5321 } 5322 5323 static int 5324 i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) 5325 { 5326 struct drm_i915_private *dev_priv = 5327 container_of(nb, struct drm_i915_private, mm.oom_notifier); 5328 struct drm_device *dev = dev_priv->dev; 5329 struct drm_i915_gem_object *obj; 5330 unsigned long timeout = msecs_to_jiffies(5000) + 1; 5331 unsigned long pinned, bound, unbound, freed_pages; 5332 bool was_interruptible; 5333 bool unlock; 5334 5335 while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) { 5336 schedule_timeout_killable(1); 5337 if (fatal_signal_pending(current)) 5338 return NOTIFY_DONE; 5339 } 5340 if (timeout == 0) { 5341 pr_err("Unable to purge GPU memory due lock contention.\n"); 5342 return NOTIFY_DONE; 5343 } 5344 5345 was_interruptible = dev_priv->mm.interruptible; 5346 dev_priv->mm.interruptible = false; 5347 5348 freed_pages = i915_gem_shrink_all(dev_priv); 5349 5350 dev_priv->mm.interruptible = was_interruptible; 5351 5352 /* Because we may be allocating inside our own driver, we cannot 5353 * assert that there are no objects with pinned pages that are not 5354 * being pointed to by hardware. 
         */
        unbound = bound = pinned = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
                if (!obj->base.filp) /* not backed by a freeable object */
                        continue;

                if (obj->pages_pin_count)
                        pinned += obj->base.size;
                else
                        unbound += obj->base.size;
        }
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (!obj->base.filp)
                        continue;

                if (obj->pages_pin_count)
                        pinned += obj->base.size;
                else
                        bound += obj->base.size;
        }

        if (unlock)
                mutex_unlock(&dev->struct_mutex);

        if (freed_pages || unbound || bound)
                pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
                        freed_pages << PAGE_SHIFT, pinned);
        if (unbound || bound)
                pr_err("%lu and %lu bytes still available in the "
                       "bound and unbound GPU page lists.\n",
                       bound, unbound);

        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
}

struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;

        vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
        if (vma->vm != i915_obj_to_ggtt(obj))
                return NULL;

        return vma;
}