1 /* 2 * Copyright © 2008-2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * 26 */ 27 28 #include <drm/drmP.h> 29 #include <drm/drm_vma_manager.h> 30 #include <drm/i915_drm.h> 31 #include "i915_drv.h" 32 #include "i915_vgpu.h" 33 #include "i915_trace.h" 34 #include "intel_drv.h" 35 #include <linux/shmem_fs.h> 36 #include <linux/slab.h> 37 #include <linux/swap.h> 38 #include <linux/pci.h> 39 #include <linux/dma-buf.h> 40 41 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); 42 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); 43 static __must_check int 44 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, 45 bool readonly); 46 static void 47 i915_gem_object_retire(struct drm_i915_gem_object *obj); 48 49 static void i915_gem_write_fence(struct drm_device *dev, int reg, 50 struct drm_i915_gem_object *obj); 51 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, 52 struct drm_i915_fence_reg *fence, 53 bool enable); 54 55 static bool cpu_cache_is_coherent(struct drm_device *dev, 56 enum i915_cache_level level) 57 { 58 return HAS_LLC(dev) || level != I915_CACHE_NONE; 59 } 60 61 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) 62 { 63 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) 64 return true; 65 66 return obj->pin_display; 67 } 68 69 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) 70 { 71 if (obj->tiling_mode) 72 i915_gem_release_mmap(obj); 73 74 /* As we do not have an associated fence register, we will force 75 * a tiling change if we ever need to acquire one. 
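	 * The GTT mmap was already zapped above for tiled objects, so any
	 * further userspace access refaults through i915_gem_fault() and
	 * picks up the new fence state.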
76 */ 77 obj->fence_dirty = false; 78 obj->fence_reg = I915_FENCE_REG_NONE; 79 } 80 81 /* some bookkeeping */ 82 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 83 size_t size) 84 { 85 spin_lock(&dev_priv->mm.object_stat_lock); 86 dev_priv->mm.object_count++; 87 dev_priv->mm.object_memory += size; 88 spin_unlock(&dev_priv->mm.object_stat_lock); 89 } 90 91 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, 92 size_t size) 93 { 94 spin_lock(&dev_priv->mm.object_stat_lock); 95 dev_priv->mm.object_count--; 96 dev_priv->mm.object_memory -= size; 97 spin_unlock(&dev_priv->mm.object_stat_lock); 98 } 99 100 static int 101 i915_gem_wait_for_error(struct i915_gpu_error *error) 102 { 103 int ret; 104 105 #define EXIT_COND (!i915_reset_in_progress(error) || \ 106 i915_terminally_wedged(error)) 107 if (EXIT_COND) 108 return 0; 109 110 /* 111 * Only wait 10 seconds for the gpu reset to complete to avoid hanging 112 * userspace. If it takes that long something really bad is going on and 113 * we should simply try to bail out and fail as gracefully as possible. 114 */ 115 ret = wait_event_interruptible_timeout(error->reset_queue, 116 EXIT_COND, 117 10*HZ); 118 if (ret == 0) { 119 DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); 120 return -EIO; 121 } else if (ret < 0) { 122 return ret; 123 } 124 #undef EXIT_COND 125 126 return 0; 127 } 128 129 int i915_mutex_lock_interruptible(struct drm_device *dev) 130 { 131 struct drm_i915_private *dev_priv = dev->dev_private; 132 int ret; 133 134 ret = i915_gem_wait_for_error(&dev_priv->gpu_error); 135 if (ret) 136 return ret; 137 138 ret = mutex_lock_interruptible(&dev->struct_mutex); 139 if (ret) 140 return ret; 141 142 WARN_ON(i915_verify_lists(dev)); 143 return 0; 144 } 145 146 int 147 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 148 struct drm_file *file) 149 { 150 struct drm_i915_private *dev_priv = dev->dev_private; 151 struct drm_i915_gem_get_aperture *args = data; 152 struct drm_i915_gem_object *obj; 153 size_t pinned; 154 155 pinned = 0; 156 mutex_lock(&dev->struct_mutex); 157 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) 158 if (i915_gem_obj_is_pinned(obj)) 159 pinned += i915_gem_obj_ggtt_size(obj); 160 mutex_unlock(&dev->struct_mutex); 161 162 args->aper_size = dev_priv->gtt.base.total; 163 args->aper_available_size = args->aper_size - pinned; 164 165 return 0; 166 } 167 168 static int 169 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) 170 { 171 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; 172 char *vaddr = obj->phys_handle->vaddr; 173 struct sg_table *st; 174 struct scatterlist *sg; 175 int i; 176 177 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) 178 return -EINVAL; 179 180 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { 181 struct page *page; 182 char *src; 183 184 page = shmem_read_mapping_page(mapping, i); 185 if (IS_ERR(page)) 186 return PTR_ERR(page); 187 188 src = kmap_atomic(page); 189 memcpy(vaddr, src, PAGE_SIZE); 190 drm_clflush_virt_range(vaddr, PAGE_SIZE); 191 kunmap_atomic(src); 192 193 page_cache_release(page); 194 vaddr += PAGE_SIZE; 195 } 196 197 i915_gem_chipset_flush(obj->base.dev); 198 199 st = kmalloc(sizeof(*st), GFP_KERNEL); 200 if (st == NULL) 201 return -ENOMEM; 202 203 if (sg_alloc_table(st, 1, GFP_KERNEL)) { 204 kfree(st); 205 return -ENOMEM; 206 } 207 208 sg = st->sgl; 209 sg->offset = 0; 210 sg->length = obj->base.size; 211 212 sg_dma_address(sg) = obj->phys_handle->busaddr; 213 
sg_dma_len(sg) = obj->base.size; 214 215 obj->pages = st; 216 obj->has_dma_mapping = true; 217 return 0; 218 } 219 220 static void 221 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj) 222 { 223 int ret; 224 225 BUG_ON(obj->madv == __I915_MADV_PURGED); 226 227 ret = i915_gem_object_set_to_cpu_domain(obj, true); 228 if (ret) { 229 /* In the event of a disaster, abandon all caches and 230 * hope for the best. 231 */ 232 WARN_ON(ret != -EIO); 233 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; 234 } 235 236 if (obj->madv == I915_MADV_DONTNEED) 237 obj->dirty = 0; 238 239 if (obj->dirty) { 240 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; 241 char *vaddr = obj->phys_handle->vaddr; 242 int i; 243 244 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { 245 struct page *page; 246 char *dst; 247 248 page = shmem_read_mapping_page(mapping, i); 249 if (IS_ERR(page)) 250 continue; 251 252 dst = kmap_atomic(page); 253 drm_clflush_virt_range(vaddr, PAGE_SIZE); 254 memcpy(dst, vaddr, PAGE_SIZE); 255 kunmap_atomic(dst); 256 257 set_page_dirty(page); 258 if (obj->madv == I915_MADV_WILLNEED) 259 mark_page_accessed(page); 260 page_cache_release(page); 261 vaddr += PAGE_SIZE; 262 } 263 obj->dirty = 0; 264 } 265 266 sg_free_table(obj->pages); 267 kfree(obj->pages); 268 269 obj->has_dma_mapping = false; 270 } 271 272 static void 273 i915_gem_object_release_phys(struct drm_i915_gem_object *obj) 274 { 275 drm_pci_free(obj->base.dev, obj->phys_handle); 276 } 277 278 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { 279 .get_pages = i915_gem_object_get_pages_phys, 280 .put_pages = i915_gem_object_put_pages_phys, 281 .release = i915_gem_object_release_phys, 282 }; 283 284 static int 285 drop_pages(struct drm_i915_gem_object *obj) 286 { 287 struct i915_vma *vma, *next; 288 int ret; 289 290 drm_gem_object_reference(&obj->base); 291 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) 292 if (i915_vma_unbind(vma)) 293 break; 294 295 ret = i915_gem_object_put_pages(obj); 296 drm_gem_object_unreference(&obj->base); 297 298 return ret; 299 } 300 301 int 302 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 303 int align) 304 { 305 drm_dma_handle_t *phys; 306 int ret; 307 308 if (obj->phys_handle) { 309 if ((unsigned long)obj->phys_handle->vaddr & (align -1)) 310 return -EBUSY; 311 312 return 0; 313 } 314 315 if (obj->madv != I915_MADV_WILLNEED) 316 return -EFAULT; 317 318 if (obj->base.filp == NULL) 319 return -EINVAL; 320 321 ret = drop_pages(obj); 322 if (ret) 323 return ret; 324 325 /* create a new object */ 326 phys = drm_pci_alloc(obj->base.dev, obj->base.size, align); 327 if (!phys) 328 return -ENOMEM; 329 330 obj->phys_handle = phys; 331 obj->ops = &i915_gem_phys_ops; 332 333 return i915_gem_object_get_pages(obj); 334 } 335 336 static int 337 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, 338 struct drm_i915_gem_pwrite *args, 339 struct drm_file *file_priv) 340 { 341 struct drm_device *dev = obj->base.dev; 342 void *vaddr = obj->phys_handle->vaddr + args->offset; 343 char __user *user_data = to_user_ptr(args->data_ptr); 344 int ret = 0; 345 346 /* We manually control the domain here and pretend that it 347 * remains coherent i.e. in the GTT domain, like shmem_pwrite. 
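	 * The clflush plus chipset flush at the end of this function makes
	 * the CPU writes visible to the GPU without an actual domain change.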
348 */ 349 ret = i915_gem_object_wait_rendering(obj, false); 350 if (ret) 351 return ret; 352 353 intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); 354 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { 355 unsigned long unwritten; 356 357 /* The physical object once assigned is fixed for the lifetime 358 * of the obj, so we can safely drop the lock and continue 359 * to access vaddr. 360 */ 361 mutex_unlock(&dev->struct_mutex); 362 unwritten = copy_from_user(vaddr, user_data, args->size); 363 mutex_lock(&dev->struct_mutex); 364 if (unwritten) { 365 ret = -EFAULT; 366 goto out; 367 } 368 } 369 370 drm_clflush_virt_range(vaddr, args->size); 371 i915_gem_chipset_flush(dev); 372 373 out: 374 intel_fb_obj_flush(obj, false); 375 return ret; 376 } 377 378 void *i915_gem_object_alloc(struct drm_device *dev) 379 { 380 struct drm_i915_private *dev_priv = dev->dev_private; 381 return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL); 382 } 383 384 void i915_gem_object_free(struct drm_i915_gem_object *obj) 385 { 386 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 387 kmem_cache_free(dev_priv->slab, obj); 388 } 389 390 static int 391 i915_gem_create(struct drm_file *file, 392 struct drm_device *dev, 393 uint64_t size, 394 uint32_t *handle_p) 395 { 396 struct drm_i915_gem_object *obj; 397 int ret; 398 u32 handle; 399 400 size = roundup(size, PAGE_SIZE); 401 if (size == 0) 402 return -EINVAL; 403 404 /* Allocate the new object */ 405 obj = i915_gem_alloc_object(dev, size); 406 if (obj == NULL) 407 return -ENOMEM; 408 409 ret = drm_gem_handle_create(file, &obj->base, &handle); 410 /* drop reference from allocate - handle holds it now */ 411 drm_gem_object_unreference_unlocked(&obj->base); 412 if (ret) 413 return ret; 414 415 *handle_p = handle; 416 return 0; 417 } 418 419 int 420 i915_gem_dumb_create(struct drm_file *file, 421 struct drm_device *dev, 422 struct drm_mode_create_dumb *args) 423 { 424 /* have to work out size/pitch and return them */ 425 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); 426 args->size = args->pitch * args->height; 427 return i915_gem_create(file, dev, 428 args->size, &args->handle); 429 } 430 431 /** 432 * Creates a new mm object and returns a handle to it. 
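 * @dev: drm device pointer
 * @data: ioctl data blob, a struct drm_i915_gem_create (size in, handle out)
 * @file: drm file pointer
 *
 * A minimal userspace sketch, assuming libdrm's drmIoctl() and the
 * DRM_IOCTL_I915_GEM_CREATE wrapper (use_handle() is a hypothetical helper):
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_handle(create.handle);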
433 */ 434 int 435 i915_gem_create_ioctl(struct drm_device *dev, void *data, 436 struct drm_file *file) 437 { 438 struct drm_i915_gem_create *args = data; 439 440 return i915_gem_create(file, dev, 441 args->size, &args->handle); 442 } 443 444 static inline int 445 __copy_to_user_swizzled(char __user *cpu_vaddr, 446 const char *gpu_vaddr, int gpu_offset, 447 int length) 448 { 449 int ret, cpu_offset = 0; 450 451 while (length > 0) { 452 int cacheline_end = ALIGN(gpu_offset + 1, 64); 453 int this_length = min(cacheline_end - gpu_offset, length); 454 int swizzled_gpu_offset = gpu_offset ^ 64; 455 456 ret = __copy_to_user(cpu_vaddr + cpu_offset, 457 gpu_vaddr + swizzled_gpu_offset, 458 this_length); 459 if (ret) 460 return ret + length; 461 462 cpu_offset += this_length; 463 gpu_offset += this_length; 464 length -= this_length; 465 } 466 467 return 0; 468 } 469 470 static inline int 471 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset, 472 const char __user *cpu_vaddr, 473 int length) 474 { 475 int ret, cpu_offset = 0; 476 477 while (length > 0) { 478 int cacheline_end = ALIGN(gpu_offset + 1, 64); 479 int this_length = min(cacheline_end - gpu_offset, length); 480 int swizzled_gpu_offset = gpu_offset ^ 64; 481 482 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset, 483 cpu_vaddr + cpu_offset, 484 this_length); 485 if (ret) 486 return ret + length; 487 488 cpu_offset += this_length; 489 gpu_offset += this_length; 490 length -= this_length; 491 } 492 493 return 0; 494 } 495 496 /* 497 * Pins the specified object's pages and synchronizes the object with 498 * GPU accesses. Sets needs_clflush to non-zero if the caller should 499 * flush the object from the CPU cache. 500 */ 501 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 502 int *needs_clflush) 503 { 504 int ret; 505 506 *needs_clflush = 0; 507 508 if (!obj->base.filp) 509 return -EINVAL; 510 511 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { 512 /* If we're not in the cpu read domain, set ourself into the gtt 513 * read domain and manually flush cachelines (if required). This 514 * optimizes for the case when the gpu will dirty the data 515 * anyway again before the next pread happens. */ 516 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev, 517 obj->cache_level); 518 ret = i915_gem_object_wait_rendering(obj, true); 519 if (ret) 520 return ret; 521 522 i915_gem_object_retire(obj); 523 } 524 525 ret = i915_gem_object_get_pages(obj); 526 if (ret) 527 return ret; 528 529 i915_gem_object_pin_pages(obj); 530 531 return ret; 532 } 533 534 /* Per-page copy function for the shmem pread fastpath. 535 * Flushes invalid cachelines before reading the target if 536 * needs_clflush is set. */ 537 static int 538 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length, 539 char __user *user_data, 540 bool page_do_bit17_swizzling, bool needs_clflush) 541 { 542 char *vaddr; 543 int ret; 544 545 if (unlikely(page_do_bit17_swizzling)) 546 return -EINVAL; 547 548 vaddr = kmap_atomic(page); 549 if (needs_clflush) 550 drm_clflush_virt_range(vaddr + shmem_page_offset, 551 page_length); 552 ret = __copy_to_user_inatomic(user_data, 553 vaddr + shmem_page_offset, 554 page_length); 555 kunmap_atomic(vaddr); 556 557 return ret ? 
-EFAULT : 0; 558 } 559 560 static void 561 shmem_clflush_swizzled_range(char *addr, unsigned long length, 562 bool swizzled) 563 { 564 if (unlikely(swizzled)) { 565 unsigned long start = (unsigned long) addr; 566 unsigned long end = (unsigned long) addr + length; 567 568 /* For swizzling simply ensure that we always flush both 569 * channels. Lame, but simple and it works. Swizzled 570 * pwrite/pread is far from a hotpath - current userspace 571 * doesn't use it at all. */ 572 start = round_down(start, 128); 573 end = round_up(end, 128); 574 575 drm_clflush_virt_range((void *)start, end - start); 576 } else { 577 drm_clflush_virt_range(addr, length); 578 } 579 580 } 581 582 /* Only difference to the fast-path function is that this can handle bit17 583 * and uses non-atomic copy and kmap functions. */ 584 static int 585 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length, 586 char __user *user_data, 587 bool page_do_bit17_swizzling, bool needs_clflush) 588 { 589 char *vaddr; 590 int ret; 591 592 vaddr = kmap(page); 593 if (needs_clflush) 594 shmem_clflush_swizzled_range(vaddr + shmem_page_offset, 595 page_length, 596 page_do_bit17_swizzling); 597 598 if (page_do_bit17_swizzling) 599 ret = __copy_to_user_swizzled(user_data, 600 vaddr, shmem_page_offset, 601 page_length); 602 else 603 ret = __copy_to_user(user_data, 604 vaddr + shmem_page_offset, 605 page_length); 606 kunmap(page); 607 608 return ret ? - EFAULT : 0; 609 } 610 611 static int 612 i915_gem_shmem_pread(struct drm_device *dev, 613 struct drm_i915_gem_object *obj, 614 struct drm_i915_gem_pread *args, 615 struct drm_file *file) 616 { 617 char __user *user_data; 618 ssize_t remain; 619 loff_t offset; 620 int shmem_page_offset, page_length, ret = 0; 621 int obj_do_bit17_swizzling, page_do_bit17_swizzling; 622 int prefaulted = 0; 623 int needs_clflush = 0; 624 struct sg_page_iter sg_iter; 625 626 user_data = to_user_ptr(args->data_ptr); 627 remain = args->size; 628 629 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 630 631 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush); 632 if (ret) 633 return ret; 634 635 offset = args->offset; 636 637 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 638 offset >> PAGE_SHIFT) { 639 struct page *page = sg_page_iter_page(&sg_iter); 640 641 if (remain <= 0) 642 break; 643 644 /* Operation in this page 645 * 646 * shmem_page_offset = offset within page in shmem file 647 * page_length = bytes to copy for this page 648 */ 649 shmem_page_offset = offset_in_page(offset); 650 page_length = remain; 651 if ((shmem_page_offset + page_length) > PAGE_SIZE) 652 page_length = PAGE_SIZE - shmem_page_offset; 653 654 page_do_bit17_swizzling = obj_do_bit17_swizzling && 655 (page_to_phys(page) & (1 << 17)) != 0; 656 657 ret = shmem_pread_fast(page, shmem_page_offset, page_length, 658 user_data, page_do_bit17_swizzling, 659 needs_clflush); 660 if (ret == 0) 661 goto next_page; 662 663 mutex_unlock(&dev->struct_mutex); 664 665 if (likely(!i915.prefault_disable) && !prefaulted) { 666 ret = fault_in_multipages_writeable(user_data, remain); 667 /* Userspace is tricking us, but we've already clobbered 668 * its pages with the prefault and promised to write the 669 * data up to the first fault. Hence ignore any errors 670 * and just continue. 
*/ 671 (void)ret; 672 prefaulted = 1; 673 } 674 675 ret = shmem_pread_slow(page, shmem_page_offset, page_length, 676 user_data, page_do_bit17_swizzling, 677 needs_clflush); 678 679 mutex_lock(&dev->struct_mutex); 680 681 if (ret) 682 goto out; 683 684 next_page: 685 remain -= page_length; 686 user_data += page_length; 687 offset += page_length; 688 } 689 690 out: 691 i915_gem_object_unpin_pages(obj); 692 693 return ret; 694 } 695 696 /** 697 * Reads data from the object referenced by handle. 698 * 699 * On error, the contents of *data are undefined. 700 */ 701 int 702 i915_gem_pread_ioctl(struct drm_device *dev, void *data, 703 struct drm_file *file) 704 { 705 struct drm_i915_gem_pread *args = data; 706 struct drm_i915_gem_object *obj; 707 int ret = 0; 708 709 if (args->size == 0) 710 return 0; 711 712 if (!access_ok(VERIFY_WRITE, 713 to_user_ptr(args->data_ptr), 714 args->size)) 715 return -EFAULT; 716 717 ret = i915_mutex_lock_interruptible(dev); 718 if (ret) 719 return ret; 720 721 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 722 if (&obj->base == NULL) { 723 ret = -ENOENT; 724 goto unlock; 725 } 726 727 /* Bounds check source. */ 728 if (args->offset > obj->base.size || 729 args->size > obj->base.size - args->offset) { 730 ret = -EINVAL; 731 goto out; 732 } 733 734 /* prime objects have no backing filp to GEM pread/pwrite 735 * pages from. 736 */ 737 if (!obj->base.filp) { 738 ret = -EINVAL; 739 goto out; 740 } 741 742 trace_i915_gem_object_pread(obj, args->offset, args->size); 743 744 ret = i915_gem_shmem_pread(dev, obj, args, file); 745 746 out: 747 drm_gem_object_unreference(&obj->base); 748 unlock: 749 mutex_unlock(&dev->struct_mutex); 750 return ret; 751 } 752 753 /* This is the fast write path which cannot handle 754 * page faults in the source data 755 */ 756 757 static inline int 758 fast_user_write(struct io_mapping *mapping, 759 loff_t page_base, int page_offset, 760 char __user *user_data, 761 int length) 762 { 763 void __iomem *vaddr_atomic; 764 void *vaddr; 765 unsigned long unwritten; 766 767 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); 768 /* We can use the cpu mem copy function because this is X86. */ 769 vaddr = (void __force*)vaddr_atomic + page_offset; 770 unwritten = __copy_from_user_inatomic_nocache(vaddr, 771 user_data, length); 772 io_mapping_unmap_atomic(vaddr_atomic); 773 return unwritten; 774 } 775 776 /** 777 * This is the fast pwrite path, where we copy the data directly from the 778 * user into the GTT, uncached. 
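 * The copy runs under an atomic WC io-mapping of the aperture (see
 * fast_user_write() above); if the user buffer would fault we return
 * -EFAULT and the ioctl falls back to the shmem pwrite path.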
779 */ 780 static int 781 i915_gem_gtt_pwrite_fast(struct drm_device *dev, 782 struct drm_i915_gem_object *obj, 783 struct drm_i915_gem_pwrite *args, 784 struct drm_file *file) 785 { 786 struct drm_i915_private *dev_priv = dev->dev_private; 787 ssize_t remain; 788 loff_t offset, page_base; 789 char __user *user_data; 790 int page_offset, page_length, ret; 791 792 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK); 793 if (ret) 794 goto out; 795 796 ret = i915_gem_object_set_to_gtt_domain(obj, true); 797 if (ret) 798 goto out_unpin; 799 800 ret = i915_gem_object_put_fence(obj); 801 if (ret) 802 goto out_unpin; 803 804 user_data = to_user_ptr(args->data_ptr); 805 remain = args->size; 806 807 offset = i915_gem_obj_ggtt_offset(obj) + args->offset; 808 809 intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT); 810 811 while (remain > 0) { 812 /* Operation in this page 813 * 814 * page_base = page offset within aperture 815 * page_offset = offset within page 816 * page_length = bytes to copy for this page 817 */ 818 page_base = offset & PAGE_MASK; 819 page_offset = offset_in_page(offset); 820 page_length = remain; 821 if ((page_offset + remain) > PAGE_SIZE) 822 page_length = PAGE_SIZE - page_offset; 823 824 /* If we get a fault while copying data, then (presumably) our 825 * source page isn't available. Return the error and we'll 826 * retry in the slow path. 827 */ 828 if (fast_user_write(dev_priv->gtt.mappable, page_base, 829 page_offset, user_data, page_length)) { 830 ret = -EFAULT; 831 goto out_flush; 832 } 833 834 remain -= page_length; 835 user_data += page_length; 836 offset += page_length; 837 } 838 839 out_flush: 840 intel_fb_obj_flush(obj, false); 841 out_unpin: 842 i915_gem_object_ggtt_unpin(obj); 843 out: 844 return ret; 845 } 846 847 /* Per-page copy function for the shmem pwrite fastpath. 848 * Flushes invalid cachelines before writing to the target if 849 * needs_clflush_before is set and flushes out any written cachelines after 850 * writing if needs_clflush is set. */ 851 static int 852 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length, 853 char __user *user_data, 854 bool page_do_bit17_swizzling, 855 bool needs_clflush_before, 856 bool needs_clflush_after) 857 { 858 char *vaddr; 859 int ret; 860 861 if (unlikely(page_do_bit17_swizzling)) 862 return -EINVAL; 863 864 vaddr = kmap_atomic(page); 865 if (needs_clflush_before) 866 drm_clflush_virt_range(vaddr + shmem_page_offset, 867 page_length); 868 ret = __copy_from_user_inatomic(vaddr + shmem_page_offset, 869 user_data, page_length); 870 if (needs_clflush_after) 871 drm_clflush_virt_range(vaddr + shmem_page_offset, 872 page_length); 873 kunmap_atomic(vaddr); 874 875 return ret ? -EFAULT : 0; 876 } 877 878 /* Only difference to the fast-path function is that this can handle bit17 879 * and uses non-atomic copy and kmap functions. 
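 * It is called with struct_mutex dropped, so the copy may fault and sleep
 * while paging in the user buffer.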
*/ 880 static int 881 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length, 882 char __user *user_data, 883 bool page_do_bit17_swizzling, 884 bool needs_clflush_before, 885 bool needs_clflush_after) 886 { 887 char *vaddr; 888 int ret; 889 890 vaddr = kmap(page); 891 if (unlikely(needs_clflush_before || page_do_bit17_swizzling)) 892 shmem_clflush_swizzled_range(vaddr + shmem_page_offset, 893 page_length, 894 page_do_bit17_swizzling); 895 if (page_do_bit17_swizzling) 896 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset, 897 user_data, 898 page_length); 899 else 900 ret = __copy_from_user(vaddr + shmem_page_offset, 901 user_data, 902 page_length); 903 if (needs_clflush_after) 904 shmem_clflush_swizzled_range(vaddr + shmem_page_offset, 905 page_length, 906 page_do_bit17_swizzling); 907 kunmap(page); 908 909 return ret ? -EFAULT : 0; 910 } 911 912 static int 913 i915_gem_shmem_pwrite(struct drm_device *dev, 914 struct drm_i915_gem_object *obj, 915 struct drm_i915_gem_pwrite *args, 916 struct drm_file *file) 917 { 918 ssize_t remain; 919 loff_t offset; 920 char __user *user_data; 921 int shmem_page_offset, page_length, ret = 0; 922 int obj_do_bit17_swizzling, page_do_bit17_swizzling; 923 int hit_slowpath = 0; 924 int needs_clflush_after = 0; 925 int needs_clflush_before = 0; 926 struct sg_page_iter sg_iter; 927 928 user_data = to_user_ptr(args->data_ptr); 929 remain = args->size; 930 931 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 932 933 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 934 /* If we're not in the cpu write domain, set ourself into the gtt 935 * write domain and manually flush cachelines (if required). This 936 * optimizes for the case when the gpu will use the data 937 * right away and we therefore have to clflush anyway. */ 938 needs_clflush_after = cpu_write_needs_clflush(obj); 939 ret = i915_gem_object_wait_rendering(obj, false); 940 if (ret) 941 return ret; 942 943 i915_gem_object_retire(obj); 944 } 945 /* Same trick applies to invalidate partially written cachelines read 946 * before writing. */ 947 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) 948 needs_clflush_before = 949 !cpu_cache_is_coherent(dev, obj->cache_level); 950 951 ret = i915_gem_object_get_pages(obj); 952 if (ret) 953 return ret; 954 955 intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); 956 957 i915_gem_object_pin_pages(obj); 958 959 offset = args->offset; 960 obj->dirty = 1; 961 962 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 963 offset >> PAGE_SHIFT) { 964 struct page *page = sg_page_iter_page(&sg_iter); 965 int partial_cacheline_write; 966 967 if (remain <= 0) 968 break; 969 970 /* Operation in this page 971 * 972 * shmem_page_offset = offset within page in shmem file 973 * page_length = bytes to copy for this page 974 */ 975 shmem_page_offset = offset_in_page(offset); 976 977 page_length = remain; 978 if ((shmem_page_offset + page_length) > PAGE_SIZE) 979 page_length = PAGE_SIZE - shmem_page_offset; 980 981 /* If we don't overwrite a cacheline completely we need to be 982 * careful to have up-to-date data by first clflushing. Don't 983 * overcomplicate things and flush the entire patch. 
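		 * (A write is considered partial when its start offset or
		 * length is not aligned to boot_cpu_data.x86_clflush_size.)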
*/ 984 partial_cacheline_write = needs_clflush_before && 985 ((shmem_page_offset | page_length) 986 & (boot_cpu_data.x86_clflush_size - 1)); 987 988 page_do_bit17_swizzling = obj_do_bit17_swizzling && 989 (page_to_phys(page) & (1 << 17)) != 0; 990 991 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length, 992 user_data, page_do_bit17_swizzling, 993 partial_cacheline_write, 994 needs_clflush_after); 995 if (ret == 0) 996 goto next_page; 997 998 hit_slowpath = 1; 999 mutex_unlock(&dev->struct_mutex); 1000 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length, 1001 user_data, page_do_bit17_swizzling, 1002 partial_cacheline_write, 1003 needs_clflush_after); 1004 1005 mutex_lock(&dev->struct_mutex); 1006 1007 if (ret) 1008 goto out; 1009 1010 next_page: 1011 remain -= page_length; 1012 user_data += page_length; 1013 offset += page_length; 1014 } 1015 1016 out: 1017 i915_gem_object_unpin_pages(obj); 1018 1019 if (hit_slowpath) { 1020 /* 1021 * Fixup: Flush cpu caches in case we didn't flush the dirty 1022 * cachelines in-line while writing and the object moved 1023 * out of the cpu write domain while we've dropped the lock. 1024 */ 1025 if (!needs_clflush_after && 1026 obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 1027 if (i915_gem_clflush_object(obj, obj->pin_display)) 1028 i915_gem_chipset_flush(dev); 1029 } 1030 } 1031 1032 if (needs_clflush_after) 1033 i915_gem_chipset_flush(dev); 1034 1035 intel_fb_obj_flush(obj, false); 1036 return ret; 1037 } 1038 1039 /** 1040 * Writes data to the object referenced by handle. 1041 * 1042 * On error, the contents of the buffer that were to be modified are undefined. 1043 */ 1044 int 1045 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1046 struct drm_file *file) 1047 { 1048 struct drm_i915_private *dev_priv = dev->dev_private; 1049 struct drm_i915_gem_pwrite *args = data; 1050 struct drm_i915_gem_object *obj; 1051 int ret; 1052 1053 if (args->size == 0) 1054 return 0; 1055 1056 if (!access_ok(VERIFY_READ, 1057 to_user_ptr(args->data_ptr), 1058 args->size)) 1059 return -EFAULT; 1060 1061 if (likely(!i915.prefault_disable)) { 1062 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), 1063 args->size); 1064 if (ret) 1065 return -EFAULT; 1066 } 1067 1068 intel_runtime_pm_get(dev_priv); 1069 1070 ret = i915_mutex_lock_interruptible(dev); 1071 if (ret) 1072 goto put_rpm; 1073 1074 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 1075 if (&obj->base == NULL) { 1076 ret = -ENOENT; 1077 goto unlock; 1078 } 1079 1080 /* Bounds check destination. */ 1081 if (args->offset > obj->base.size || 1082 args->size > obj->base.size - args->offset) { 1083 ret = -EINVAL; 1084 goto out; 1085 } 1086 1087 /* prime objects have no backing filp to GEM pread/pwrite 1088 * pages from. 1089 */ 1090 if (!obj->base.filp) { 1091 ret = -EINVAL; 1092 goto out; 1093 } 1094 1095 trace_i915_gem_object_pwrite(obj, args->offset, args->size); 1096 1097 ret = -EFAULT; 1098 /* We can only do the GTT pwrite on untiled buffers, as otherwise 1099 * it would end up going through the fenced access, and we'll get 1100 * different detiling behavior between reading and writing. 1101 * pread/pwrite currently are reading and writing from the CPU 1102 * perspective, requiring manual detiling by the client. 
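	 * The GTT fast path below is therefore only attempted for untiled
	 * objects that are outside the CPU write domain and would need a
	 * clflush anyway.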
	 */
	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
put_rpm:
	intel_runtime_pm_put(dev_priv);

	return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		/*
		 * Check if GPU Reset is in progress - we need intel_ring_begin
		 * to work properly to reinit the hw state while the gpu is
		 * still marked as reset-in-progress. Handle this with a flag.
		 */
		if (!error->reload_in_reset)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Compare arbitrary request against outstanding lazy request. Emit on match.
 */
int
i915_gem_check_olr(struct drm_i915_gem_request *req)
{
	int ret;

	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));

	ret = 0;
	if (req == req->ring->outstanding_lazy_request)
		ret = i915_add_request(req->ring);

	return ret;
}

static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_engine_cs *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @reset_counter: reset sequence associated with the given request
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
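 *
 * If @file_priv is non-NULL it is also used to track the RPS boost applied
 * while waiting on gen6+ render requests (see can_wait_boost() above), so a
 * single client does not stack up repeated boosts.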
 */
int __i915_wait_request(struct drm_i915_gem_request *req,
			unsigned reset_counter,
			bool interruptible,
			s64 *timeout,
			struct drm_i915_file_private *file_priv)
{
	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	s64 before, now;
	int ret;

	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");

	if (i915_gem_request_completed(req, true))
		return 0;

	timeout_expire = timeout ?
		jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;

	if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(req);
	before = ktime_get_raw_ns();
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_gem_request_completed(req, false)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	now = ktime_get_raw_ns();
	trace_i915_gem_request_wait_end(req);

	if (!irq_test_in_progress)
		ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		s64 tres = *timeout - (now - before);

		*timeout = tres < 0 ? 0 : tres;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
			*timeout = 0;
	}

	return ret;
}

/**
 * Waits for a request to be signaled, and cleans up the
 * request and object lists appropriately for that event.
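 *
 * @req: request to wait upon; the caller must hold struct_mutex and pass a
 * non-NULL request.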
 */
int
i915_wait_request(struct drm_i915_gem_request *req)
{
	struct drm_device *dev;
	struct drm_i915_private *dev_priv;
	bool interruptible;
	unsigned reset_counter;
	int ret;

	BUG_ON(req == NULL);

	dev = req->ring->dev;
	dev_priv = dev->dev_private;
	interruptible = dev_priv->mm.interruptible;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(req);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	i915_gem_request_reference(req);
	ret = __i915_wait_request(req, reset_counter,
				  interruptible, NULL, NULL);
	i915_gem_request_unreference(req);
	return ret;
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
{
	if (!obj->active)
		return 0;

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_req is always the earlier of
	 * the two (read/write) requests, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	i915_gem_request_assign(&obj->last_write_req, NULL);

	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct drm_i915_gem_request *req;
	int ret;

	req = readonly ? obj->last_write_req : obj->last_read_req;
	if (!req)
		return 0;

	ret = i915_wait_request(req);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct drm_i915_file_private *file_priv,
					    bool readonly)
{
	struct drm_i915_gem_request *req;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned reset_counter;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	req = readonly ? obj->last_write_req : obj->last_read_req;
	if (!req)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(req);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	i915_gem_request_reference(req);
	mutex_unlock(&dev->struct_mutex);
	ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);
	mutex_lock(&dev->struct_mutex);
	i915_gem_request_unreference(req);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
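 *
 * @dev: drm device
 * @data: ioctl data blob, a struct drm_i915_gem_set_domain
 * @file: drm file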
1443 */ 1444 int 1445 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 1446 struct drm_file *file) 1447 { 1448 struct drm_i915_gem_set_domain *args = data; 1449 struct drm_i915_gem_object *obj; 1450 uint32_t read_domains = args->read_domains; 1451 uint32_t write_domain = args->write_domain; 1452 int ret; 1453 1454 /* Only handle setting domains to types used by the CPU. */ 1455 if (write_domain & I915_GEM_GPU_DOMAINS) 1456 return -EINVAL; 1457 1458 if (read_domains & I915_GEM_GPU_DOMAINS) 1459 return -EINVAL; 1460 1461 /* Having something in the write domain implies it's in the read 1462 * domain, and only that read domain. Enforce that in the request. 1463 */ 1464 if (write_domain != 0 && read_domains != write_domain) 1465 return -EINVAL; 1466 1467 ret = i915_mutex_lock_interruptible(dev); 1468 if (ret) 1469 return ret; 1470 1471 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 1472 if (&obj->base == NULL) { 1473 ret = -ENOENT; 1474 goto unlock; 1475 } 1476 1477 /* Try to flush the object off the GPU without holding the lock. 1478 * We will repeat the flush holding the lock in the normal manner 1479 * to catch cases where we are gazumped. 1480 */ 1481 ret = i915_gem_object_wait_rendering__nonblocking(obj, 1482 file->driver_priv, 1483 !write_domain); 1484 if (ret) 1485 goto unref; 1486 1487 if (read_domains & I915_GEM_DOMAIN_GTT) 1488 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); 1489 else 1490 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); 1491 1492 unref: 1493 drm_gem_object_unreference(&obj->base); 1494 unlock: 1495 mutex_unlock(&dev->struct_mutex); 1496 return ret; 1497 } 1498 1499 /** 1500 * Called when user space has done writes to this buffer 1501 */ 1502 int 1503 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 1504 struct drm_file *file) 1505 { 1506 struct drm_i915_gem_sw_finish *args = data; 1507 struct drm_i915_gem_object *obj; 1508 int ret = 0; 1509 1510 ret = i915_mutex_lock_interruptible(dev); 1511 if (ret) 1512 return ret; 1513 1514 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 1515 if (&obj->base == NULL) { 1516 ret = -ENOENT; 1517 goto unlock; 1518 } 1519 1520 /* Pinned buffers may be scanout, so flush the cache */ 1521 if (obj->pin_display) 1522 i915_gem_object_flush_cpu_write_domain(obj); 1523 1524 drm_gem_object_unreference(&obj->base); 1525 unlock: 1526 mutex_unlock(&dev->struct_mutex); 1527 return ret; 1528 } 1529 1530 /** 1531 * Maps the contents of an object, returning the address it is mapped 1532 * into. 1533 * 1534 * While the mapping holds a reference on the contents of the object, it doesn't 1535 * imply a ref on the object itself. 1536 * 1537 * IMPORTANT: 1538 * 1539 * DRM driver writers who look a this function as an example for how to do GEM 1540 * mmap support, please don't implement mmap support like here. The modern way 1541 * to implement DRM mmap support is with an mmap offset ioctl (like 1542 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly. 1543 * That way debug tooling like valgrind will understand what's going on, hiding 1544 * the mmap call in a driver private ioctl will break that. The i915 driver only 1545 * does cpu mmaps this way because we didn't know better. 
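 *
 * @dev: drm device
 * @data: ioctl data blob, a struct drm_i915_gem_mmap (offset/size in, CPU
 *	  address out in addr_ptr)
 * @file: drm file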
1546 */ 1547 int 1548 i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 1549 struct drm_file *file) 1550 { 1551 struct drm_i915_gem_mmap *args = data; 1552 struct drm_gem_object *obj; 1553 unsigned long addr; 1554 1555 if (args->flags & ~(I915_MMAP_WC)) 1556 return -EINVAL; 1557 1558 if (args->flags & I915_MMAP_WC && !cpu_has_pat) 1559 return -ENODEV; 1560 1561 obj = drm_gem_object_lookup(dev, file, args->handle); 1562 if (obj == NULL) 1563 return -ENOENT; 1564 1565 /* prime objects have no backing filp to GEM mmap 1566 * pages from. 1567 */ 1568 if (!obj->filp) { 1569 drm_gem_object_unreference_unlocked(obj); 1570 return -EINVAL; 1571 } 1572 1573 addr = vm_mmap(obj->filp, 0, args->size, 1574 PROT_READ | PROT_WRITE, MAP_SHARED, 1575 args->offset); 1576 if (args->flags & I915_MMAP_WC) { 1577 struct mm_struct *mm = current->mm; 1578 struct vm_area_struct *vma; 1579 1580 down_write(&mm->mmap_sem); 1581 vma = find_vma(mm, addr); 1582 if (vma) 1583 vma->vm_page_prot = 1584 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 1585 else 1586 addr = -ENOMEM; 1587 up_write(&mm->mmap_sem); 1588 } 1589 drm_gem_object_unreference_unlocked(obj); 1590 if (IS_ERR((void *)addr)) 1591 return addr; 1592 1593 args->addr_ptr = (uint64_t) addr; 1594 1595 return 0; 1596 } 1597 1598 /** 1599 * i915_gem_fault - fault a page into the GTT 1600 * vma: VMA in question 1601 * vmf: fault info 1602 * 1603 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped 1604 * from userspace. The fault handler takes care of binding the object to 1605 * the GTT (if needed), allocating and programming a fence register (again, 1606 * only if needed based on whether the old reg is still valid or the object 1607 * is tiled) and inserting a new PTE into the faulting process. 1608 * 1609 * Note that the faulting process may involve evicting existing objects 1610 * from the GTT and/or fence registers to make room. So performance may 1611 * suffer if the GTT working set is large or there are few fence registers 1612 * left. 1613 */ 1614 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1615 { 1616 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); 1617 struct drm_device *dev = obj->base.dev; 1618 struct drm_i915_private *dev_priv = dev->dev_private; 1619 pgoff_t page_offset; 1620 unsigned long pfn; 1621 int ret = 0; 1622 bool write = !!(vmf->flags & FAULT_FLAG_WRITE); 1623 1624 intel_runtime_pm_get(dev_priv); 1625 1626 /* We don't use vmf->pgoff since that has the fake offset */ 1627 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> 1628 PAGE_SHIFT; 1629 1630 ret = i915_mutex_lock_interruptible(dev); 1631 if (ret) 1632 goto out; 1633 1634 trace_i915_gem_object_fault(obj, page_offset, true, write); 1635 1636 /* Try to flush the object off the GPU first without holding the lock. 1637 * Upon reacquiring the lock, we will perform our sanity checks and then 1638 * repeat the flush holding the lock in the normal manner to catch cases 1639 * where we are gazumped. 1640 */ 1641 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write); 1642 if (ret) 1643 goto unlock; 1644 1645 /* Access to snoopable pages through the GTT is incoherent. 
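	 * Reject the fault instead; the -EFAULT below is turned into
	 * VM_FAULT_SIGBUS for userspace by the error handling at the end of
	 * this function.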
*/ 1646 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) { 1647 ret = -EFAULT; 1648 goto unlock; 1649 } 1650 1651 /* Now bind it into the GTT if needed */ 1652 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE); 1653 if (ret) 1654 goto unlock; 1655 1656 ret = i915_gem_object_set_to_gtt_domain(obj, write); 1657 if (ret) 1658 goto unpin; 1659 1660 ret = i915_gem_object_get_fence(obj); 1661 if (ret) 1662 goto unpin; 1663 1664 /* Finally, remap it using the new GTT offset */ 1665 pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj); 1666 pfn >>= PAGE_SHIFT; 1667 1668 if (!obj->fault_mappable) { 1669 unsigned long size = min_t(unsigned long, 1670 vma->vm_end - vma->vm_start, 1671 obj->base.size); 1672 int i; 1673 1674 for (i = 0; i < size >> PAGE_SHIFT; i++) { 1675 ret = vm_insert_pfn(vma, 1676 (unsigned long)vma->vm_start + i * PAGE_SIZE, 1677 pfn + i); 1678 if (ret) 1679 break; 1680 } 1681 1682 obj->fault_mappable = true; 1683 } else 1684 ret = vm_insert_pfn(vma, 1685 (unsigned long)vmf->virtual_address, 1686 pfn + page_offset); 1687 unpin: 1688 i915_gem_object_ggtt_unpin(obj); 1689 unlock: 1690 mutex_unlock(&dev->struct_mutex); 1691 out: 1692 switch (ret) { 1693 case -EIO: 1694 /* 1695 * We eat errors when the gpu is terminally wedged to avoid 1696 * userspace unduly crashing (gl has no provisions for mmaps to 1697 * fail). But any other -EIO isn't ours (e.g. swap in failure) 1698 * and so needs to be reported. 1699 */ 1700 if (!i915_terminally_wedged(&dev_priv->gpu_error)) { 1701 ret = VM_FAULT_SIGBUS; 1702 break; 1703 } 1704 case -EAGAIN: 1705 /* 1706 * EAGAIN means the gpu is hung and we'll wait for the error 1707 * handler to reset everything when re-faulting in 1708 * i915_mutex_lock_interruptible. 1709 */ 1710 case 0: 1711 case -ERESTARTSYS: 1712 case -EINTR: 1713 case -EBUSY: 1714 /* 1715 * EBUSY is ok: this just means that another thread 1716 * already did the job. 1717 */ 1718 ret = VM_FAULT_NOPAGE; 1719 break; 1720 case -ENOMEM: 1721 ret = VM_FAULT_OOM; 1722 break; 1723 case -ENOSPC: 1724 case -EFAULT: 1725 ret = VM_FAULT_SIGBUS; 1726 break; 1727 default: 1728 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); 1729 ret = VM_FAULT_SIGBUS; 1730 break; 1731 } 1732 1733 intel_runtime_pm_put(dev_priv); 1734 return ret; 1735 } 1736 1737 /** 1738 * i915_gem_release_mmap - remove physical page mappings 1739 * @obj: obj in question 1740 * 1741 * Preserve the reservation of the mmapping with the DRM core code, but 1742 * relinquish ownership of the pages back to the system. 1743 * 1744 * It is vital that we remove the page mapping if we have mapped a tiled 1745 * object through the GTT and then lose the fence register due to 1746 * resource pressure. Similarly if the object has been moved out of the 1747 * aperture, than pages mapped into userspace must be revoked. Removing the 1748 * mapping will then trigger a page fault on the next user access, allowing 1749 * fixup by i915_gem_fault(). 
1750 */ 1751 void 1752 i915_gem_release_mmap(struct drm_i915_gem_object *obj) 1753 { 1754 if (!obj->fault_mappable) 1755 return; 1756 1757 drm_vma_node_unmap(&obj->base.vma_node, 1758 obj->base.dev->anon_inode->i_mapping); 1759 obj->fault_mappable = false; 1760 } 1761 1762 void 1763 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv) 1764 { 1765 struct drm_i915_gem_object *obj; 1766 1767 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) 1768 i915_gem_release_mmap(obj); 1769 } 1770 1771 uint32_t 1772 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) 1773 { 1774 uint32_t gtt_size; 1775 1776 if (INTEL_INFO(dev)->gen >= 4 || 1777 tiling_mode == I915_TILING_NONE) 1778 return size; 1779 1780 /* Previous chips need a power-of-two fence region when tiling */ 1781 if (INTEL_INFO(dev)->gen == 3) 1782 gtt_size = 1024*1024; 1783 else 1784 gtt_size = 512*1024; 1785 1786 while (gtt_size < size) 1787 gtt_size <<= 1; 1788 1789 return gtt_size; 1790 } 1791 1792 /** 1793 * i915_gem_get_gtt_alignment - return required GTT alignment for an object 1794 * @obj: object to check 1795 * 1796 * Return the required GTT alignment for an object, taking into account 1797 * potential fence register mapping. 1798 */ 1799 uint32_t 1800 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, 1801 int tiling_mode, bool fenced) 1802 { 1803 /* 1804 * Minimum alignment is 4k (GTT page size), but might be greater 1805 * if a fence register is needed for the object. 1806 */ 1807 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) || 1808 tiling_mode == I915_TILING_NONE) 1809 return 4096; 1810 1811 /* 1812 * Previous chips need to be aligned to the size of the smallest 1813 * fence register that can contain the object. 1814 */ 1815 return i915_gem_get_gtt_size(dev, size, tiling_mode); 1816 } 1817 1818 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) 1819 { 1820 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 1821 int ret; 1822 1823 if (drm_vma_node_has_offset(&obj->base.vma_node)) 1824 return 0; 1825 1826 dev_priv->mm.shrinker_no_lock_stealing = true; 1827 1828 ret = drm_gem_create_mmap_offset(&obj->base); 1829 if (ret != -ENOSPC) 1830 goto out; 1831 1832 /* Badly fragmented mmap space? The only way we can recover 1833 * space is by destroying unwanted objects. We can't randomly release 1834 * mmap_offsets as userspace expects them to be persistent for the 1835 * lifetime of the objects. The closest we can is to release the 1836 * offsets on purgeable objects by truncating it and marking it purged, 1837 * which prevents userspace from ever using that object again. 
1838 */ 1839 i915_gem_shrink(dev_priv, 1840 obj->base.size >> PAGE_SHIFT, 1841 I915_SHRINK_BOUND | 1842 I915_SHRINK_UNBOUND | 1843 I915_SHRINK_PURGEABLE); 1844 ret = drm_gem_create_mmap_offset(&obj->base); 1845 if (ret != -ENOSPC) 1846 goto out; 1847 1848 i915_gem_shrink_all(dev_priv); 1849 ret = drm_gem_create_mmap_offset(&obj->base); 1850 out: 1851 dev_priv->mm.shrinker_no_lock_stealing = false; 1852 1853 return ret; 1854 } 1855 1856 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) 1857 { 1858 drm_gem_free_mmap_offset(&obj->base); 1859 } 1860 1861 int 1862 i915_gem_mmap_gtt(struct drm_file *file, 1863 struct drm_device *dev, 1864 uint32_t handle, 1865 uint64_t *offset) 1866 { 1867 struct drm_i915_private *dev_priv = dev->dev_private; 1868 struct drm_i915_gem_object *obj; 1869 int ret; 1870 1871 ret = i915_mutex_lock_interruptible(dev); 1872 if (ret) 1873 return ret; 1874 1875 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); 1876 if (&obj->base == NULL) { 1877 ret = -ENOENT; 1878 goto unlock; 1879 } 1880 1881 if (obj->base.size > dev_priv->gtt.mappable_end) { 1882 ret = -E2BIG; 1883 goto out; 1884 } 1885 1886 if (obj->madv != I915_MADV_WILLNEED) { 1887 DRM_DEBUG("Attempting to mmap a purgeable buffer\n"); 1888 ret = -EFAULT; 1889 goto out; 1890 } 1891 1892 ret = i915_gem_object_create_mmap_offset(obj); 1893 if (ret) 1894 goto out; 1895 1896 *offset = drm_vma_node_offset_addr(&obj->base.vma_node); 1897 1898 out: 1899 drm_gem_object_unreference(&obj->base); 1900 unlock: 1901 mutex_unlock(&dev->struct_mutex); 1902 return ret; 1903 } 1904 1905 /** 1906 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing 1907 * @dev: DRM device 1908 * @data: GTT mapping ioctl data 1909 * @file: GEM object info 1910 * 1911 * Simply returns the fake offset to userspace so it can mmap it. 1912 * The mmap call will end up in drm_gem_mmap(), which will set things 1913 * up so we can get faults in the handler above. 1914 * 1915 * The fault handler will take care of binding the object into the GTT 1916 * (since it may have been evicted to make room for something), allocating 1917 * a fence register, and mapping the appropriate aperture address into 1918 * userspace. 1919 */ 1920 int 1921 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 1922 struct drm_file *file) 1923 { 1924 struct drm_i915_gem_mmap_gtt *args = data; 1925 1926 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); 1927 } 1928 1929 /* Immediately discard the backing storage */ 1930 static void 1931 i915_gem_object_truncate(struct drm_i915_gem_object *obj) 1932 { 1933 i915_gem_object_free_mmap_offset(obj); 1934 1935 if (obj->base.filp == NULL) 1936 return; 1937 1938 /* Our goal here is to return as much of the memory as 1939 * is possible back to the system as we are called from OOM. 1940 * To do this we must instruct the shmfs to drop all of its 1941 * backing pages, *now*. 
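	 * Once the object is marked __I915_MADV_PURGED below it can never get
	 * its backing pages back: i915_gem_object_get_pages() will refuse to
	 * repopulate it.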
1942 */ 1943 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); 1944 obj->madv = __I915_MADV_PURGED; 1945 } 1946 1947 /* Try to discard unwanted pages */ 1948 static void 1949 i915_gem_object_invalidate(struct drm_i915_gem_object *obj) 1950 { 1951 struct address_space *mapping; 1952 1953 switch (obj->madv) { 1954 case I915_MADV_DONTNEED: 1955 i915_gem_object_truncate(obj); 1956 case __I915_MADV_PURGED: 1957 return; 1958 } 1959 1960 if (obj->base.filp == NULL) 1961 return; 1962 1963 mapping = file_inode(obj->base.filp)->i_mapping, 1964 invalidate_mapping_pages(mapping, 0, (loff_t)-1); 1965 } 1966 1967 static void 1968 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) 1969 { 1970 struct sg_page_iter sg_iter; 1971 int ret; 1972 1973 BUG_ON(obj->madv == __I915_MADV_PURGED); 1974 1975 ret = i915_gem_object_set_to_cpu_domain(obj, true); 1976 if (ret) { 1977 /* In the event of a disaster, abandon all caches and 1978 * hope for the best. 1979 */ 1980 WARN_ON(ret != -EIO); 1981 i915_gem_clflush_object(obj, true); 1982 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; 1983 } 1984 1985 if (i915_gem_object_needs_bit17_swizzle(obj)) 1986 i915_gem_object_save_bit_17_swizzle(obj); 1987 1988 if (obj->madv == I915_MADV_DONTNEED) 1989 obj->dirty = 0; 1990 1991 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 1992 struct page *page = sg_page_iter_page(&sg_iter); 1993 1994 if (obj->dirty) 1995 set_page_dirty(page); 1996 1997 if (obj->madv == I915_MADV_WILLNEED) 1998 mark_page_accessed(page); 1999 2000 page_cache_release(page); 2001 } 2002 obj->dirty = 0; 2003 2004 sg_free_table(obj->pages); 2005 kfree(obj->pages); 2006 } 2007 2008 int 2009 i915_gem_object_put_pages(struct drm_i915_gem_object *obj) 2010 { 2011 const struct drm_i915_gem_object_ops *ops = obj->ops; 2012 2013 if (obj->pages == NULL) 2014 return 0; 2015 2016 if (obj->pages_pin_count) 2017 return -EBUSY; 2018 2019 BUG_ON(i915_gem_obj_bound_any(obj)); 2020 2021 /* ->put_pages might need to allocate memory for the bit17 swizzle 2022 * array, hence protect them from being reaped by removing them from gtt 2023 * lists early. */ 2024 list_del(&obj->global_list); 2025 2026 ops->put_pages(obj); 2027 obj->pages = NULL; 2028 2029 i915_gem_object_invalidate(obj); 2030 2031 return 0; 2032 } 2033 2034 static int 2035 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) 2036 { 2037 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2038 int page_count, i; 2039 struct address_space *mapping; 2040 struct sg_table *st; 2041 struct scatterlist *sg; 2042 struct sg_page_iter sg_iter; 2043 struct page *page; 2044 unsigned long last_pfn = 0; /* suppress gcc warning */ 2045 gfp_t gfp; 2046 2047 /* Assert that the object is not currently in any GPU domain. As it 2048 * wasn't in the GTT, there shouldn't be any way it could have been in 2049 * a GPU cache 2050 */ 2051 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); 2052 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); 2053 2054 st = kmalloc(sizeof(*st), GFP_KERNEL); 2055 if (st == NULL) 2056 return -ENOMEM; 2057 2058 page_count = obj->base.size / PAGE_SIZE; 2059 if (sg_alloc_table(st, page_count, GFP_KERNEL)) { 2060 kfree(st); 2061 return -ENOMEM; 2062 } 2063 2064 /* Get the list of pages out of our struct file. They'll be pinned 2065 * at this point until we release them. 
2066 * 2067 * Fail silently without starting the shrinker 2068 */ 2069 mapping = file_inode(obj->base.filp)->i_mapping; 2070 gfp = mapping_gfp_mask(mapping); 2071 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; 2072 gfp &= ~(__GFP_IO | __GFP_WAIT); 2073 sg = st->sgl; 2074 st->nents = 0; 2075 for (i = 0; i < page_count; i++) { 2076 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2077 if (IS_ERR(page)) { 2078 i915_gem_shrink(dev_priv, 2079 page_count, 2080 I915_SHRINK_BOUND | 2081 I915_SHRINK_UNBOUND | 2082 I915_SHRINK_PURGEABLE); 2083 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2084 } 2085 if (IS_ERR(page)) { 2086 /* We've tried hard to allocate the memory by reaping 2087 * our own buffer, now let the real VM do its job and 2088 * go down in flames if truly OOM. 2089 */ 2090 i915_gem_shrink_all(dev_priv); 2091 page = shmem_read_mapping_page(mapping, i); 2092 if (IS_ERR(page)) 2093 goto err_pages; 2094 } 2095 #ifdef CONFIG_SWIOTLB 2096 if (swiotlb_nr_tbl()) { 2097 st->nents++; 2098 sg_set_page(sg, page, PAGE_SIZE, 0); 2099 sg = sg_next(sg); 2100 continue; 2101 } 2102 #endif 2103 if (!i || page_to_pfn(page) != last_pfn + 1) { 2104 if (i) 2105 sg = sg_next(sg); 2106 st->nents++; 2107 sg_set_page(sg, page, PAGE_SIZE, 0); 2108 } else { 2109 sg->length += PAGE_SIZE; 2110 } 2111 last_pfn = page_to_pfn(page); 2112 2113 /* Check that the i965g/gm workaround works. */ 2114 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); 2115 } 2116 #ifdef CONFIG_SWIOTLB 2117 if (!swiotlb_nr_tbl()) 2118 #endif 2119 sg_mark_end(sg); 2120 obj->pages = st; 2121 2122 if (i915_gem_object_needs_bit17_swizzle(obj)) 2123 i915_gem_object_do_bit_17_swizzle(obj); 2124 2125 if (obj->tiling_mode != I915_TILING_NONE && 2126 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 2127 i915_gem_object_pin_pages(obj); 2128 2129 return 0; 2130 2131 err_pages: 2132 sg_mark_end(sg); 2133 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) 2134 page_cache_release(sg_page_iter_page(&sg_iter)); 2135 sg_free_table(st); 2136 kfree(st); 2137 2138 /* shmemfs first checks if there is enough memory to allocate the page 2139 * and reports ENOSPC should there be insufficient, along with the usual 2140 * ENOMEM for a genuine allocation failure. 2141 * 2142 * We use ENOSPC in our driver to mean that we have run out of aperture 2143 * space and so want to translate the error from shmemfs back to our 2144 * usual understanding of ENOMEM. 2145 */ 2146 if (PTR_ERR(page) == -ENOSPC) 2147 return -ENOMEM; 2148 else 2149 return PTR_ERR(page); 2150 } 2151 2152 /* Ensure that the associated pages are gathered from the backing storage 2153 * and pinned into our object. i915_gem_object_get_pages() may be called 2154 * multiple times before they are released by a single call to 2155 * i915_gem_object_put_pages() - once the pages are no longer referenced 2156 * either as a result of memory pressure (reaping pages under the shrinker) 2157 * or as the object is itself released. 
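 *
 * An illustrative calling pattern (a sketch only, not lifted from a
 * specific caller) pairs the lookup with a pin for as long as obj->pages
 * is dereferenced:
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	... access obj->pages ...
 *	i915_gem_object_unpin_pages(obj);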
2158 */ 2159 int 2160 i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2161 { 2162 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2163 const struct drm_i915_gem_object_ops *ops = obj->ops; 2164 int ret; 2165 2166 if (obj->pages) 2167 return 0; 2168 2169 if (obj->madv != I915_MADV_WILLNEED) { 2170 DRM_DEBUG("Attempting to obtain a purgeable object\n"); 2171 return -EFAULT; 2172 } 2173 2174 BUG_ON(obj->pages_pin_count); 2175 2176 ret = ops->get_pages(obj); 2177 if (ret) 2178 return ret; 2179 2180 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list); 2181 return 0; 2182 } 2183 2184 static void 2185 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 2186 struct intel_engine_cs *ring) 2187 { 2188 struct drm_i915_gem_request *req; 2189 struct intel_engine_cs *old_ring; 2190 2191 BUG_ON(ring == NULL); 2192 2193 req = intel_ring_get_request(ring); 2194 old_ring = i915_gem_request_get_ring(obj->last_read_req); 2195 2196 if (old_ring != ring && obj->last_write_req) { 2197 /* Keep the request relative to the current ring */ 2198 i915_gem_request_assign(&obj->last_write_req, req); 2199 } 2200 2201 /* Add a reference if we're newly entering the active list. */ 2202 if (!obj->active) { 2203 drm_gem_object_reference(&obj->base); 2204 obj->active = 1; 2205 } 2206 2207 list_move_tail(&obj->ring_list, &ring->active_list); 2208 2209 i915_gem_request_assign(&obj->last_read_req, req); 2210 } 2211 2212 void i915_vma_move_to_active(struct i915_vma *vma, 2213 struct intel_engine_cs *ring) 2214 { 2215 list_move_tail(&vma->mm_list, &vma->vm->active_list); 2216 return i915_gem_object_move_to_active(vma->obj, ring); 2217 } 2218 2219 static void 2220 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) 2221 { 2222 struct i915_vma *vma; 2223 2224 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); 2225 BUG_ON(!obj->active); 2226 2227 list_for_each_entry(vma, &obj->vma_list, vma_link) { 2228 if (!list_empty(&vma->mm_list)) 2229 list_move_tail(&vma->mm_list, &vma->vm->inactive_list); 2230 } 2231 2232 intel_fb_obj_flush(obj, true); 2233 2234 list_del_init(&obj->ring_list); 2235 2236 i915_gem_request_assign(&obj->last_read_req, NULL); 2237 i915_gem_request_assign(&obj->last_write_req, NULL); 2238 obj->base.write_domain = 0; 2239 2240 i915_gem_request_assign(&obj->last_fenced_req, NULL); 2241 2242 obj->active = 0; 2243 drm_gem_object_unreference(&obj->base); 2244 2245 WARN_ON(i915_verify_lists(dev)); 2246 } 2247 2248 static void 2249 i915_gem_object_retire(struct drm_i915_gem_object *obj) 2250 { 2251 if (obj->last_read_req == NULL) 2252 return; 2253 2254 if (i915_gem_request_completed(obj->last_read_req, true)) 2255 i915_gem_object_move_to_inactive(obj); 2256 } 2257 2258 static int 2259 i915_gem_init_seqno(struct drm_device *dev, u32 seqno) 2260 { 2261 struct drm_i915_private *dev_priv = dev->dev_private; 2262 struct intel_engine_cs *ring; 2263 int ret, i, j; 2264 2265 /* Carefully retire all requests without writing to the rings */ 2266 for_each_ring(ring, dev_priv, i) { 2267 ret = intel_ring_idle(ring); 2268 if (ret) 2269 return ret; 2270 } 2271 i915_gem_retire_requests(dev); 2272 2273 /* Finally reset hw state */ 2274 for_each_ring(ring, dev_priv, i) { 2275 intel_ring_init_seqno(ring, seqno); 2276 2277 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++) 2278 ring->semaphore.sync_seqno[j] = 0; 2279 } 2280 2281 return 0; 2282 } 2283 2284 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) 2285 { 2286 struct drm_i915_private *dev_priv = 
dev->dev_private; 2287 int ret; 2288 2289 if (seqno == 0) 2290 return -EINVAL; 2291 2292 /* HWS page needs to be set less than what we 2293 * will inject to ring 2294 */ 2295 ret = i915_gem_init_seqno(dev, seqno - 1); 2296 if (ret) 2297 return ret; 2298 2299 /* Carefully set the last_seqno value so that wrap 2300 * detection still works 2301 */ 2302 dev_priv->next_seqno = seqno; 2303 dev_priv->last_seqno = seqno - 1; 2304 if (dev_priv->last_seqno == 0) 2305 dev_priv->last_seqno--; 2306 2307 return 0; 2308 } 2309 2310 int 2311 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) 2312 { 2313 struct drm_i915_private *dev_priv = dev->dev_private; 2314 2315 /* reserve 0 for non-seqno */ 2316 if (dev_priv->next_seqno == 0) { 2317 int ret = i915_gem_init_seqno(dev, 0); 2318 if (ret) 2319 return ret; 2320 2321 dev_priv->next_seqno = 1; 2322 } 2323 2324 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++; 2325 return 0; 2326 } 2327 2328 int __i915_add_request(struct intel_engine_cs *ring, 2329 struct drm_file *file, 2330 struct drm_i915_gem_object *obj) 2331 { 2332 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2333 struct drm_i915_gem_request *request; 2334 struct intel_ringbuffer *ringbuf; 2335 u32 request_start; 2336 int ret; 2337 2338 request = ring->outstanding_lazy_request; 2339 if (WARN_ON(request == NULL)) 2340 return -ENOMEM; 2341 2342 if (i915.enable_execlists) { 2343 ringbuf = request->ctx->engine[ring->id].ringbuf; 2344 } else 2345 ringbuf = ring->buffer; 2346 2347 request_start = intel_ring_get_tail(ringbuf); 2348 /* 2349 * Emit any outstanding flushes - execbuf can fail to emit the flush 2350 * after having emitted the batchbuffer command. Hence we need to fix 2351 * things up similar to emitting the lazy request. The difference here 2352 * is that the flush _must_ happen before the next request, no matter 2353 * what. 2354 */ 2355 if (i915.enable_execlists) { 2356 ret = logical_ring_flush_all_caches(ringbuf, request->ctx); 2357 if (ret) 2358 return ret; 2359 } else { 2360 ret = intel_ring_flush_all_caches(ring); 2361 if (ret) 2362 return ret; 2363 } 2364 2365 /* Record the position of the start of the request so that 2366 * should we detect the updated seqno part-way through the 2367 * GPU processing the request, we never over-estimate the 2368 * position of the head. 2369 */ 2370 request->postfix = intel_ring_get_tail(ringbuf); 2371 2372 if (i915.enable_execlists) { 2373 ret = ring->emit_request(ringbuf, request); 2374 if (ret) 2375 return ret; 2376 } else { 2377 ret = ring->add_request(ring); 2378 if (ret) 2379 return ret; 2380 } 2381 2382 request->head = request_start; 2383 request->tail = intel_ring_get_tail(ringbuf); 2384 2385 /* Whilst this request exists, batch_obj will be on the 2386 * active_list, and so will hold the active reference. Only when this 2387 * request is retired will the batch_obj be moved onto the 2388 * inactive_list and lose its active reference. Hence we do not need 2389 * to explicitly hold another reference here. 2390 */ 2391 request->batch_obj = obj; 2392 2393 if (!i915.enable_execlists) { 2394 /* Hold a reference to the current context so that we can inspect 2395 * it later in case a hangcheck error event fires.
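 * The reference taken here is dropped again by
 * i915_gem_context_unreference() in i915_gem_request_free() once the
 * last reference to the request itself goes away.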
2396 */ 2397 request->ctx = ring->last_context; 2398 if (request->ctx) 2399 i915_gem_context_reference(request->ctx); 2400 } 2401 2402 request->emitted_jiffies = jiffies; 2403 list_add_tail(&request->list, &ring->request_list); 2404 request->file_priv = NULL; 2405 2406 if (file) { 2407 struct drm_i915_file_private *file_priv = file->driver_priv; 2408 2409 spin_lock(&file_priv->mm.lock); 2410 request->file_priv = file_priv; 2411 list_add_tail(&request->client_list, 2412 &file_priv->mm.request_list); 2413 spin_unlock(&file_priv->mm.lock); 2414 2415 request->pid = get_pid(task_pid(current)); 2416 } 2417 2418 trace_i915_gem_request_add(request); 2419 ring->outstanding_lazy_request = NULL; 2420 2421 i915_queue_hangcheck(ring->dev); 2422 2423 cancel_delayed_work_sync(&dev_priv->mm.idle_work); 2424 queue_delayed_work(dev_priv->wq, 2425 &dev_priv->mm.retire_work, 2426 round_jiffies_up_relative(HZ)); 2427 intel_mark_busy(dev_priv->dev); 2428 2429 return 0; 2430 } 2431 2432 static inline void 2433 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) 2434 { 2435 struct drm_i915_file_private *file_priv = request->file_priv; 2436 2437 if (!file_priv) 2438 return; 2439 2440 spin_lock(&file_priv->mm.lock); 2441 list_del(&request->client_list); 2442 request->file_priv = NULL; 2443 spin_unlock(&file_priv->mm.lock); 2444 } 2445 2446 static bool i915_context_is_banned(struct drm_i915_private *dev_priv, 2447 const struct intel_context *ctx) 2448 { 2449 unsigned long elapsed; 2450 2451 elapsed = get_seconds() - ctx->hang_stats.guilty_ts; 2452 2453 if (ctx->hang_stats.banned) 2454 return true; 2455 2456 if (ctx->hang_stats.ban_period_seconds && 2457 elapsed <= ctx->hang_stats.ban_period_seconds) { 2458 if (!i915_gem_context_is_default(ctx)) { 2459 DRM_DEBUG("context hanging too fast, banning!\n"); 2460 return true; 2461 } else if (i915_stop_ring_allow_ban(dev_priv)) { 2462 if (i915_stop_ring_allow_warn(dev_priv)) 2463 DRM_ERROR("gpu hanging too fast, banning!\n"); 2464 return true; 2465 } 2466 } 2467 2468 return false; 2469 } 2470 2471 static void i915_set_reset_status(struct drm_i915_private *dev_priv, 2472 struct intel_context *ctx, 2473 const bool guilty) 2474 { 2475 struct i915_ctx_hang_stats *hs; 2476 2477 if (WARN_ON(!ctx)) 2478 return; 2479 2480 hs = &ctx->hang_stats; 2481 2482 if (guilty) { 2483 hs->banned = i915_context_is_banned(dev_priv, ctx); 2484 hs->batch_active++; 2485 hs->guilty_ts = get_seconds(); 2486 } else { 2487 hs->batch_pending++; 2488 } 2489 } 2490 2491 static void i915_gem_free_request(struct drm_i915_gem_request *request) 2492 { 2493 list_del(&request->list); 2494 i915_gem_request_remove_from_client(request); 2495 2496 put_pid(request->pid); 2497 2498 i915_gem_request_unreference(request); 2499 } 2500 2501 void i915_gem_request_free(struct kref *req_ref) 2502 { 2503 struct drm_i915_gem_request *req = container_of(req_ref, 2504 typeof(*req), ref); 2505 struct intel_context *ctx = req->ctx; 2506 2507 if (ctx) { 2508 if (i915.enable_execlists) { 2509 struct intel_engine_cs *ring = req->ring; 2510 2511 if (ctx != ring->default_context) 2512 intel_lr_context_unpin(ring, ctx); 2513 } 2514 2515 i915_gem_context_unreference(ctx); 2516 } 2517 2518 kfree(req); 2519 } 2520 2521 struct drm_i915_gem_request * 2522 i915_gem_find_active_request(struct intel_engine_cs *ring) 2523 { 2524 struct drm_i915_gem_request *request; 2525 2526 list_for_each_entry(request, &ring->request_list, list) { 2527 if (i915_gem_request_completed(request, false)) 2528 continue; 2529 2530 return 
request; 2531 } 2532 2533 return NULL; 2534 } 2535 2536 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, 2537 struct intel_engine_cs *ring) 2538 { 2539 struct drm_i915_gem_request *request; 2540 bool ring_hung; 2541 2542 request = i915_gem_find_active_request(ring); 2543 2544 if (request == NULL) 2545 return; 2546 2547 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; 2548 2549 i915_set_reset_status(dev_priv, request->ctx, ring_hung); 2550 2551 list_for_each_entry_continue(request, &ring->request_list, list) 2552 i915_set_reset_status(dev_priv, request->ctx, false); 2553 } 2554 2555 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, 2556 struct intel_engine_cs *ring) 2557 { 2558 while (!list_empty(&ring->active_list)) { 2559 struct drm_i915_gem_object *obj; 2560 2561 obj = list_first_entry(&ring->active_list, 2562 struct drm_i915_gem_object, 2563 ring_list); 2564 2565 i915_gem_object_move_to_inactive(obj); 2566 } 2567 2568 /* 2569 * Clear the execlists queue up before freeing the requests, as those 2570 * are the ones that keep the context and ringbuffer backing objects 2571 * pinned in place. 2572 */ 2573 while (!list_empty(&ring->execlist_queue)) { 2574 struct drm_i915_gem_request *submit_req; 2575 2576 submit_req = list_first_entry(&ring->execlist_queue, 2577 struct drm_i915_gem_request, 2578 execlist_link); 2579 list_del(&submit_req->execlist_link); 2580 intel_runtime_pm_put(dev_priv); 2581 2582 if (submit_req->ctx != ring->default_context) 2583 intel_lr_context_unpin(ring, submit_req->ctx); 2584 2585 i915_gem_request_unreference(submit_req); 2586 } 2587 2588 /* 2589 * We must free the requests after all the corresponding objects have 2590 * been moved off active lists. Which is the same order as the normal 2591 * retire_requests function does. This is important if object hold 2592 * implicit references on things like e.g. ppgtt address spaces through 2593 * the request. 2594 */ 2595 while (!list_empty(&ring->request_list)) { 2596 struct drm_i915_gem_request *request; 2597 2598 request = list_first_entry(&ring->request_list, 2599 struct drm_i915_gem_request, 2600 list); 2601 2602 i915_gem_free_request(request); 2603 } 2604 2605 /* This may not have been flushed before the reset, so clean it now */ 2606 i915_gem_request_assign(&ring->outstanding_lazy_request, NULL); 2607 } 2608 2609 void i915_gem_restore_fences(struct drm_device *dev) 2610 { 2611 struct drm_i915_private *dev_priv = dev->dev_private; 2612 int i; 2613 2614 for (i = 0; i < dev_priv->num_fence_regs; i++) { 2615 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 2616 2617 /* 2618 * Commit delayed tiling changes if we have an object still 2619 * attached to the fence, otherwise just clear the fence. 2620 */ 2621 if (reg->obj) { 2622 i915_gem_object_update_fence(reg->obj, reg, 2623 reg->obj->tiling_mode); 2624 } else { 2625 i915_gem_write_fence(dev, i, NULL); 2626 } 2627 } 2628 } 2629 2630 void i915_gem_reset(struct drm_device *dev) 2631 { 2632 struct drm_i915_private *dev_priv = dev->dev_private; 2633 struct intel_engine_cs *ring; 2634 int i; 2635 2636 /* 2637 * Before we free the objects from the requests, we need to inspect 2638 * them for finding the guilty party. As the requests only borrow 2639 * their reference to the objects, the inspection must be done first. 
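 * Hence the two passes below: i915_gem_reset_ring_status() walks the
 * requests first and marks the guilty and innocent contexts, and only
 * then does i915_gem_reset_ring_cleanup() retire the objects and free
 * the requests.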
2640 */ 2641 for_each_ring(ring, dev_priv, i) 2642 i915_gem_reset_ring_status(dev_priv, ring); 2643 2644 for_each_ring(ring, dev_priv, i) 2645 i915_gem_reset_ring_cleanup(dev_priv, ring); 2646 2647 i915_gem_context_reset(dev); 2648 2649 i915_gem_restore_fences(dev); 2650 } 2651 2652 /** 2653 * This function clears the request list as sequence numbers are passed. 2654 */ 2655 void 2656 i915_gem_retire_requests_ring(struct intel_engine_cs *ring) 2657 { 2658 if (list_empty(&ring->request_list)) 2659 return; 2660 2661 WARN_ON(i915_verify_lists(ring->dev)); 2662 2663 /* Retire requests first as we use it above for the early return. 2664 * If we retire requests last, we may use a later seqno and so clear 2665 * the requests lists without clearing the active list, leading to 2666 * confusion. 2667 */ 2668 while (!list_empty(&ring->request_list)) { 2669 struct drm_i915_gem_request *request; 2670 2671 request = list_first_entry(&ring->request_list, 2672 struct drm_i915_gem_request, 2673 list); 2674 2675 if (!i915_gem_request_completed(request, true)) 2676 break; 2677 2678 trace_i915_gem_request_retire(request); 2679 2680 /* We know the GPU must have read the request to have 2681 * sent us the seqno + interrupt, so use the position 2682 * of tail of the request to update the last known position 2683 * of the GPU head. 2684 */ 2685 request->ringbuf->last_retired_head = request->postfix; 2686 2687 i915_gem_free_request(request); 2688 } 2689 2690 /* Move any buffers on the active list that are no longer referenced 2691 * by the ringbuffer to the flushing/inactive lists as appropriate, 2692 * before we free the context associated with the requests. 2693 */ 2694 while (!list_empty(&ring->active_list)) { 2695 struct drm_i915_gem_object *obj; 2696 2697 obj = list_first_entry(&ring->active_list, 2698 struct drm_i915_gem_object, 2699 ring_list); 2700 2701 if (!i915_gem_request_completed(obj->last_read_req, true)) 2702 break; 2703 2704 i915_gem_object_move_to_inactive(obj); 2705 } 2706 2707 if (unlikely(ring->trace_irq_req && 2708 i915_gem_request_completed(ring->trace_irq_req, true))) { 2709 ring->irq_put(ring); 2710 i915_gem_request_assign(&ring->trace_irq_req, NULL); 2711 } 2712 2713 WARN_ON(i915_verify_lists(ring->dev)); 2714 } 2715 2716 bool 2717 i915_gem_retire_requests(struct drm_device *dev) 2718 { 2719 struct drm_i915_private *dev_priv = dev->dev_private; 2720 struct intel_engine_cs *ring; 2721 bool idle = true; 2722 int i; 2723 2724 for_each_ring(ring, dev_priv, i) { 2725 i915_gem_retire_requests_ring(ring); 2726 idle &= list_empty(&ring->request_list); 2727 if (i915.enable_execlists) { 2728 unsigned long flags; 2729 2730 spin_lock_irqsave(&ring->execlist_lock, flags); 2731 idle &= list_empty(&ring->execlist_queue); 2732 spin_unlock_irqrestore(&ring->execlist_lock, flags); 2733 2734 intel_execlists_retire_requests(ring); 2735 } 2736 } 2737 2738 if (idle) 2739 mod_delayed_work(dev_priv->wq, 2740 &dev_priv->mm.idle_work, 2741 msecs_to_jiffies(100)); 2742 2743 return idle; 2744 } 2745 2746 static void 2747 i915_gem_retire_work_handler(struct work_struct *work) 2748 { 2749 struct drm_i915_private *dev_priv = 2750 container_of(work, typeof(*dev_priv), mm.retire_work.work); 2751 struct drm_device *dev = dev_priv->dev; 2752 bool idle; 2753 2754 /* Come back later if the device is busy... 
*/ 2755 idle = false; 2756 if (mutex_trylock(&dev->struct_mutex)) { 2757 idle = i915_gem_retire_requests(dev); 2758 mutex_unlock(&dev->struct_mutex); 2759 } 2760 if (!idle) 2761 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 2762 round_jiffies_up_relative(HZ)); 2763 } 2764 2765 static void 2766 i915_gem_idle_work_handler(struct work_struct *work) 2767 { 2768 struct drm_i915_private *dev_priv = 2769 container_of(work, typeof(*dev_priv), mm.idle_work.work); 2770 2771 intel_mark_idle(dev_priv->dev); 2772 } 2773 2774 /** 2775 * Ensures that an object will eventually get non-busy by flushing any required 2776 * write domains, emitting any outstanding lazy request and retiring any 2777 * completed requests. 2778 */ 2779 static int 2780 i915_gem_object_flush_active(struct drm_i915_gem_object *obj) 2781 { 2782 struct intel_engine_cs *ring; 2783 int ret; 2784 2785 if (obj->active) { 2786 ring = i915_gem_request_get_ring(obj->last_read_req); 2787 2788 ret = i915_gem_check_olr(obj->last_read_req); 2789 if (ret) 2790 return ret; 2791 2792 i915_gem_retire_requests_ring(ring); 2793 } 2794 2795 return 0; 2796 } 2797 2798 /** 2799 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT 2800 * @DRM_IOCTL_ARGS: standard ioctl arguments 2801 * 2802 * Returns 0 if successful, else an error is returned with the remaining time in 2803 * the timeout parameter. 2804 * -ETIME: object is still busy after timeout 2805 * -ERESTARTSYS: signal interrupted the wait 2806 * -ENOENT: object doesn't exist 2807 * Also possible, but rare: 2808 * -EAGAIN: GPU wedged 2809 * -ENOMEM: damn 2810 * -ENODEV: Internal IRQ fail 2811 * -E?: The add request failed 2812 * 2813 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any 2814 * non-zero timeout parameter the wait ioctl will wait for the given number of 2815 * nanoseconds on an object becoming unbusy. Since the wait itself does so 2816 * without holding struct_mutex the object may become re-busied before this 2817 * function completes. A similar but shorter race condition exists in the busy 2818 * ioctl 2819 */ 2820 int 2821 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 2822 { 2823 struct drm_i915_private *dev_priv = dev->dev_private; 2824 struct drm_i915_gem_wait *args = data; 2825 struct drm_i915_gem_object *obj; 2826 struct drm_i915_gem_request *req; 2827 unsigned reset_counter; 2828 int ret = 0; 2829 2830 if (args->flags != 0) 2831 return -EINVAL; 2832 2833 ret = i915_mutex_lock_interruptible(dev); 2834 if (ret) 2835 return ret; 2836 2837 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle)); 2838 if (&obj->base == NULL) { 2839 mutex_unlock(&dev->struct_mutex); 2840 return -ENOENT; 2841 } 2842 2843 /* Need to make sure the object gets inactive eventually. */ 2844 ret = i915_gem_object_flush_active(obj); 2845 if (ret) 2846 goto out; 2847 2848 if (!obj->active || !obj->last_read_req) 2849 goto out; 2850 2851 req = obj->last_read_req; 2852 2853 /* Do this after OLR check to make sure we make forward progress polling 2854 * on this IOCTL with a timeout == 0 (like busy ioctl) 2855 */ 2856 if (args->timeout_ns == 0) { 2857 ret = -ETIME; 2858 goto out; 2859 } 2860 2861 drm_gem_object_unreference(&obj->base); 2862 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 2863 i915_gem_request_reference(req); 2864 mutex_unlock(&dev->struct_mutex); 2865 2866 ret = __i915_wait_request(req, reset_counter, true, 2867 args->timeout_ns > 0 ?
&args->timeout_ns : NULL, 2868 file->driver_priv); 2869 mutex_lock(&dev->struct_mutex); 2870 i915_gem_request_unreference(req); 2871 mutex_unlock(&dev->struct_mutex); 2872 return ret; 2873 2874 out: 2875 drm_gem_object_unreference(&obj->base); 2876 mutex_unlock(&dev->struct_mutex); 2877 return ret; 2878 } 2879 2880 /** 2881 * i915_gem_object_sync - sync an object to a ring. 2882 * 2883 * @obj: object which may be in use on another ring. 2884 * @to: ring we wish to use the object on. May be NULL. 2885 * 2886 * This code is meant to abstract object synchronization with the GPU. 2887 * Calling with NULL implies synchronizing the object with the CPU 2888 * rather than a particular GPU ring. 2889 * 2890 * Returns 0 if successful, else propagates up the lower layer error. 2891 */ 2892 int 2893 i915_gem_object_sync(struct drm_i915_gem_object *obj, 2894 struct intel_engine_cs *to) 2895 { 2896 struct intel_engine_cs *from; 2897 u32 seqno; 2898 int ret, idx; 2899 2900 from = i915_gem_request_get_ring(obj->last_read_req); 2901 2902 if (from == NULL || to == from) 2903 return 0; 2904 2905 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev)) 2906 return i915_gem_object_wait_rendering(obj, false); 2907 2908 idx = intel_ring_sync_index(from, to); 2909 2910 seqno = i915_gem_request_get_seqno(obj->last_read_req); 2911 /* Optimization: Avoid semaphore sync when we are sure we already 2912 * waited for an object with higher seqno */ 2913 if (seqno <= from->semaphore.sync_seqno[idx]) 2914 return 0; 2915 2916 ret = i915_gem_check_olr(obj->last_read_req); 2917 if (ret) 2918 return ret; 2919 2920 trace_i915_gem_ring_sync_to(from, to, obj->last_read_req); 2921 ret = to->semaphore.sync_to(to, from, seqno); 2922 if (!ret) 2923 /* We use last_read_req because sync_to() 2924 * might have just caused seqno wrap under 2925 * the radar. 2926 */ 2927 from->semaphore.sync_seqno[idx] = 2928 i915_gem_request_get_seqno(obj->last_read_req); 2929 2930 return ret; 2931 } 2932 2933 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) 2934 { 2935 u32 old_write_domain, old_read_domains; 2936 2937 /* Force a pagefault for domain tracking on next user access */ 2938 i915_gem_release_mmap(obj); 2939 2940 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 2941 return; 2942 2943 /* Wait for any direct GTT access to complete */ 2944 mb(); 2945 2946 old_read_domains = obj->base.read_domains; 2947 old_write_domain = obj->base.write_domain; 2948 2949 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT; 2950 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT; 2951 2952 trace_i915_gem_object_change_domain(obj, 2953 old_read_domains, 2954 old_write_domain); 2955 } 2956 2957 int i915_vma_unbind(struct i915_vma *vma) 2958 { 2959 struct drm_i915_gem_object *obj = vma->obj; 2960 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2961 int ret; 2962 2963 if (list_empty(&vma->vma_link)) 2964 return 0; 2965 2966 if (!drm_mm_node_allocated(&vma->node)) { 2967 i915_gem_vma_destroy(vma); 2968 return 0; 2969 } 2970 2971 if (vma->pin_count) 2972 return -EBUSY; 2973 2974 BUG_ON(obj->pages == NULL); 2975 2976 ret = i915_gem_object_finish_gpu(obj); 2977 if (ret) 2978 return ret; 2979 /* Continue on if we fail due to EIO, the GPU is hung so we 2980 * should be safe and we need to cleanup or else we might 2981 * cause memory corruption through use-after-free. 
2982 */ 2983 2984 if (i915_is_ggtt(vma->vm) && 2985 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { 2986 i915_gem_object_finish_gtt(obj); 2987 2988 /* release the fence reg _after_ flushing */ 2989 ret = i915_gem_object_put_fence(obj); 2990 if (ret) 2991 return ret; 2992 } 2993 2994 trace_i915_vma_unbind(vma); 2995 2996 vma->unbind_vma(vma); 2997 2998 list_del_init(&vma->mm_list); 2999 if (i915_is_ggtt(vma->vm)) { 3000 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { 3001 obj->map_and_fenceable = false; 3002 } else if (vma->ggtt_view.pages) { 3003 sg_free_table(vma->ggtt_view.pages); 3004 kfree(vma->ggtt_view.pages); 3005 vma->ggtt_view.pages = NULL; 3006 } 3007 } 3008 3009 drm_mm_remove_node(&vma->node); 3010 i915_gem_vma_destroy(vma); 3011 3012 /* Since the unbound list is global, only move to that list if 3013 * no more VMAs exist. */ 3014 if (list_empty(&obj->vma_list)) { 3015 /* Throw away the active reference before 3016 * moving to the unbound list. */ 3017 i915_gem_object_retire(obj); 3018 3019 i915_gem_gtt_finish_object(obj); 3020 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); 3021 } 3022 3023 /* And finally now the object is completely decoupled from this vma, 3024 * we can drop its hold on the backing storage and allow it to be 3025 * reaped by the shrinker. 3026 */ 3027 i915_gem_object_unpin_pages(obj); 3028 3029 return 0; 3030 } 3031 3032 int i915_gpu_idle(struct drm_device *dev) 3033 { 3034 struct drm_i915_private *dev_priv = dev->dev_private; 3035 struct intel_engine_cs *ring; 3036 int ret, i; 3037 3038 /* Flush everything onto the inactive list. */ 3039 for_each_ring(ring, dev_priv, i) { 3040 if (!i915.enable_execlists) { 3041 ret = i915_switch_context(ring, ring->default_context); 3042 if (ret) 3043 return ret; 3044 } 3045 3046 ret = intel_ring_idle(ring); 3047 if (ret) 3048 return ret; 3049 } 3050 3051 return 0; 3052 } 3053 3054 static void i965_write_fence_reg(struct drm_device *dev, int reg, 3055 struct drm_i915_gem_object *obj) 3056 { 3057 struct drm_i915_private *dev_priv = dev->dev_private; 3058 int fence_reg; 3059 int fence_pitch_shift; 3060 3061 if (INTEL_INFO(dev)->gen >= 6) { 3062 fence_reg = FENCE_REG_SANDYBRIDGE_0; 3063 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT; 3064 } else { 3065 fence_reg = FENCE_REG_965_0; 3066 fence_pitch_shift = I965_FENCE_PITCH_SHIFT; 3067 } 3068 3069 fence_reg += reg * 8; 3070 3071 /* To w/a incoherency with non-atomic 64-bit register updates, 3072 * we split the 64-bit update into two 32-bit writes. In order 3073 * for a partial fence not to be evaluated between writes, we 3074 * precede the update with write to turn off the fence register, 3075 * and only enable the fence as the last step. 3076 * 3077 * For extra levels of paranoia, we make sure each step lands 3078 * before applying the next step. 3079 */ 3080 I915_WRITE(fence_reg, 0); 3081 POSTING_READ(fence_reg); 3082 3083 if (obj) { 3084 u32 size = i915_gem_obj_ggtt_size(obj); 3085 uint64_t val; 3086 3087 /* Adjust fence size to match tiled area */ 3088 if (obj->tiling_mode != I915_TILING_NONE) { 3089 uint32_t row_size = obj->stride * 3090 (obj->tiling_mode == I915_TILING_Y ? 
32 : 8); 3091 size = (size / row_size) * row_size; 3092 } 3093 3094 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & 3095 0xfffff000) << 32; 3096 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; 3097 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; 3098 if (obj->tiling_mode == I915_TILING_Y) 3099 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 3100 val |= I965_FENCE_REG_VALID; 3101 3102 I915_WRITE(fence_reg + 4, val >> 32); 3103 POSTING_READ(fence_reg + 4); 3104 3105 I915_WRITE(fence_reg + 0, val); 3106 POSTING_READ(fence_reg); 3107 } else { 3108 I915_WRITE(fence_reg + 4, 0); 3109 POSTING_READ(fence_reg + 4); 3110 } 3111 } 3112 3113 static void i915_write_fence_reg(struct drm_device *dev, int reg, 3114 struct drm_i915_gem_object *obj) 3115 { 3116 struct drm_i915_private *dev_priv = dev->dev_private; 3117 u32 val; 3118 3119 if (obj) { 3120 u32 size = i915_gem_obj_ggtt_size(obj); 3121 int pitch_val; 3122 int tile_width; 3123 3124 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) || 3125 (size & -size) != size || 3126 (i915_gem_obj_ggtt_offset(obj) & (size - 1)), 3127 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", 3128 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size); 3129 3130 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) 3131 tile_width = 128; 3132 else 3133 tile_width = 512; 3134 3135 /* Note: pitch better be a power of two tile widths */ 3136 pitch_val = obj->stride / tile_width; 3137 pitch_val = ffs(pitch_val) - 1; 3138 3139 val = i915_gem_obj_ggtt_offset(obj); 3140 if (obj->tiling_mode == I915_TILING_Y) 3141 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 3142 val |= I915_FENCE_SIZE_BITS(size); 3143 val |= pitch_val << I830_FENCE_PITCH_SHIFT; 3144 val |= I830_FENCE_REG_VALID; 3145 } else 3146 val = 0; 3147 3148 if (reg < 8) 3149 reg = FENCE_REG_830_0 + reg * 4; 3150 else 3151 reg = FENCE_REG_945_8 + (reg - 8) * 4; 3152 3153 I915_WRITE(reg, val); 3154 POSTING_READ(reg); 3155 } 3156 3157 static void i830_write_fence_reg(struct drm_device *dev, int reg, 3158 struct drm_i915_gem_object *obj) 3159 { 3160 struct drm_i915_private *dev_priv = dev->dev_private; 3161 uint32_t val; 3162 3163 if (obj) { 3164 u32 size = i915_gem_obj_ggtt_size(obj); 3165 uint32_t pitch_val; 3166 3167 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) || 3168 (size & -size) != size || 3169 (i915_gem_obj_ggtt_offset(obj) & (size - 1)), 3170 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n", 3171 i915_gem_obj_ggtt_offset(obj), size); 3172 3173 pitch_val = obj->stride / 128; 3174 pitch_val = ffs(pitch_val) - 1; 3175 3176 val = i915_gem_obj_ggtt_offset(obj); 3177 if (obj->tiling_mode == I915_TILING_Y) 3178 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 3179 val |= I830_FENCE_SIZE_BITS(size); 3180 val |= pitch_val << I830_FENCE_PITCH_SHIFT; 3181 val |= I830_FENCE_REG_VALID; 3182 } else 3183 val = 0; 3184 3185 I915_WRITE(FENCE_REG_830_0 + reg * 4, val); 3186 POSTING_READ(FENCE_REG_830_0 + reg * 4); 3187 } 3188 3189 inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj) 3190 { 3191 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT; 3192 } 3193 3194 static void i915_gem_write_fence(struct drm_device *dev, int reg, 3195 struct drm_i915_gem_object *obj) 3196 { 3197 struct drm_i915_private *dev_priv = dev->dev_private; 3198 3199 /* Ensure that all CPU reads are completed before installing a fence 3200 * and all writes before removing the fence. 
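 *
 * For reference, the gen4+ value written by i965_write_fence_reg() above
 * packs (illustrative numbers: an X-tiled object at GGTT offset
 * 0x00200000 with a fenced size of 1MiB and a stride of 4096 bytes):
 *
 *	upper 32 bits: (0x00200000 + 0x00100000 - 4096) & 0xfffff000
 *	               = 0x002ff000, the start of the last covered page
 *	lower 32 bits: 0x00200000 (start page)
 *	               | ((4096 / 128) - 1) << fence_pitch_shift
 *	               | I965_FENCE_REG_VALID
 *	and, for Y tiling, 1 << I965_FENCE_TILING_Y_SHIFT in addition.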
3201 */ 3202 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) 3203 mb(); 3204 3205 WARN(obj && (!obj->stride || !obj->tiling_mode), 3206 "bogus fence setup with stride: 0x%x, tiling mode: %i\n", 3207 obj->stride, obj->tiling_mode); 3208 3209 if (IS_GEN2(dev)) 3210 i830_write_fence_reg(dev, reg, obj); 3211 else if (IS_GEN3(dev)) 3212 i915_write_fence_reg(dev, reg, obj); 3213 else if (INTEL_INFO(dev)->gen >= 4) 3214 i965_write_fence_reg(dev, reg, obj); 3215 3216 /* And similarly be paranoid that no direct access to this region 3217 * is reordered to before the fence is installed. 3218 */ 3219 if (i915_gem_object_needs_mb(obj)) 3220 mb(); 3221 } 3222 3223 static inline int fence_number(struct drm_i915_private *dev_priv, 3224 struct drm_i915_fence_reg *fence) 3225 { 3226 return fence - dev_priv->fence_regs; 3227 } 3228 3229 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, 3230 struct drm_i915_fence_reg *fence, 3231 bool enable) 3232 { 3233 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3234 int reg = fence_number(dev_priv, fence); 3235 3236 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL); 3237 3238 if (enable) { 3239 obj->fence_reg = reg; 3240 fence->obj = obj; 3241 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); 3242 } else { 3243 obj->fence_reg = I915_FENCE_REG_NONE; 3244 fence->obj = NULL; 3245 list_del_init(&fence->lru_list); 3246 } 3247 obj->fence_dirty = false; 3248 } 3249 3250 static int 3251 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) 3252 { 3253 if (obj->last_fenced_req) { 3254 int ret = i915_wait_request(obj->last_fenced_req); 3255 if (ret) 3256 return ret; 3257 3258 i915_gem_request_assign(&obj->last_fenced_req, NULL); 3259 } 3260 3261 return 0; 3262 } 3263 3264 int 3265 i915_gem_object_put_fence(struct drm_i915_gem_object *obj) 3266 { 3267 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3268 struct drm_i915_fence_reg *fence; 3269 int ret; 3270 3271 ret = i915_gem_object_wait_fence(obj); 3272 if (ret) 3273 return ret; 3274 3275 if (obj->fence_reg == I915_FENCE_REG_NONE) 3276 return 0; 3277 3278 fence = &dev_priv->fence_regs[obj->fence_reg]; 3279 3280 if (WARN_ON(fence->pin_count)) 3281 return -EBUSY; 3282 3283 i915_gem_object_fence_lost(obj); 3284 i915_gem_object_update_fence(obj, fence, false); 3285 3286 return 0; 3287 } 3288 3289 static struct drm_i915_fence_reg * 3290 i915_find_fence_reg(struct drm_device *dev) 3291 { 3292 struct drm_i915_private *dev_priv = dev->dev_private; 3293 struct drm_i915_fence_reg *reg, *avail; 3294 int i; 3295 3296 /* First try to find a free reg */ 3297 avail = NULL; 3298 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { 3299 reg = &dev_priv->fence_regs[i]; 3300 if (!reg->obj) 3301 return reg; 3302 3303 if (!reg->pin_count) 3304 avail = reg; 3305 } 3306 3307 if (avail == NULL) 3308 goto deadlock; 3309 3310 /* None available, try to steal one or wait for a user to finish */ 3311 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { 3312 if (reg->pin_count) 3313 continue; 3314 3315 return reg; 3316 } 3317 3318 deadlock: 3319 /* Wait for completion of pending flips which consume fences */ 3320 if (intel_has_pending_fb_unpin(dev)) 3321 return ERR_PTR(-EAGAIN); 3322 3323 return ERR_PTR(-EDEADLK); 3324 } 3325 3326 /** 3327 * i915_gem_object_get_fence - set up fencing for an object 3328 * @obj: object to map through a fence reg 3329 * 3330 * When mapping objects through the GTT, userspace wants to be able to write 
3331 * to them without having to worry about swizzling if the object is tiled. 3332 * This function walks the fence regs looking for a free one for @obj, 3333 * stealing one if it can't find any. 3334 * 3335 * It then sets up the reg based on the object's properties: address, pitch 3336 * and tiling format. 3337 * 3338 * For an untiled surface, this removes any existing fence. 3339 */ 3340 int 3341 i915_gem_object_get_fence(struct drm_i915_gem_object *obj) 3342 { 3343 struct drm_device *dev = obj->base.dev; 3344 struct drm_i915_private *dev_priv = dev->dev_private; 3345 bool enable = obj->tiling_mode != I915_TILING_NONE; 3346 struct drm_i915_fence_reg *reg; 3347 int ret; 3348 3349 /* Have we updated the tiling parameters upon the object and so 3350 * will need to serialise the write to the associated fence register? 3351 */ 3352 if (obj->fence_dirty) { 3353 ret = i915_gem_object_wait_fence(obj); 3354 if (ret) 3355 return ret; 3356 } 3357 3358 /* Just update our place in the LRU if our fence is getting reused. */ 3359 if (obj->fence_reg != I915_FENCE_REG_NONE) { 3360 reg = &dev_priv->fence_regs[obj->fence_reg]; 3361 if (!obj->fence_dirty) { 3362 list_move_tail(&reg->lru_list, 3363 &dev_priv->mm.fence_list); 3364 return 0; 3365 } 3366 } else if (enable) { 3367 if (WARN_ON(!obj->map_and_fenceable)) 3368 return -EINVAL; 3369 3370 reg = i915_find_fence_reg(dev); 3371 if (IS_ERR(reg)) 3372 return PTR_ERR(reg); 3373 3374 if (reg->obj) { 3375 struct drm_i915_gem_object *old = reg->obj; 3376 3377 ret = i915_gem_object_wait_fence(old); 3378 if (ret) 3379 return ret; 3380 3381 i915_gem_object_fence_lost(old); 3382 } 3383 } else 3384 return 0; 3385 3386 i915_gem_object_update_fence(obj, reg, enable); 3387 3388 return 0; 3389 } 3390 3391 static bool i915_gem_valid_gtt_space(struct i915_vma *vma, 3392 unsigned long cache_level) 3393 { 3394 struct drm_mm_node *gtt_space = &vma->node; 3395 struct drm_mm_node *other; 3396 3397 /* 3398 * On some machines we have to be careful when putting differing types 3399 * of snoopable memory together to avoid the prefetcher crossing memory 3400 * domains and dying. During vm initialisation, we decide whether or not 3401 * these constraints apply and set the drm_mm.color_adjust 3402 * appropriately. 3403 */ 3404 if (vma->vm->mm.color_adjust == NULL) 3405 return true; 3406 3407 if (!drm_mm_node_allocated(gtt_space)) 3408 return true; 3409 3410 if (list_empty(&gtt_space->node_list)) 3411 return true; 3412 3413 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list); 3414 if (other->allocated && !other->hole_follows && other->color != cache_level) 3415 return false; 3416 3417 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list); 3418 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level) 3419 return false; 3420 3421 return true; 3422 } 3423 3424 /** 3425 * Finds free space in the GTT aperture and binds the object there. 3426 */ 3427 static struct i915_vma * 3428 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, 3429 struct i915_address_space *vm, 3430 const struct i915_ggtt_view *ggtt_view, 3431 unsigned alignment, 3432 uint64_t flags) 3433 { 3434 struct drm_device *dev = obj->base.dev; 3435 struct drm_i915_private *dev_priv = dev->dev_private; 3436 u32 size, fence_size, fence_alignment, unfenced_alignment; 3437 unsigned long start = 3438 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; 3439 unsigned long end = 3440 flags & PIN_MAPPABLE ?
dev_priv->gtt.mappable_end : vm->total; 3441 struct i915_vma *vma; 3442 int ret; 3443 3444 if(WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) 3445 return ERR_PTR(-EINVAL); 3446 3447 fence_size = i915_gem_get_gtt_size(dev, 3448 obj->base.size, 3449 obj->tiling_mode); 3450 fence_alignment = i915_gem_get_gtt_alignment(dev, 3451 obj->base.size, 3452 obj->tiling_mode, true); 3453 unfenced_alignment = 3454 i915_gem_get_gtt_alignment(dev, 3455 obj->base.size, 3456 obj->tiling_mode, false); 3457 3458 if (alignment == 0) 3459 alignment = flags & PIN_MAPPABLE ? fence_alignment : 3460 unfenced_alignment; 3461 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) { 3462 DRM_DEBUG("Invalid object alignment requested %u\n", alignment); 3463 return ERR_PTR(-EINVAL); 3464 } 3465 3466 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size; 3467 3468 /* If the object is bigger than the entire aperture, reject it early 3469 * before evicting everything in a vain attempt to find space. 3470 */ 3471 if (obj->base.size > end) { 3472 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n", 3473 obj->base.size, 3474 flags & PIN_MAPPABLE ? "mappable" : "total", 3475 end); 3476 return ERR_PTR(-E2BIG); 3477 } 3478 3479 ret = i915_gem_object_get_pages(obj); 3480 if (ret) 3481 return ERR_PTR(ret); 3482 3483 i915_gem_object_pin_pages(obj); 3484 3485 vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) : 3486 i915_gem_obj_lookup_or_create_vma(obj, vm); 3487 3488 if (IS_ERR(vma)) 3489 goto err_unpin; 3490 3491 search_free: 3492 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, 3493 size, alignment, 3494 obj->cache_level, 3495 start, end, 3496 DRM_MM_SEARCH_DEFAULT, 3497 DRM_MM_CREATE_DEFAULT); 3498 if (ret) { 3499 ret = i915_gem_evict_something(dev, vm, size, alignment, 3500 obj->cache_level, 3501 start, end, 3502 flags); 3503 if (ret == 0) 3504 goto search_free; 3505 3506 goto err_free_vma; 3507 } 3508 if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) { 3509 ret = -EINVAL; 3510 goto err_remove_node; 3511 } 3512 3513 ret = i915_gem_gtt_prepare_object(obj); 3514 if (ret) 3515 goto err_remove_node; 3516 3517 /* allocate before insert / bind */ 3518 if (vma->vm->allocate_va_range) { 3519 trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size, 3520 VM_TO_TRACE_NAME(vma->vm)); 3521 ret = vma->vm->allocate_va_range(vma->vm, 3522 vma->node.start, 3523 vma->node.size); 3524 if (ret) 3525 goto err_remove_node; 3526 } 3527 3528 trace_i915_vma_bind(vma, flags); 3529 ret = i915_vma_bind(vma, obj->cache_level, 3530 flags & PIN_GLOBAL ? GLOBAL_BIND : 0); 3531 if (ret) 3532 goto err_finish_gtt; 3533 3534 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); 3535 list_add_tail(&vma->mm_list, &vm->inactive_list); 3536 3537 return vma; 3538 3539 err_finish_gtt: 3540 i915_gem_gtt_finish_object(obj); 3541 err_remove_node: 3542 drm_mm_remove_node(&vma->node); 3543 err_free_vma: 3544 i915_gem_vma_destroy(vma); 3545 vma = ERR_PTR(ret); 3546 err_unpin: 3547 i915_gem_object_unpin_pages(obj); 3548 return vma; 3549 } 3550 3551 bool 3552 i915_gem_clflush_object(struct drm_i915_gem_object *obj, 3553 bool force) 3554 { 3555 /* If we don't have a page list set up, then we're not pinned 3556 * to GPU, and we can ignore the cache flush because it'll happen 3557 * again at bind time. 
3558 */ 3559 if (obj->pages == NULL) 3560 return false; 3561 3562 /* 3563 * Stolen memory is always coherent with the GPU as it is explicitly 3564 * marked as wc by the system, or the system is cache-coherent. 3565 */ 3566 if (obj->stolen || obj->phys_handle) 3567 return false; 3568 3569 /* If the GPU is snooping the contents of the CPU cache, 3570 * we do not need to manually clear the CPU cache lines. However, 3571 * the caches are only snooped when the render cache is 3572 * flushed/invalidated. As we always have to emit invalidations 3573 * and flushes when moving into and out of the RENDER domain, correct 3574 * snooping behaviour occurs naturally as the result of our domain 3575 * tracking. 3576 */ 3577 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) { 3578 obj->cache_dirty = true; 3579 return false; 3580 } 3581 3582 trace_i915_gem_object_clflush(obj); 3583 drm_clflush_sg(obj->pages); 3584 obj->cache_dirty = false; 3585 3586 return true; 3587 } 3588 3589 /** Flushes the GTT write domain for the object if it's dirty. */ 3590 static void 3591 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) 3592 { 3593 uint32_t old_write_domain; 3594 3595 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) 3596 return; 3597 3598 /* No actual flushing is required for the GTT write domain. Writes 3599 * to it immediately go to main memory as far as we know, so there's 3600 * no chipset flush. It also doesn't land in render cache. 3601 * 3602 * However, we do have to enforce the order so that all writes through 3603 * the GTT land before any writes to the device, such as updates to 3604 * the GATT itself. 3605 */ 3606 wmb(); 3607 3608 old_write_domain = obj->base.write_domain; 3609 obj->base.write_domain = 0; 3610 3611 intel_fb_obj_flush(obj, false); 3612 3613 trace_i915_gem_object_change_domain(obj, 3614 obj->base.read_domains, 3615 old_write_domain); 3616 } 3617 3618 /** Flushes the CPU write domain for the object if it's dirty. */ 3619 static void 3620 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) 3621 { 3622 uint32_t old_write_domain; 3623 3624 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) 3625 return; 3626 3627 if (i915_gem_clflush_object(obj, obj->pin_display)) 3628 i915_gem_chipset_flush(obj->base.dev); 3629 3630 old_write_domain = obj->base.write_domain; 3631 obj->base.write_domain = 0; 3632 3633 intel_fb_obj_flush(obj, false); 3634 3635 trace_i915_gem_object_change_domain(obj, 3636 obj->base.read_domains, 3637 old_write_domain); 3638 } 3639 3640 /** 3641 * Moves a single object to the GTT read, and possibly write domain. 3642 * 3643 * This function returns when the move is complete, including waiting on 3644 * flushes to occur. 3645 */ 3646 int 3647 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 3648 { 3649 uint32_t old_write_domain, old_read_domains; 3650 struct i915_vma *vma; 3651 int ret; 3652 3653 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3654 return 0; 3655 3656 ret = i915_gem_object_wait_rendering(obj, !write); 3657 if (ret) 3658 return ret; 3659 3660 i915_gem_object_retire(obj); 3661 3662 /* Flush and acquire obj->pages so that we are coherent through 3663 * direct access in memory with previous cached writes through 3664 * shmemfs and that our cache domain tracking remains valid. 
3665 * For example, if the obj->filp was moved to swap without us 3666 * being notified and releasing the pages, we would mistakenly 3667 * continue to assume that the obj remained out of the CPU cached 3668 * domain. 3669 */ 3670 ret = i915_gem_object_get_pages(obj); 3671 if (ret) 3672 return ret; 3673 3674 i915_gem_object_flush_cpu_write_domain(obj); 3675 3676 /* Serialise direct access to this object with the barriers for 3677 * coherent writes from the GPU, by effectively invalidating the 3678 * GTT domain upon first access. 3679 */ 3680 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 3681 mb(); 3682 3683 old_write_domain = obj->base.write_domain; 3684 old_read_domains = obj->base.read_domains; 3685 3686 /* It should now be out of any other write domains, and we can update 3687 * the domain values for our changes. 3688 */ 3689 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 3690 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3691 if (write) { 3692 obj->base.read_domains = I915_GEM_DOMAIN_GTT; 3693 obj->base.write_domain = I915_GEM_DOMAIN_GTT; 3694 obj->dirty = 1; 3695 } 3696 3697 if (write) 3698 intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT); 3699 3700 trace_i915_gem_object_change_domain(obj, 3701 old_read_domains, 3702 old_write_domain); 3703 3704 /* And bump the LRU for this access */ 3705 vma = i915_gem_obj_to_ggtt(obj); 3706 if (vma && drm_mm_node_allocated(&vma->node) && !obj->active) 3707 list_move_tail(&vma->mm_list, 3708 &to_i915(obj->base.dev)->gtt.base.inactive_list); 3709 3710 return 0; 3711 } 3712 3713 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3714 enum i915_cache_level cache_level) 3715 { 3716 struct drm_device *dev = obj->base.dev; 3717 struct i915_vma *vma, *next; 3718 int ret; 3719 3720 if (obj->cache_level == cache_level) 3721 return 0; 3722 3723 if (i915_gem_obj_is_pinned(obj)) { 3724 DRM_DEBUG("can not change the cache level of pinned objects\n"); 3725 return -EBUSY; 3726 } 3727 3728 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { 3729 if (!i915_gem_valid_gtt_space(vma, cache_level)) { 3730 ret = i915_vma_unbind(vma); 3731 if (ret) 3732 return ret; 3733 } 3734 } 3735 3736 if (i915_gem_obj_bound_any(obj)) { 3737 ret = i915_gem_object_finish_gpu(obj); 3738 if (ret) 3739 return ret; 3740 3741 i915_gem_object_finish_gtt(obj); 3742 3743 /* Before SandyBridge, you could not use tiling or fence 3744 * registers with snooped memory, so relinquish any fences 3745 * currently pointing to our region in the aperture. 
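 *
 * The target cache level is normally chosen by userspace through the
 * set_caching ioctl handled below; an illustrative call, assuming a
 * hypothetical open DRM fd and GEM handle:
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);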
3746 */ 3747 if (INTEL_INFO(dev)->gen < 6) { 3748 ret = i915_gem_object_put_fence(obj); 3749 if (ret) 3750 return ret; 3751 } 3752 3753 list_for_each_entry(vma, &obj->vma_list, vma_link) 3754 if (drm_mm_node_allocated(&vma->node)) { 3755 ret = i915_vma_bind(vma, cache_level, 3756 vma->bound & GLOBAL_BIND); 3757 if (ret) 3758 return ret; 3759 } 3760 } 3761 3762 list_for_each_entry(vma, &obj->vma_list, vma_link) 3763 vma->node.color = cache_level; 3764 obj->cache_level = cache_level; 3765 3766 if (obj->cache_dirty && 3767 obj->base.write_domain != I915_GEM_DOMAIN_CPU && 3768 cpu_write_needs_clflush(obj)) { 3769 if (i915_gem_clflush_object(obj, true)) 3770 i915_gem_chipset_flush(obj->base.dev); 3771 } 3772 3773 return 0; 3774 } 3775 3776 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 3777 struct drm_file *file) 3778 { 3779 struct drm_i915_gem_caching *args = data; 3780 struct drm_i915_gem_object *obj; 3781 int ret; 3782 3783 ret = i915_mutex_lock_interruptible(dev); 3784 if (ret) 3785 return ret; 3786 3787 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 3788 if (&obj->base == NULL) { 3789 ret = -ENOENT; 3790 goto unlock; 3791 } 3792 3793 switch (obj->cache_level) { 3794 case I915_CACHE_LLC: 3795 case I915_CACHE_L3_LLC: 3796 args->caching = I915_CACHING_CACHED; 3797 break; 3798 3799 case I915_CACHE_WT: 3800 args->caching = I915_CACHING_DISPLAY; 3801 break; 3802 3803 default: 3804 args->caching = I915_CACHING_NONE; 3805 break; 3806 } 3807 3808 drm_gem_object_unreference(&obj->base); 3809 unlock: 3810 mutex_unlock(&dev->struct_mutex); 3811 return ret; 3812 } 3813 3814 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 3815 struct drm_file *file) 3816 { 3817 struct drm_i915_gem_caching *args = data; 3818 struct drm_i915_gem_object *obj; 3819 enum i915_cache_level level; 3820 int ret; 3821 3822 switch (args->caching) { 3823 case I915_CACHING_NONE: 3824 level = I915_CACHE_NONE; 3825 break; 3826 case I915_CACHING_CACHED: 3827 level = I915_CACHE_LLC; 3828 break; 3829 case I915_CACHING_DISPLAY: 3830 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE; 3831 break; 3832 default: 3833 return -EINVAL; 3834 } 3835 3836 ret = i915_mutex_lock_interruptible(dev); 3837 if (ret) 3838 return ret; 3839 3840 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 3841 if (&obj->base == NULL) { 3842 ret = -ENOENT; 3843 goto unlock; 3844 } 3845 3846 ret = i915_gem_object_set_cache_level(obj, level); 3847 3848 drm_gem_object_unreference(&obj->base); 3849 unlock: 3850 mutex_unlock(&dev->struct_mutex); 3851 return ret; 3852 } 3853 3854 static bool is_pin_display(struct drm_i915_gem_object *obj) 3855 { 3856 struct i915_vma *vma; 3857 3858 vma = i915_gem_obj_to_ggtt(obj); 3859 if (!vma) 3860 return false; 3861 3862 /* There are 2 sources that pin objects: 3863 * 1. The display engine (scanouts, sprites, cursors); 3864 * 2. Reservations for execbuffer; 3865 * 3866 * We can ignore reservations as we hold the struct_mutex and 3867 * are only called outside of the reservation path. 3868 */ 3869 return vma->pin_count; 3870 } 3871 3872 /* 3873 * Prepare buffer for display plane (scanout, cursors, etc). 3874 * Can be called from an uninterruptible phase (modesetting) and allows 3875 * any flushes to be pipelined (for pageflips). 
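 *
 * A successful pin here is expected to be balanced by a later call to
 * i915_gem_object_unpin_from_display_plane() with the same view, e.g.
 * (an illustrative sketch only):
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
 *						   pipelined, view);
 *	if (ret == 0) {
 *		... scan out from the pinned view ...
 *		i915_gem_object_unpin_from_display_plane(obj, view);
 *	}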
3876 */ 3877 int 3878 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3879 u32 alignment, 3880 struct intel_engine_cs *pipelined, 3881 const struct i915_ggtt_view *view) 3882 { 3883 u32 old_read_domains, old_write_domain; 3884 bool was_pin_display; 3885 int ret; 3886 3887 if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) { 3888 ret = i915_gem_object_sync(obj, pipelined); 3889 if (ret) 3890 return ret; 3891 } 3892 3893 /* Mark the pin_display early so that we account for the 3894 * display coherency whilst setting up the cache domains. 3895 */ 3896 was_pin_display = obj->pin_display; 3897 obj->pin_display = true; 3898 3899 /* The display engine is not coherent with the LLC cache on gen6. As 3900 * a result, we make sure that the pinning that is about to occur is 3901 * done with uncached PTEs. This is lowest common denominator for all 3902 * chipsets. 3903 * 3904 * However for gen6+, we could do better by using the GFDT bit instead 3905 * of uncaching, which would allow us to flush all the LLC-cached data 3906 * with that bit in the PTE to main memory with just one PIPE_CONTROL. 3907 */ 3908 ret = i915_gem_object_set_cache_level(obj, 3909 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE); 3910 if (ret) 3911 goto err_unpin_display; 3912 3913 /* As the user may map the buffer once pinned in the display plane 3914 * (e.g. libkms for the bootup splash), we have to ensure that we 3915 * always use map_and_fenceable for all scanout buffers. 3916 */ 3917 ret = i915_gem_object_ggtt_pin(obj, view, alignment, 3918 view->type == I915_GGTT_VIEW_NORMAL ? 3919 PIN_MAPPABLE : 0); 3920 if (ret) 3921 goto err_unpin_display; 3922 3923 i915_gem_object_flush_cpu_write_domain(obj); 3924 3925 old_write_domain = obj->base.write_domain; 3926 old_read_domains = obj->base.read_domains; 3927 3928 /* It should now be out of any other write domains, and we can update 3929 * the domain values for our changes. 3930 */ 3931 obj->base.write_domain = 0; 3932 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3933 3934 trace_i915_gem_object_change_domain(obj, 3935 old_read_domains, 3936 old_write_domain); 3937 3938 return 0; 3939 3940 err_unpin_display: 3941 WARN_ON(was_pin_display != is_pin_display(obj)); 3942 obj->pin_display = was_pin_display; 3943 return ret; 3944 } 3945 3946 void 3947 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, 3948 const struct i915_ggtt_view *view) 3949 { 3950 i915_gem_object_ggtt_unpin_view(obj, view); 3951 3952 obj->pin_display = is_pin_display(obj); 3953 } 3954 3955 int 3956 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) 3957 { 3958 int ret; 3959 3960 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) 3961 return 0; 3962 3963 ret = i915_gem_object_wait_rendering(obj, false); 3964 if (ret) 3965 return ret; 3966 3967 /* Ensure that we invalidate the GPU's caches and TLBs. */ 3968 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; 3969 return 0; 3970 } 3971 3972 /** 3973 * Moves a single object to the CPU read, and possibly write domain. 3974 * 3975 * This function returns when the move is complete, including waiting on 3976 * flushes to occur. 
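 *
 * Userspace typically reaches this path through the set_domain ioctl; an
 * illustrative call requesting CPU read/write access, assuming a
 * hypothetical open DRM fd and GEM handle:
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);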
3977 */ 3978 int 3979 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) 3980 { 3981 uint32_t old_write_domain, old_read_domains; 3982 int ret; 3983 3984 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) 3985 return 0; 3986 3987 ret = i915_gem_object_wait_rendering(obj, !write); 3988 if (ret) 3989 return ret; 3990 3991 i915_gem_object_retire(obj); 3992 i915_gem_object_flush_gtt_write_domain(obj); 3993 3994 old_write_domain = obj->base.write_domain; 3995 old_read_domains = obj->base.read_domains; 3996 3997 /* Flush the CPU cache if it's still invalid. */ 3998 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { 3999 i915_gem_clflush_object(obj, false); 4000 4001 obj->base.read_domains |= I915_GEM_DOMAIN_CPU; 4002 } 4003 4004 /* It should now be out of any other write domains, and we can update 4005 * the domain values for our changes. 4006 */ 4007 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); 4008 4009 /* If we're writing through the CPU, then the GPU read domains will 4010 * need to be invalidated at next use. 4011 */ 4012 if (write) { 4013 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 4014 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 4015 } 4016 4017 if (write) 4018 intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); 4019 4020 trace_i915_gem_object_change_domain(obj, 4021 old_read_domains, 4022 old_write_domain); 4023 4024 return 0; 4025 } 4026 4027 /* Throttle our rendering by waiting until the ring has completed our requests 4028 * emitted over 20 msec ago. 4029 * 4030 * Note that if we were to use the current jiffies each time around the loop, 4031 * we wouldn't escape the function with any frames outstanding if the time to 4032 * render a frame was over 20ms. 4033 * 4034 * This should get us reasonable parallelism between CPU and GPU but also 4035 * relatively low latency when blocking on a particular request to finish. 
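 *
 * This is used to implement the throttle ioctl; a GL driver would
 * typically issue it once per frame, roughly (fd being a hypothetical
 * open DRM fd):
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);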
4036 */ 4037 static int 4038 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) 4039 { 4040 struct drm_i915_private *dev_priv = dev->dev_private; 4041 struct drm_i915_file_private *file_priv = file->driver_priv; 4042 unsigned long recent_enough = jiffies - msecs_to_jiffies(20); 4043 struct drm_i915_gem_request *request, *target = NULL; 4044 unsigned reset_counter; 4045 int ret; 4046 4047 ret = i915_gem_wait_for_error(&dev_priv->gpu_error); 4048 if (ret) 4049 return ret; 4050 4051 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false); 4052 if (ret) 4053 return ret; 4054 4055 spin_lock(&file_priv->mm.lock); 4056 list_for_each_entry(request, &file_priv->mm.request_list, client_list) { 4057 if (time_after_eq(request->emitted_jiffies, recent_enough)) 4058 break; 4059 4060 target = request; 4061 } 4062 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 4063 if (target) 4064 i915_gem_request_reference(target); 4065 spin_unlock(&file_priv->mm.lock); 4066 4067 if (target == NULL) 4068 return 0; 4069 4070 ret = __i915_wait_request(target, reset_counter, true, NULL, NULL); 4071 if (ret == 0) 4072 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); 4073 4074 mutex_lock(&dev->struct_mutex); 4075 i915_gem_request_unreference(target); 4076 mutex_unlock(&dev->struct_mutex); 4077 4078 return ret; 4079 } 4080 4081 static bool 4082 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) 4083 { 4084 struct drm_i915_gem_object *obj = vma->obj; 4085 4086 if (alignment && 4087 vma->node.start & (alignment - 1)) 4088 return true; 4089 4090 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable) 4091 return true; 4092 4093 if (flags & PIN_OFFSET_BIAS && 4094 vma->node.start < (flags & PIN_OFFSET_MASK)) 4095 return true; 4096 4097 return false; 4098 } 4099 4100 static int 4101 i915_gem_object_do_pin(struct drm_i915_gem_object *obj, 4102 struct i915_address_space *vm, 4103 const struct i915_ggtt_view *ggtt_view, 4104 uint32_t alignment, 4105 uint64_t flags) 4106 { 4107 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 4108 struct i915_vma *vma; 4109 unsigned bound; 4110 int ret; 4111 4112 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base)) 4113 return -ENODEV; 4114 4115 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm))) 4116 return -EINVAL; 4117 4118 if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE)) 4119 return -EINVAL; 4120 4121 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) 4122 return -EINVAL; 4123 4124 vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) : 4125 i915_gem_obj_to_vma(obj, vm); 4126 4127 if (IS_ERR(vma)) 4128 return PTR_ERR(vma); 4129 4130 if (vma) { 4131 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) 4132 return -EBUSY; 4133 4134 if (i915_vma_misplaced(vma, alignment, flags)) { 4135 unsigned long offset; 4136 offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) : 4137 i915_gem_obj_offset(obj, vm); 4138 WARN(vma->pin_count, 4139 "bo is already pinned in %s with incorrect alignment:" 4140 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," 4141 " obj->map_and_fenceable=%d\n", 4142 ggtt_view ? "ggtt" : "ppgtt", 4143 offset, 4144 alignment, 4145 !!(flags & PIN_MAPPABLE), 4146 obj->map_and_fenceable); 4147 ret = i915_vma_unbind(vma); 4148 if (ret) 4149 return ret; 4150 4151 vma = NULL; 4152 } 4153 } 4154 4155 bound = vma ? 
vma->bound : 0; 4156 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) { 4157 /* In true PPGTT, bind has possibly changed PDEs, which 4158 * means we must do a context switch before the GPU can 4159 * accurately read some of the VMAs. 4160 */ 4161 vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment, 4162 flags); 4163 if (IS_ERR(vma)) 4164 return PTR_ERR(vma); 4165 } 4166 4167 if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) { 4168 ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND); 4169 if (ret) 4170 return ret; 4171 } 4172 4173 if ((bound ^ vma->bound) & GLOBAL_BIND) { 4174 bool mappable, fenceable; 4175 u32 fence_size, fence_alignment; 4176 4177 fence_size = i915_gem_get_gtt_size(obj->base.dev, 4178 obj->base.size, 4179 obj->tiling_mode); 4180 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, 4181 obj->base.size, 4182 obj->tiling_mode, 4183 true); 4184 4185 fenceable = (vma->node.size == fence_size && 4186 (vma->node.start & (fence_alignment - 1)) == 0); 4187 4188 mappable = (vma->node.start + fence_size <= 4189 dev_priv->gtt.mappable_end); 4190 4191 obj->map_and_fenceable = mappable && fenceable; 4192 } 4193 4194 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); 4195 4196 vma->pin_count++; 4197 if (flags & PIN_MAPPABLE) 4198 obj->pin_mappable |= true; 4199 4200 return 0; 4201 } 4202 4203 int 4204 i915_gem_object_pin(struct drm_i915_gem_object *obj, 4205 struct i915_address_space *vm, 4206 uint32_t alignment, 4207 uint64_t flags) 4208 { 4209 return i915_gem_object_do_pin(obj, vm, 4210 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL, 4211 alignment, flags); 4212 } 4213 4214 int 4215 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 4216 const struct i915_ggtt_view *view, 4217 uint32_t alignment, 4218 uint64_t flags) 4219 { 4220 if (WARN_ONCE(!view, "no view specified")) 4221 return -EINVAL; 4222 4223 return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view, 4224 alignment, flags | PIN_GLOBAL); 4225 } 4226 4227 void 4228 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, 4229 const struct i915_ggtt_view *view) 4230 { 4231 struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view); 4232 4233 BUG_ON(!vma); 4234 WARN_ON(vma->pin_count == 0); 4235 WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view)); 4236 4237 if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL) 4238 obj->pin_mappable = false; 4239 } 4240 4241 bool 4242 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) 4243 { 4244 if (obj->fence_reg != I915_FENCE_REG_NONE) { 4245 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 4246 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj); 4247 4248 WARN_ON(!ggtt_vma || 4249 dev_priv->fence_regs[obj->fence_reg].pin_count > 4250 ggtt_vma->pin_count); 4251 dev_priv->fence_regs[obj->fence_reg].pin_count++; 4252 return true; 4253 } else 4254 return false; 4255 } 4256 4257 void 4258 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) 4259 { 4260 if (obj->fence_reg != I915_FENCE_REG_NONE) { 4261 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 4262 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0); 4263 dev_priv->fence_regs[obj->fence_reg].pin_count--; 4264 } 4265 } 4266 4267 int 4268 i915_gem_busy_ioctl(struct drm_device *dev, void *data, 4269 struct drm_file *file) 4270 { 4271 struct drm_i915_gem_busy *args = data; 4272 struct drm_i915_gem_object *obj; 4273 int ret; 4274 4275 ret = i915_mutex_lock_interruptible(dev); 4276 if (ret) 4277 return ret; 4278 4279 
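	/* Result encoding used below: bit 0 of args->busy reports whether the
	 * object is still active, and the flag of the ring that last read
	 * from it is shifted into the upper 16 bits, which is why
	 * I915_NUM_RINGS must fit in 16.
	 */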
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 4280 if (&obj->base == NULL) { 4281 ret = -ENOENT; 4282 goto unlock; 4283 } 4284 4285 /* Count all active objects as busy, even if they are currently not used 4286 * by the gpu. Users of this interface expect objects to eventually 4287 * become non-busy without any further actions, therefore emit any 4288 * necessary flushes here. 4289 */ 4290 ret = i915_gem_object_flush_active(obj); 4291 4292 args->busy = obj->active; 4293 if (obj->last_read_req) { 4294 struct intel_engine_cs *ring; 4295 BUILD_BUG_ON(I915_NUM_RINGS > 16); 4296 ring = i915_gem_request_get_ring(obj->last_read_req); 4297 args->busy |= intel_ring_flag(ring) << 16; 4298 } 4299 4300 drm_gem_object_unreference(&obj->base); 4301 unlock: 4302 mutex_unlock(&dev->struct_mutex); 4303 return ret; 4304 } 4305 4306 int 4307 i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 4308 struct drm_file *file_priv) 4309 { 4310 return i915_gem_ring_throttle(dev, file_priv); 4311 } 4312 4313 int 4314 i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 4315 struct drm_file *file_priv) 4316 { 4317 struct drm_i915_private *dev_priv = dev->dev_private; 4318 struct drm_i915_gem_madvise *args = data; 4319 struct drm_i915_gem_object *obj; 4320 int ret; 4321 4322 switch (args->madv) { 4323 case I915_MADV_DONTNEED: 4324 case I915_MADV_WILLNEED: 4325 break; 4326 default: 4327 return -EINVAL; 4328 } 4329 4330 ret = i915_mutex_lock_interruptible(dev); 4331 if (ret) 4332 return ret; 4333 4334 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); 4335 if (&obj->base == NULL) { 4336 ret = -ENOENT; 4337 goto unlock; 4338 } 4339 4340 if (i915_gem_obj_is_pinned(obj)) { 4341 ret = -EINVAL; 4342 goto out; 4343 } 4344 4345 if (obj->pages && 4346 obj->tiling_mode != I915_TILING_NONE && 4347 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 4348 if (obj->madv == I915_MADV_WILLNEED) 4349 i915_gem_object_unpin_pages(obj); 4350 if (args->madv == I915_MADV_WILLNEED) 4351 i915_gem_object_pin_pages(obj); 4352 } 4353 4354 if (obj->madv != __I915_MADV_PURGED) 4355 obj->madv = args->madv; 4356 4357 /* if the object is no longer attached, discard its backing storage */ 4358 if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL) 4359 i915_gem_object_truncate(obj); 4360 4361 args->retained = obj->madv != __I915_MADV_PURGED; 4362 4363 out: 4364 drm_gem_object_unreference(&obj->base); 4365 unlock: 4366 mutex_unlock(&dev->struct_mutex); 4367 return ret; 4368 } 4369 4370 void i915_gem_object_init(struct drm_i915_gem_object *obj, 4371 const struct drm_i915_gem_object_ops *ops) 4372 { 4373 INIT_LIST_HEAD(&obj->global_list); 4374 INIT_LIST_HEAD(&obj->ring_list); 4375 INIT_LIST_HEAD(&obj->obj_exec_link); 4376 INIT_LIST_HEAD(&obj->vma_list); 4377 INIT_LIST_HEAD(&obj->batch_pool_list); 4378 4379 obj->ops = ops; 4380 4381 obj->fence_reg = I915_FENCE_REG_NONE; 4382 obj->madv = I915_MADV_WILLNEED; 4383 4384 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size); 4385 } 4386 4387 static const struct drm_i915_gem_object_ops i915_gem_object_ops = { 4388 .get_pages = i915_gem_object_get_pages_gtt, 4389 .put_pages = i915_gem_object_put_pages_gtt, 4390 }; 4391 4392 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 4393 size_t size) 4394 { 4395 struct drm_i915_gem_object *obj; 4396 struct address_space *mapping; 4397 gfp_t mask; 4398 4399 obj = i915_gem_object_alloc(dev); 4400 if (obj == NULL) 4401 return NULL; 4402 4403 if (drm_gem_object_init(dev, &obj->base, size) 
!= 0) { 4404 i915_gem_object_free(obj); 4405 return NULL; 4406 } 4407 4408 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; 4409 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) { 4410 /* 965gm cannot relocate objects above 4GiB. */ 4411 mask &= ~__GFP_HIGHMEM; 4412 mask |= __GFP_DMA32; 4413 } 4414 4415 mapping = file_inode(obj->base.filp)->i_mapping; 4416 mapping_set_gfp_mask(mapping, mask); 4417 4418 i915_gem_object_init(obj, &i915_gem_object_ops); 4419 4420 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 4421 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 4422 4423 if (HAS_LLC(dev)) { 4424 /* On some devices, we can have the GPU use the LLC (the CPU 4425 * cache) for about a 10% performance improvement 4426 * compared to uncached. Graphics requests other than 4427 * display scanout are coherent with the CPU in 4428 * accessing this cache. This means in this mode we 4429 * don't need to clflush on the CPU side, and on the 4430 * GPU side we only need to flush internal caches to 4431 * get data visible to the CPU. 4432 * 4433 * However, we maintain the display planes as UC, and so 4434 * need to rebind when first used as such. 4435 */ 4436 obj->cache_level = I915_CACHE_LLC; 4437 } else 4438 obj->cache_level = I915_CACHE_NONE; 4439 4440 trace_i915_gem_object_create(obj); 4441 4442 return obj; 4443 } 4444 4445 static bool discard_backing_storage(struct drm_i915_gem_object *obj) 4446 { 4447 /* If we are the last user of the backing storage (be it shmemfs 4448 * pages or stolen etc), we know that the pages are going to be 4449 * immediately released. In this case, we can then skip copying 4450 * back the contents from the GPU. 4451 */ 4452 4453 if (obj->madv != I915_MADV_WILLNEED) 4454 return false; 4455 4456 if (obj->base.filp == NULL) 4457 return true; 4458 4459 /* At first glance, this looks racy, but then again so would be 4460 * userspace racing mmap against close. However, the first external 4461 * reference to the filp can only be obtained through the 4462 * i915_gem_mmap_ioctl() which safeguards us against the user 4463 * acquiring such a reference whilst we are in the middle of 4464 * freeing the object. 4465 */ 4466 return atomic_long_read(&obj->base.filp->f_count) == 1; 4467 } 4468 4469 void i915_gem_free_object(struct drm_gem_object *gem_obj) 4470 { 4471 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 4472 struct drm_device *dev = obj->base.dev; 4473 struct drm_i915_private *dev_priv = dev->dev_private; 4474 struct i915_vma *vma, *next; 4475 4476 intel_runtime_pm_get(dev_priv); 4477 4478 trace_i915_gem_object_destroy(obj); 4479 4480 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { 4481 int ret; 4482 4483 vma->pin_count = 0; 4484 ret = i915_vma_unbind(vma); 4485 if (WARN_ON(ret == -ERESTARTSYS)) { 4486 bool was_interruptible; 4487 4488 was_interruptible = dev_priv->mm.interruptible; 4489 dev_priv->mm.interruptible = false; 4490 4491 WARN_ON(i915_vma_unbind(vma)); 4492 4493 dev_priv->mm.interruptible = was_interruptible; 4494 } 4495 } 4496 4497 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up 4498 * before progressing. 
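	 * (Stolen objects are created with their backing pages pinned for the
	 * object's whole lifetime; that is the pin dropped just below.)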
 */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	WARN_ON(obj->frontbuffer_bits);

	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
	    obj->tiling_mode != I915_TILING_NONE)
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	if (discard_backing_storage(obj))
		obj->madv = I915_MADV_DONTNEED;
	i915_gem_object_put_pages(obj);
	i915_gem_object_free_mmap_offset(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	if (obj->ops->release)
		obj->ops->release(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);

	intel_runtime_pm_put(dev_priv);
}

struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (i915_is_ggtt(vma->vm) &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma;
	}
	return NULL;
}

struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
					   const struct i915_ggtt_view *view)
{
	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
	struct i915_vma *vma;

	if (WARN_ONCE(!view, "no view specified"))
		return ERR_PTR(-EINVAL);

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->vm == ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma;
	return NULL;
}

void i915_gem_vma_destroy(struct i915_vma *vma)
{
	struct i915_address_space *vm = NULL;
	WARN_ON(vma->node.allocated);

	/* Keep the vma as a placeholder in the execbuffer reservation lists */
	if (!list_empty(&vma->exec_list))
		return;

	vm = vma->vm;

	if (!i915_is_ggtt(vm))
		i915_ppgtt_put(i915_vm_to_ppgtt(vm));

	list_del(&vma->vma_link);

	kfree(vma);
}

static void
i915_gem_stop_ringbuffers(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		dev_priv->gt.stop_ring(ring);
}

int
i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gpu_idle(dev);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev);

	i915_gem_stop_ringbuffers(dev);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	flush_delayed_work(&dev_priv->mm.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
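	 * Hangcheck and retire work were cancelled and the idle worker was
	 * flushed above, so nothing should be able to mark us busy again.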
4615 */ 4616 WARN_ON(dev_priv->mm.busy); 4617 4618 return 0; 4619 4620 err: 4621 mutex_unlock(&dev->struct_mutex); 4622 return ret; 4623 } 4624 4625 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice) 4626 { 4627 struct drm_device *dev = ring->dev; 4628 struct drm_i915_private *dev_priv = dev->dev_private; 4629 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200); 4630 u32 *remap_info = dev_priv->l3_parity.remap_info[slice]; 4631 int i, ret; 4632 4633 if (!HAS_L3_DPF(dev) || !remap_info) 4634 return 0; 4635 4636 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3); 4637 if (ret) 4638 return ret; 4639 4640 /* 4641 * Note: We do not worry about the concurrent register cacheline hang 4642 * here because no other code should access these registers other than 4643 * at initialization time. 4644 */ 4645 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { 4646 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 4647 intel_ring_emit(ring, reg_base + i); 4648 intel_ring_emit(ring, remap_info[i/4]); 4649 } 4650 4651 intel_ring_advance(ring); 4652 4653 return ret; 4654 } 4655 4656 void i915_gem_init_swizzling(struct drm_device *dev) 4657 { 4658 struct drm_i915_private *dev_priv = dev->dev_private; 4659 4660 if (INTEL_INFO(dev)->gen < 5 || 4661 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) 4662 return; 4663 4664 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 4665 DISP_TILE_SURFACE_SWIZZLING); 4666 4667 if (IS_GEN5(dev)) 4668 return; 4669 4670 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); 4671 if (IS_GEN6(dev)) 4672 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); 4673 else if (IS_GEN7(dev)) 4674 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); 4675 else if (IS_GEN8(dev)) 4676 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); 4677 else 4678 BUG(); 4679 } 4680 4681 static bool 4682 intel_enable_blt(struct drm_device *dev) 4683 { 4684 if (!HAS_BLT(dev)) 4685 return false; 4686 4687 /* The blitter was dysfunctional on early prototypes */ 4688 if (IS_GEN6(dev) && dev->pdev->revision < 8) { 4689 DRM_INFO("BLT not supported on this pre-production hardware;" 4690 " graphics performance will be degraded.\n"); 4691 return false; 4692 } 4693 4694 return true; 4695 } 4696 4697 static void init_unused_ring(struct drm_device *dev, u32 base) 4698 { 4699 struct drm_i915_private *dev_priv = dev->dev_private; 4700 4701 I915_WRITE(RING_CTL(base), 0); 4702 I915_WRITE(RING_HEAD(base), 0); 4703 I915_WRITE(RING_TAIL(base), 0); 4704 I915_WRITE(RING_START(base), 0); 4705 } 4706 4707 static void init_unused_rings(struct drm_device *dev) 4708 { 4709 if (IS_I830(dev)) { 4710 init_unused_ring(dev, PRB1_BASE); 4711 init_unused_ring(dev, SRB0_BASE); 4712 init_unused_ring(dev, SRB1_BASE); 4713 init_unused_ring(dev, SRB2_BASE); 4714 init_unused_ring(dev, SRB3_BASE); 4715 } else if (IS_GEN2(dev)) { 4716 init_unused_ring(dev, SRB0_BASE); 4717 init_unused_ring(dev, SRB1_BASE); 4718 } else if (IS_GEN3(dev)) { 4719 init_unused_ring(dev, PRB1_BASE); 4720 init_unused_ring(dev, PRB2_BASE); 4721 } 4722 } 4723 4724 int i915_gem_init_rings(struct drm_device *dev) 4725 { 4726 struct drm_i915_private *dev_priv = dev->dev_private; 4727 int ret; 4728 4729 ret = intel_init_render_ring_buffer(dev); 4730 if (ret) 4731 return ret; 4732 4733 if (HAS_BSD(dev)) { 4734 ret = intel_init_bsd_ring_buffer(dev); 4735 if (ret) 4736 goto cleanup_render_ring; 4737 } 4738 4739 if (intel_enable_blt(dev)) { 4740 ret = intel_init_blt_ring_buffer(dev); 4741 if (ret) 4742 goto cleanup_bsd_ring; 4743 } 
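	/* Each ring initialisation failure below unwinds through the
	 * cleanup_* ladder at the end of this function, tearing down the
	 * already-initialised rings in reverse order.
	 */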

	if (HAS_VEBOX(dev)) {
		ret = intel_init_vebox_ring_buffer(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = intel_init_bsd2_ring_buffer(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);

	return ret;
}

int
i915_gem_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i;

	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
		return -EIO;

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (dev_priv->ellc_size)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev)) {
		if (IS_IVYBRIDGE(dev)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_INFO(dev)->gen >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
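	 * (init_unused_rings() below simply clears RING_CTL, RING_HEAD,
	 * RING_TAIL and RING_START for every ring this platform doesn't use.)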
4816 */ 4817 init_unused_rings(dev); 4818 4819 for_each_ring(ring, dev_priv, i) { 4820 ret = ring->init_hw(ring); 4821 if (ret) 4822 goto out; 4823 } 4824 4825 for (i = 0; i < NUM_L3_SLICES(dev); i++) 4826 i915_gem_l3_remap(&dev_priv->ring[RCS], i); 4827 4828 ret = i915_ppgtt_init_hw(dev); 4829 if (ret && ret != -EIO) { 4830 DRM_ERROR("PPGTT enable failed %d\n", ret); 4831 i915_gem_cleanup_ringbuffer(dev); 4832 } 4833 4834 ret = i915_gem_context_enable(dev_priv); 4835 if (ret && ret != -EIO) { 4836 DRM_ERROR("Context enable failed %d\n", ret); 4837 i915_gem_cleanup_ringbuffer(dev); 4838 4839 goto out; 4840 } 4841 4842 out: 4843 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4844 return ret; 4845 } 4846 4847 int i915_gem_init(struct drm_device *dev) 4848 { 4849 struct drm_i915_private *dev_priv = dev->dev_private; 4850 int ret; 4851 4852 i915.enable_execlists = intel_sanitize_enable_execlists(dev, 4853 i915.enable_execlists); 4854 4855 mutex_lock(&dev->struct_mutex); 4856 4857 if (IS_VALLEYVIEW(dev)) { 4858 /* VLVA0 (potential hack), BIOS isn't actually waking us */ 4859 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ); 4860 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 4861 VLV_GTLC_ALLOWWAKEACK), 10)) 4862 DRM_DEBUG_DRIVER("allow wake ack timed out\n"); 4863 } 4864 4865 if (!i915.enable_execlists) { 4866 dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission; 4867 dev_priv->gt.init_rings = i915_gem_init_rings; 4868 dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer; 4869 dev_priv->gt.stop_ring = intel_stop_ring_buffer; 4870 } else { 4871 dev_priv->gt.do_execbuf = intel_execlists_submission; 4872 dev_priv->gt.init_rings = intel_logical_rings_init; 4873 dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup; 4874 dev_priv->gt.stop_ring = intel_logical_ring_stop; 4875 } 4876 4877 /* This is just a security blanket to placate dragons. 4878 * On some systems, we very sporadically observe that the first TLBs 4879 * used by the CS may be stale, despite us poking the TLB reset. If 4880 * we hold the forcewake during initialisation these problems 4881 * just magically go away. 4882 */ 4883 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4884 4885 ret = i915_gem_init_userptr(dev); 4886 if (ret) 4887 goto out_unlock; 4888 4889 i915_gem_init_global_gtt(dev); 4890 4891 ret = i915_gem_context_init(dev); 4892 if (ret) 4893 goto out_unlock; 4894 4895 ret = dev_priv->gt.init_rings(dev); 4896 if (ret) 4897 goto out_unlock; 4898 4899 ret = i915_gem_init_hw(dev); 4900 if (ret == -EIO) { 4901 /* Allow ring initialisation to fail by marking the GPU as 4902 * wedged. But we only want to do this where the GPU is angry, 4903 * for all other failure, such as an allocation failure, bail. 
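		 * (Marking the GPU as wedged sets I915_WEDGED in the reset
		 * counter, so later waits fail with -EIO rather than blocking
		 * forever.)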
4904 */ 4905 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n"); 4906 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter); 4907 ret = 0; 4908 } 4909 4910 out_unlock: 4911 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4912 mutex_unlock(&dev->struct_mutex); 4913 4914 return ret; 4915 } 4916 4917 void 4918 i915_gem_cleanup_ringbuffer(struct drm_device *dev) 4919 { 4920 struct drm_i915_private *dev_priv = dev->dev_private; 4921 struct intel_engine_cs *ring; 4922 int i; 4923 4924 for_each_ring(ring, dev_priv, i) 4925 dev_priv->gt.cleanup_ring(ring); 4926 } 4927 4928 static void 4929 init_ring_lists(struct intel_engine_cs *ring) 4930 { 4931 INIT_LIST_HEAD(&ring->active_list); 4932 INIT_LIST_HEAD(&ring->request_list); 4933 } 4934 4935 void i915_init_vm(struct drm_i915_private *dev_priv, 4936 struct i915_address_space *vm) 4937 { 4938 if (!i915_is_ggtt(vm)) 4939 drm_mm_init(&vm->mm, vm->start, vm->total); 4940 vm->dev = dev_priv->dev; 4941 INIT_LIST_HEAD(&vm->active_list); 4942 INIT_LIST_HEAD(&vm->inactive_list); 4943 INIT_LIST_HEAD(&vm->global_link); 4944 list_add_tail(&vm->global_link, &dev_priv->vm_list); 4945 } 4946 4947 void 4948 i915_gem_load(struct drm_device *dev) 4949 { 4950 struct drm_i915_private *dev_priv = dev->dev_private; 4951 int i; 4952 4953 dev_priv->slab = 4954 kmem_cache_create("i915_gem_object", 4955 sizeof(struct drm_i915_gem_object), 0, 4956 SLAB_HWCACHE_ALIGN, 4957 NULL); 4958 4959 INIT_LIST_HEAD(&dev_priv->vm_list); 4960 i915_init_vm(dev_priv, &dev_priv->gtt.base); 4961 4962 INIT_LIST_HEAD(&dev_priv->context_list); 4963 INIT_LIST_HEAD(&dev_priv->mm.unbound_list); 4964 INIT_LIST_HEAD(&dev_priv->mm.bound_list); 4965 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4966 for (i = 0; i < I915_NUM_RINGS; i++) 4967 init_ring_lists(&dev_priv->ring[i]); 4968 for (i = 0; i < I915_MAX_NUM_FENCES; i++) 4969 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 4970 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 4971 i915_gem_retire_work_handler); 4972 INIT_DELAYED_WORK(&dev_priv->mm.idle_work, 4973 i915_gem_idle_work_handler); 4974 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 4975 4976 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; 4977 4978 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) 4979 dev_priv->num_fence_regs = 32; 4980 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 4981 dev_priv->num_fence_regs = 16; 4982 else 4983 dev_priv->num_fence_regs = 8; 4984 4985 if (intel_vgpu_active(dev)) 4986 dev_priv->num_fence_regs = 4987 I915_READ(vgtif_reg(avail_rs.fence_num)); 4988 4989 /* Initialize fence registers to zero */ 4990 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4991 i915_gem_restore_fences(dev); 4992 4993 i915_gem_detect_bit_6_swizzle(dev); 4994 init_waitqueue_head(&dev_priv->pending_flip_queue); 4995 4996 dev_priv->mm.interruptible = true; 4997 4998 i915_gem_shrinker_init(dev_priv); 4999 5000 i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool); 5001 5002 mutex_init(&dev_priv->fb_tracking.lock); 5003 } 5004 5005 void i915_gem_release(struct drm_device *dev, struct drm_file *file) 5006 { 5007 struct drm_i915_file_private *file_priv = file->driver_priv; 5008 5009 cancel_delayed_work_sync(&file_priv->mm.idle_work); 5010 5011 /* Clean up our request list when the client is going away, so that 5012 * later retire_requests won't dereference our soon-to-be-gone 5013 * file_priv. 
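	 * The requests themselves stay on the engines' lists and are retired
	 * normally; we only sever the request->file_priv back-pointer here.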
5014 */ 5015 spin_lock(&file_priv->mm.lock); 5016 while (!list_empty(&file_priv->mm.request_list)) { 5017 struct drm_i915_gem_request *request; 5018 5019 request = list_first_entry(&file_priv->mm.request_list, 5020 struct drm_i915_gem_request, 5021 client_list); 5022 list_del(&request->client_list); 5023 request->file_priv = NULL; 5024 } 5025 spin_unlock(&file_priv->mm.lock); 5026 } 5027 5028 static void 5029 i915_gem_file_idle_work_handler(struct work_struct *work) 5030 { 5031 struct drm_i915_file_private *file_priv = 5032 container_of(work, typeof(*file_priv), mm.idle_work.work); 5033 5034 atomic_set(&file_priv->rps_wait_boost, false); 5035 } 5036 5037 int i915_gem_open(struct drm_device *dev, struct drm_file *file) 5038 { 5039 struct drm_i915_file_private *file_priv; 5040 int ret; 5041 5042 DRM_DEBUG_DRIVER("\n"); 5043 5044 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); 5045 if (!file_priv) 5046 return -ENOMEM; 5047 5048 file->driver_priv = file_priv; 5049 file_priv->dev_priv = dev->dev_private; 5050 file_priv->file = file; 5051 5052 spin_lock_init(&file_priv->mm.lock); 5053 INIT_LIST_HEAD(&file_priv->mm.request_list); 5054 INIT_DELAYED_WORK(&file_priv->mm.idle_work, 5055 i915_gem_file_idle_work_handler); 5056 5057 ret = i915_gem_context_open(dev, file); 5058 if (ret) 5059 kfree(file_priv); 5060 5061 return ret; 5062 } 5063 5064 /** 5065 * i915_gem_track_fb - update frontbuffer tracking 5066 * old: current GEM buffer for the frontbuffer slots 5067 * new: new GEM buffer for the frontbuffer slots 5068 * frontbuffer_bits: bitmask of frontbuffer slots 5069 * 5070 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them 5071 * from @old and setting them in @new. Both @old and @new can be NULL. 5072 */ 5073 void i915_gem_track_fb(struct drm_i915_gem_object *old, 5074 struct drm_i915_gem_object *new, 5075 unsigned frontbuffer_bits) 5076 { 5077 if (old) { 5078 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex)); 5079 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits)); 5080 old->frontbuffer_bits &= ~frontbuffer_bits; 5081 } 5082 5083 if (new) { 5084 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex)); 5085 WARN_ON(new->frontbuffer_bits & frontbuffer_bits); 5086 new->frontbuffer_bits |= frontbuffer_bits; 5087 } 5088 } 5089 5090 /* All the new VM stuff */ 5091 unsigned long 5092 i915_gem_obj_offset(struct drm_i915_gem_object *o, 5093 struct i915_address_space *vm) 5094 { 5095 struct drm_i915_private *dev_priv = o->base.dev->dev_private; 5096 struct i915_vma *vma; 5097 5098 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); 5099 5100 list_for_each_entry(vma, &o->vma_list, vma_link) { 5101 if (i915_is_ggtt(vma->vm) && 5102 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) 5103 continue; 5104 if (vma->vm == vm) 5105 return vma->node.start; 5106 } 5107 5108 WARN(1, "%s vma for this object not found.\n", 5109 i915_is_ggtt(vm) ? 
"global" : "ppgtt"); 5110 return -1; 5111 } 5112 5113 unsigned long 5114 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, 5115 const struct i915_ggtt_view *view) 5116 { 5117 struct i915_address_space *ggtt = i915_obj_to_ggtt(o); 5118 struct i915_vma *vma; 5119 5120 list_for_each_entry(vma, &o->vma_list, vma_link) 5121 if (vma->vm == ggtt && 5122 i915_ggtt_view_equal(&vma->ggtt_view, view)) 5123 return vma->node.start; 5124 5125 WARN(1, "global vma for this object not found.\n"); 5126 return -1; 5127 } 5128 5129 bool i915_gem_obj_bound(struct drm_i915_gem_object *o, 5130 struct i915_address_space *vm) 5131 { 5132 struct i915_vma *vma; 5133 5134 list_for_each_entry(vma, &o->vma_list, vma_link) { 5135 if (i915_is_ggtt(vma->vm) && 5136 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) 5137 continue; 5138 if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) 5139 return true; 5140 } 5141 5142 return false; 5143 } 5144 5145 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, 5146 const struct i915_ggtt_view *view) 5147 { 5148 struct i915_address_space *ggtt = i915_obj_to_ggtt(o); 5149 struct i915_vma *vma; 5150 5151 list_for_each_entry(vma, &o->vma_list, vma_link) 5152 if (vma->vm == ggtt && 5153 i915_ggtt_view_equal(&vma->ggtt_view, view) && 5154 drm_mm_node_allocated(&vma->node)) 5155 return true; 5156 5157 return false; 5158 } 5159 5160 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) 5161 { 5162 struct i915_vma *vma; 5163 5164 list_for_each_entry(vma, &o->vma_list, vma_link) 5165 if (drm_mm_node_allocated(&vma->node)) 5166 return true; 5167 5168 return false; 5169 } 5170 5171 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, 5172 struct i915_address_space *vm) 5173 { 5174 struct drm_i915_private *dev_priv = o->base.dev->dev_private; 5175 struct i915_vma *vma; 5176 5177 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); 5178 5179 BUG_ON(list_empty(&o->vma_list)); 5180 5181 list_for_each_entry(vma, &o->vma_list, vma_link) { 5182 if (i915_is_ggtt(vma->vm) && 5183 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) 5184 continue; 5185 if (vma->vm == vm) 5186 return vma->node.size; 5187 } 5188 return 0; 5189 } 5190 5191 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) 5192 { 5193 struct i915_vma *vma; 5194 list_for_each_entry(vma, &obj->vma_list, vma_link) { 5195 if (i915_is_ggtt(vma->vm) && 5196 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) 5197 continue; 5198 if (vma->pin_count > 0) 5199 return true; 5200 } 5201 return false; 5202 } 5203 5204