/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		page_cache_release(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(obj->base.dev);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}
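
	/* Note: the phys backing store comes from drm_pci_alloc() (see
	 * i915_gem_object_attach_phys() below), i.e. one physically
	 * contiguous DMA buffer, so a single scatterlist entry covering
	 * the whole object is sufficient here.
	 */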
	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	obj->has_dma_mapping = true;
	return 0;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		WARN_ON(ret != -EIO);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			page_cache_release(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);

	obj->has_dma_mapping = false;
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

static int
drop_pages(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma, *next;
	int ret;

	drm_gem_object_reference(&obj->base);
	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
		if (i915_vma_unbind(vma))
			break;

	ret = i915_gem_object_put_pages(obj);
	drm_gem_object_unreference(&obj->base);

	return ret;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = drop_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);
	int ret = 0;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(dev);

out:
	intel_fb_obj_flush(obj, false);
	return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
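
/* Example (illustrative): for a 1920x1080 dumb buffer at 32 bpp,
 * pitch = ALIGN(1920 * 4, 64) = 7680 bytes and size = 7680 * 1080 =
 * 8294400 bytes; i915_gem_create() then rounds the size up to a page
 * multiple (a no-op here, since 8294400 = 2025 * 4096).
 */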

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!obj->base.filp)
		return -EINVAL;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}
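
/* On the swizzled copy helpers above: roughly speaking, with bit-17
 * swizzling the two 64-byte halves of each 128-byte span end up swapped
 * whenever bit 17 of the page's physical address is set, which is why the
 * helpers walk the buffer in cacheline-sized chunks and access the GPU copy
 * at gpu_offset ^ 64 (e.g. bytes 0-63 of such a page are found at 64-127 in
 * the linear view, and vice versa). The callers only take this path when
 * page_to_phys(page) has bit 17 set.
 */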

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue.
			 */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_flush;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_flush:
	intel_fb_obj_flush(obj, false);
out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions.
 */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
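		/* Example (illustrative): with a 64-byte clflush size and
		 * needs_clflush_before set, a write at shmem_page_offset = 32
		 * or of page_length = 100 only partially covers a cacheline,
		 * so the pre-write flush is taken; a 0-offset, 4096-byte write
		 * covers whole cachelines and skips it. */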
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	intel_fb_obj_flush(obj, false);
	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto put_rpm;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
put_rpm:
	intel_runtime_pm_put(dev_priv);

	return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		/*
		 * Check if GPU Reset is in progress - we need intel_ring_begin
		 * to work properly to reinit the hw state while the gpu is
		 * still marked as reset-in-progress. Handle this with a flag.
		 */
		if (!error->reload_in_reset)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Compare arbitrary request against outstanding lazy request. Emit on match.
 */
int
i915_gem_check_olr(struct drm_i915_gem_request *req)
{
	int ret;

	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));

	ret = 0;
	if (req == req->ring->outstanding_lazy_request)
		ret = i915_add_request(req->ring);

	return ret;
}

static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_engine_cs *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __i915_wait_request - wait until execution of request has finished
 * @req: duh!
 * @reset_counter: reset sequence associated with the given request
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
int __i915_wait_request(struct drm_i915_gem_request *req,
			unsigned reset_counter,
			bool interruptible,
			s64 *timeout,
			struct drm_i915_file_private *file_priv)
{
	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	s64 before, now;
	int ret;

	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");

	if (i915_gem_request_completed(req, true))
		return 0;

	timeout_expire = timeout ?
		jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;

	if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(req);
	before = ktime_get_raw_ns();
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_gem_request_completed(req, false)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	now = ktime_get_raw_ns();
	trace_i915_gem_request_wait_end(req);

	if (!irq_test_in_progress)
		ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		s64 tres = *timeout - (now - before);

		*timeout = tres < 0 ? 0 : tres;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
			*timeout = 0;
	}

	return ret;
}

/**
 * Waits for a request to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_request(struct drm_i915_gem_request *req)
{
	struct drm_device *dev;
	struct drm_i915_private *dev_priv;
	bool interruptible;
	unsigned reset_counter;
	int ret;

	BUG_ON(req == NULL);

	dev = req->ring->dev;
	dev_priv = dev->dev_private;
	interruptible = dev_priv->mm.interruptible;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(req);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	i915_gem_request_reference(req);
	ret = __i915_wait_request(req, reset_counter,
				  interruptible, NULL, NULL);
	i915_gem_request_unreference(req);
	return ret;
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
{
	if (!obj->active)
		return 0;

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_req is always the earlier of
	 * the two (read/write) requests, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	i915_gem_request_assign(&obj->last_write_req, NULL);

	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct drm_i915_gem_request *req;
	int ret;

	req = readonly ? obj->last_write_req : obj->last_read_req;
	if (!req)
		return 0;

	ret = i915_wait_request(req);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct drm_i915_file_private *file_priv,
					    bool readonly)
{
	struct drm_i915_gem_request *req;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned reset_counter;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	req = readonly ? obj->last_write_req : obj->last_read_req;
	if (!req)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(req);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	i915_gem_request_reference(req);
	mutex_unlock(&dev->struct_mutex);
	ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);
	mutex_lock(&dev->struct_mutex);
	i915_gem_request_unreference(req);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj,
							  file->driver_priv,
							  !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT)
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
	else
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !cpu_has_pat)
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);
	}
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	intel_runtime_pm_get(dev_priv);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon reacquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
	if (ret)
		goto unlock;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EFAULT;
		goto unlock;
	}

	/* Now bind it into the GTT if needed */
	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
	if (ret)
		goto unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unpin;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unpin;

	/* Finally, remap it using the new GTT offset */
	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
	pfn >>= PAGE_SHIFT;

	if (!obj->fault_mappable) {
		unsigned long size = min_t(unsigned long,
					   vma->vm_end - vma->vm_start,
					   obj->base.size);
		int i;

		for (i = 0; i < size >> PAGE_SHIFT; i++) {
			ret = vm_insert_pfn(vma,
					    (unsigned long)vma->vm_start + i * PAGE_SIZE,
					    pfn + i);
			if (ret)
				break;
		}

		obj->fault_mappable = true;
	} else
		ret = vm_insert_pfn(vma,
				    (unsigned long)vmf->virtual_address,
				    pfn + page_offset);
unpin:
	i915_gem_object_ggtt_unpin(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}

	intel_runtime_pm_put(dev_priv);
	return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);
	obj->fault_mappable = false;
}

void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		i915_gem_release_mmap(obj);
}

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	if (drm_vma_node_has_offset(&obj->base.vma_node))
		return 0;

	dev_priv->mm.shrinker_no_lock_stealing = true;

	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	/* Badly fragmented mmap space? The only way we can recover
	 * space is by destroying unwanted objects. We can't randomly release
	 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can do is to release the
	 * offsets on purgeable objects by truncating it and marking it purged,
	 * which prevents userspace from ever using that object again.
	 */
	i915_gem_shrink(dev_priv,
			obj->base.size >> PAGE_SHIFT,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_PURGEABLE);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	i915_gem_shrink_all(dev_priv);
	ret = drm_gem_create_mmap_offset(&obj->base);
out:
	dev_priv->mm.shrinker_no_lock_stealing = false;

	return ret;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->gtt.mappable_end) {
		ret = -E2BIG;
		goto out;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->madv = __I915_MADV_PURGED;
}

/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	switch (obj->madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = file_inode(obj->base.filp)->i_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		WARN_ON(ret != -EIO);
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	BUG_ON(i915_gem_obj_bound_any(obj));

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->global_list);

	ops->put_pages(obj);
	obj->pages = NULL;

	i915_gem_object_invalidate(obj);

	return 0;
}

static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sg_page_iter sg_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
2066 * 2067 * Fail silently without starting the shrinker 2068 */ 2069 mapping = file_inode(obj->base.filp)->i_mapping; 2070 gfp = mapping_gfp_mask(mapping); 2071 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; 2072 gfp &= ~(__GFP_IO | __GFP_WAIT); 2073 sg = st->sgl; 2074 st->nents = 0; 2075 for (i = 0; i < page_count; i++) { 2076 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2077 if (IS_ERR(page)) { 2078 i915_gem_shrink(dev_priv, 2079 page_count, 2080 I915_SHRINK_BOUND | 2081 I915_SHRINK_UNBOUND | 2082 I915_SHRINK_PURGEABLE); 2083 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2084 } 2085 if (IS_ERR(page)) { 2086 /* We've tried hard to allocate the memory by reaping 2087 * our own buffer, now let the real VM do its job and 2088 * go down in flames if truly OOM. 2089 */ 2090 i915_gem_shrink_all(dev_priv); 2091 page = shmem_read_mapping_page(mapping, i); 2092 if (IS_ERR(page)) 2093 goto err_pages; 2094 } 2095 #ifdef CONFIG_SWIOTLB 2096 if (swiotlb_nr_tbl()) { 2097 st->nents++; 2098 sg_set_page(sg, page, PAGE_SIZE, 0); 2099 sg = sg_next(sg); 2100 continue; 2101 } 2102 #endif 2103 if (!i || page_to_pfn(page) != last_pfn + 1) { 2104 if (i) 2105 sg = sg_next(sg); 2106 st->nents++; 2107 sg_set_page(sg, page, PAGE_SIZE, 0); 2108 } else { 2109 sg->length += PAGE_SIZE; 2110 } 2111 last_pfn = page_to_pfn(page); 2112 2113 /* Check that the i965g/gm workaround works. */ 2114 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); 2115 } 2116 #ifdef CONFIG_SWIOTLB 2117 if (!swiotlb_nr_tbl()) 2118 #endif 2119 sg_mark_end(sg); 2120 obj->pages = st; 2121 2122 if (i915_gem_object_needs_bit17_swizzle(obj)) 2123 i915_gem_object_do_bit_17_swizzle(obj); 2124 2125 if (obj->tiling_mode != I915_TILING_NONE && 2126 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 2127 i915_gem_object_pin_pages(obj); 2128 2129 return 0; 2130 2131 err_pages: 2132 sg_mark_end(sg); 2133 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) 2134 page_cache_release(sg_page_iter_page(&sg_iter)); 2135 sg_free_table(st); 2136 kfree(st); 2137 2138 /* shmemfs first checks if there is enough memory to allocate the page 2139 * and reports ENOSPC should there be insufficient, along with the usual 2140 * ENOMEM for a genuine allocation failure. 2141 * 2142 * We use ENOSPC in our driver to mean that we have run out of aperture 2143 * space and so want to translate the error from shmemfs back to our 2144 * usual understanding of ENOMEM. 2145 */ 2146 if (PTR_ERR(page) == -ENOSPC) 2147 return -ENOMEM; 2148 else 2149 return PTR_ERR(page); 2150 } 2151 2152 /* Ensure that the associated pages are gathered from the backing storage 2153 * and pinned into our object. i915_gem_object_get_pages() may be called 2154 * multiple times before they are released by a single call to 2155 * i915_gem_object_put_pages() - once the pages are no longer referenced 2156 * either as a result of memory pressure (reaping pages under the shrinker) 2157 * or as the object is itself released. 
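 *
 * A typical caller therefore looks roughly like this (sketch only, not a
 * verbatim call site):
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	... access obj->pages ...
 *	i915_gem_object_unpin_pages(obj);
 *
 * with the final release deferred to i915_gem_object_put_pages() once the
 * pin count has returned to zero.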
2158 */ 2159 int 2160 i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2161 { 2162 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2163 const struct drm_i915_gem_object_ops *ops = obj->ops; 2164 int ret; 2165 2166 if (obj->pages) 2167 return 0; 2168 2169 if (obj->madv != I915_MADV_WILLNEED) { 2170 DRM_DEBUG("Attempting to obtain a purgeable object\n"); 2171 return -EFAULT; 2172 } 2173 2174 BUG_ON(obj->pages_pin_count); 2175 2176 ret = ops->get_pages(obj); 2177 if (ret) 2178 return ret; 2179 2180 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list); 2181 return 0; 2182 } 2183 2184 static void 2185 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 2186 struct intel_engine_cs *ring) 2187 { 2188 struct drm_i915_gem_request *req; 2189 struct intel_engine_cs *old_ring; 2190 2191 BUG_ON(ring == NULL); 2192 2193 req = intel_ring_get_request(ring); 2194 old_ring = i915_gem_request_get_ring(obj->last_read_req); 2195 2196 if (old_ring != ring && obj->last_write_req) { 2197 /* Keep the request relative to the current ring */ 2198 i915_gem_request_assign(&obj->last_write_req, req); 2199 } 2200 2201 /* Add a reference if we're newly entering the active list. */ 2202 if (!obj->active) { 2203 drm_gem_object_reference(&obj->base); 2204 obj->active = 1; 2205 } 2206 2207 list_move_tail(&obj->ring_list, &ring->active_list); 2208 2209 i915_gem_request_assign(&obj->last_read_req, req); 2210 } 2211 2212 void i915_vma_move_to_active(struct i915_vma *vma, 2213 struct intel_engine_cs *ring) 2214 { 2215 list_move_tail(&vma->mm_list, &vma->vm->active_list); 2216 return i915_gem_object_move_to_active(vma->obj, ring); 2217 } 2218 2219 static void 2220 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) 2221 { 2222 struct i915_vma *vma; 2223 2224 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); 2225 BUG_ON(!obj->active); 2226 2227 list_for_each_entry(vma, &obj->vma_list, vma_link) { 2228 if (!list_empty(&vma->mm_list)) 2229 list_move_tail(&vma->mm_list, &vma->vm->inactive_list); 2230 } 2231 2232 intel_fb_obj_flush(obj, true); 2233 2234 list_del_init(&obj->ring_list); 2235 2236 i915_gem_request_assign(&obj->last_read_req, NULL); 2237 i915_gem_request_assign(&obj->last_write_req, NULL); 2238 obj->base.write_domain = 0; 2239 2240 i915_gem_request_assign(&obj->last_fenced_req, NULL); 2241 2242 obj->active = 0; 2243 drm_gem_object_unreference(&obj->base); 2244 2245 WARN_ON(i915_verify_lists(dev)); 2246 } 2247 2248 static void 2249 i915_gem_object_retire(struct drm_i915_gem_object *obj) 2250 { 2251 if (obj->last_read_req == NULL) 2252 return; 2253 2254 if (i915_gem_request_completed(obj->last_read_req, true)) 2255 i915_gem_object_move_to_inactive(obj); 2256 } 2257 2258 static int 2259 i915_gem_init_seqno(struct drm_device *dev, u32 seqno) 2260 { 2261 struct drm_i915_private *dev_priv = dev->dev_private; 2262 struct intel_engine_cs *ring; 2263 int ret, i, j; 2264 2265 /* Carefully retire all requests without writing to the rings */ 2266 for_each_ring(ring, dev_priv, i) { 2267 ret = intel_ring_idle(ring); 2268 if (ret) 2269 return ret; 2270 } 2271 i915_gem_retire_requests(dev); 2272 2273 /* Finally reset hw state */ 2274 for_each_ring(ring, dev_priv, i) { 2275 intel_ring_init_seqno(ring, seqno); 2276 2277 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++) 2278 ring->semaphore.sync_seqno[j] = 0; 2279 } 2280 2281 return 0; 2282 } 2283 2284 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) 2285 { 2286 struct drm_i915_private *dev_priv = 
dev->dev_private; 2287 int ret; 2288 2289 if (seqno == 0) 2290 return -EINVAL; 2291 2292 /* HWS page needs to be set less than what we 2293 * will inject to ring 2294 */ 2295 ret = i915_gem_init_seqno(dev, seqno - 1); 2296 if (ret) 2297 return ret; 2298 2299 /* Carefully set the last_seqno value so that wrap 2300 * detection still works 2301 */ 2302 dev_priv->next_seqno = seqno; 2303 dev_priv->last_seqno = seqno - 1; 2304 if (dev_priv->last_seqno == 0) 2305 dev_priv->last_seqno--; 2306 2307 return 0; 2308 } 2309 2310 int 2311 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) 2312 { 2313 struct drm_i915_private *dev_priv = dev->dev_private; 2314 2315 /* reserve 0 for non-seqno */ 2316 if (dev_priv->next_seqno == 0) { 2317 int ret = i915_gem_init_seqno(dev, 0); 2318 if (ret) 2319 return ret; 2320 2321 dev_priv->next_seqno = 1; 2322 } 2323 2324 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++; 2325 return 0; 2326 } 2327 2328 int __i915_add_request(struct intel_engine_cs *ring, 2329 struct drm_file *file, 2330 struct drm_i915_gem_object *obj) 2331 { 2332 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2333 struct drm_i915_gem_request *request; 2334 struct intel_ringbuffer *ringbuf; 2335 u32 request_start; 2336 int ret; 2337 2338 request = ring->outstanding_lazy_request; 2339 if (WARN_ON(request == NULL)) 2340 return -ENOMEM; 2341 2342 if (i915.enable_execlists) { 2343 ringbuf = request->ctx->engine[ring->id].ringbuf; 2344 } else 2345 ringbuf = ring->buffer; 2346 2347 request_start = intel_ring_get_tail(ringbuf); 2348 /* 2349 * Emit any outstanding flushes - execbuf can fail to emit the flush 2350 * after having emitted the batchbuffer command. Hence we need to fix 2351 * things up similar to emitting the lazy request. The difference here 2352 * is that the flush _must_ happen before the next request, no matter 2353 * what. 2354 */ 2355 if (i915.enable_execlists) { 2356 ret = logical_ring_flush_all_caches(ringbuf, request->ctx); 2357 if (ret) 2358 return ret; 2359 } else { 2360 ret = intel_ring_flush_all_caches(ring); 2361 if (ret) 2362 return ret; 2363 } 2364 2365 /* Record the position of the start of the request so that 2366 * should we detect the updated seqno part-way through the 2367 * GPU processing the request, we never over-estimate the 2368 * position of the head. 2369 */ 2370 request->postfix = intel_ring_get_tail(ringbuf); 2371 2372 if (i915.enable_execlists) { 2373 ret = ring->emit_request(ringbuf, request); 2374 if (ret) 2375 return ret; 2376 } else { 2377 ret = ring->add_request(ring); 2378 if (ret) 2379 return ret; 2380 2381 request->tail = intel_ring_get_tail(ringbuf); 2382 } 2383 2384 request->head = request_start; 2385 2386 /* Whilst this request exists, batch_obj will be on the 2387 * active_list, and so will hold the active reference. Only when this 2388 * request is retired will the batch_obj be moved onto the 2389 * inactive_list and lose its active reference. Hence we do not need 2390 * to explicitly hold another reference here. 2391 */ 2392 request->batch_obj = obj; 2393 2394 if (!i915.enable_execlists) { 2395 /* Hold a reference to the current context so that we can inspect 2396 * it later in case a hangcheck error event fires.
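 * The reference taken here is dropped again in i915_gem_request_free() once the request itself is finally released.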
2397 */ 2398 request->ctx = ring->last_context; 2399 if (request->ctx) 2400 i915_gem_context_reference(request->ctx); 2401 } 2402 2403 request->emitted_jiffies = jiffies; 2404 list_add_tail(&request->list, &ring->request_list); 2405 request->file_priv = NULL; 2406 2407 if (file) { 2408 struct drm_i915_file_private *file_priv = file->driver_priv; 2409 2410 spin_lock(&file_priv->mm.lock); 2411 request->file_priv = file_priv; 2412 list_add_tail(&request->client_list, 2413 &file_priv->mm.request_list); 2414 spin_unlock(&file_priv->mm.lock); 2415 2416 request->pid = get_pid(task_pid(current)); 2417 } 2418 2419 trace_i915_gem_request_add(request); 2420 ring->outstanding_lazy_request = NULL; 2421 2422 i915_queue_hangcheck(ring->dev); 2423 2424 cancel_delayed_work_sync(&dev_priv->mm.idle_work); 2425 queue_delayed_work(dev_priv->wq, 2426 &dev_priv->mm.retire_work, 2427 round_jiffies_up_relative(HZ)); 2428 intel_mark_busy(dev_priv->dev); 2429 2430 return 0; 2431 } 2432 2433 static inline void 2434 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) 2435 { 2436 struct drm_i915_file_private *file_priv = request->file_priv; 2437 2438 if (!file_priv) 2439 return; 2440 2441 spin_lock(&file_priv->mm.lock); 2442 list_del(&request->client_list); 2443 request->file_priv = NULL; 2444 spin_unlock(&file_priv->mm.lock); 2445 } 2446 2447 static bool i915_context_is_banned(struct drm_i915_private *dev_priv, 2448 const struct intel_context *ctx) 2449 { 2450 unsigned long elapsed; 2451 2452 elapsed = get_seconds() - ctx->hang_stats.guilty_ts; 2453 2454 if (ctx->hang_stats.banned) 2455 return true; 2456 2457 if (ctx->hang_stats.ban_period_seconds && 2458 elapsed <= ctx->hang_stats.ban_period_seconds) { 2459 if (!i915_gem_context_is_default(ctx)) { 2460 DRM_DEBUG("context hanging too fast, banning!\n"); 2461 return true; 2462 } else if (i915_stop_ring_allow_ban(dev_priv)) { 2463 if (i915_stop_ring_allow_warn(dev_priv)) 2464 DRM_ERROR("gpu hanging too fast, banning!\n"); 2465 return true; 2466 } 2467 } 2468 2469 return false; 2470 } 2471 2472 static void i915_set_reset_status(struct drm_i915_private *dev_priv, 2473 struct intel_context *ctx, 2474 const bool guilty) 2475 { 2476 struct i915_ctx_hang_stats *hs; 2477 2478 if (WARN_ON(!ctx)) 2479 return; 2480 2481 hs = &ctx->hang_stats; 2482 2483 if (guilty) { 2484 hs->banned = i915_context_is_banned(dev_priv, ctx); 2485 hs->batch_active++; 2486 hs->guilty_ts = get_seconds(); 2487 } else { 2488 hs->batch_pending++; 2489 } 2490 } 2491 2492 static void i915_gem_free_request(struct drm_i915_gem_request *request) 2493 { 2494 list_del(&request->list); 2495 i915_gem_request_remove_from_client(request); 2496 2497 put_pid(request->pid); 2498 2499 i915_gem_request_unreference(request); 2500 } 2501 2502 void i915_gem_request_free(struct kref *req_ref) 2503 { 2504 struct drm_i915_gem_request *req = container_of(req_ref, 2505 typeof(*req), ref); 2506 struct intel_context *ctx = req->ctx; 2507 2508 if (ctx) { 2509 if (i915.enable_execlists) { 2510 struct intel_engine_cs *ring = req->ring; 2511 2512 if (ctx != ring->default_context) 2513 intel_lr_context_unpin(ring, ctx); 2514 } 2515 2516 i915_gem_context_unreference(ctx); 2517 } 2518 2519 kfree(req); 2520 } 2521 2522 struct drm_i915_gem_request * 2523 i915_gem_find_active_request(struct intel_engine_cs *ring) 2524 { 2525 struct drm_i915_gem_request *request; 2526 2527 list_for_each_entry(request, &ring->request_list, list) { 2528 if (i915_gem_request_completed(request, false)) 2529 continue; 2530 2531 return 
request; 2532 } 2533 2534 return NULL; 2535 } 2536 2537 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, 2538 struct intel_engine_cs *ring) 2539 { 2540 struct drm_i915_gem_request *request; 2541 bool ring_hung; 2542 2543 request = i915_gem_find_active_request(ring); 2544 2545 if (request == NULL) 2546 return; 2547 2548 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; 2549 2550 i915_set_reset_status(dev_priv, request->ctx, ring_hung); 2551 2552 list_for_each_entry_continue(request, &ring->request_list, list) 2553 i915_set_reset_status(dev_priv, request->ctx, false); 2554 } 2555 2556 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, 2557 struct intel_engine_cs *ring) 2558 { 2559 while (!list_empty(&ring->active_list)) { 2560 struct drm_i915_gem_object *obj; 2561 2562 obj = list_first_entry(&ring->active_list, 2563 struct drm_i915_gem_object, 2564 ring_list); 2565 2566 i915_gem_object_move_to_inactive(obj); 2567 } 2568 2569 /* 2570 * Clear out the execlists queue before freeing the requests, as those 2571 * are the ones that keep the context and ringbuffer backing objects 2572 * pinned in place. 2573 */ 2574 while (!list_empty(&ring->execlist_queue)) { 2575 struct drm_i915_gem_request *submit_req; 2576 2577 submit_req = list_first_entry(&ring->execlist_queue, 2578 struct drm_i915_gem_request, 2579 execlist_link); 2580 list_del(&submit_req->execlist_link); 2581 intel_runtime_pm_put(dev_priv); 2582 2583 if (submit_req->ctx != ring->default_context) 2584 intel_lr_context_unpin(ring, submit_req->ctx); 2585 2586 i915_gem_request_unreference(submit_req); 2587 } 2588 2589 /* 2590 * We must free the requests after all the corresponding objects have 2591 * been moved off active lists. This is the same order the normal 2592 * retire_requests function uses. This is important if objects hold 2593 * implicit references on things like e.g. ppgtt address spaces through 2594 * the request. 2595 */ 2596 while (!list_empty(&ring->request_list)) { 2597 struct drm_i915_gem_request *request; 2598 2599 request = list_first_entry(&ring->request_list, 2600 struct drm_i915_gem_request, 2601 list); 2602 2603 i915_gem_free_request(request); 2604 } 2605 2606 /* This may not have been flushed before the reset, so clean it now */ 2607 i915_gem_request_assign(&ring->outstanding_lazy_request, NULL); 2608 } 2609 2610 void i915_gem_restore_fences(struct drm_device *dev) 2611 { 2612 struct drm_i915_private *dev_priv = dev->dev_private; 2613 int i; 2614 2615 for (i = 0; i < dev_priv->num_fence_regs; i++) { 2616 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 2617 2618 /* 2619 * Commit delayed tiling changes if we have an object still 2620 * attached to the fence, otherwise just clear the fence. 2621 */ 2622 if (reg->obj) { 2623 i915_gem_object_update_fence(reg->obj, reg, 2624 reg->obj->tiling_mode); 2625 } else { 2626 i915_gem_write_fence(dev, i, NULL); 2627 } 2628 } 2629 } 2630 2631 void i915_gem_reset(struct drm_device *dev) 2632 { 2633 struct drm_i915_private *dev_priv = dev->dev_private; 2634 struct intel_engine_cs *ring; 2635 int i; 2636 2637 /* 2638 * Before we free the objects from the requests, we need to inspect 2639 * them for finding the guilty party. As the requests only borrow 2640 * their reference to the objects, the inspection must be done first.
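 * Hence the two passes below: i915_gem_reset_ring_status() is run for every ring before i915_gem_reset_ring_cleanup() tears any of them down.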
2641 */ 2642 for_each_ring(ring, dev_priv, i) 2643 i915_gem_reset_ring_status(dev_priv, ring); 2644 2645 for_each_ring(ring, dev_priv, i) 2646 i915_gem_reset_ring_cleanup(dev_priv, ring); 2647 2648 i915_gem_context_reset(dev); 2649 2650 i915_gem_restore_fences(dev); 2651 } 2652 2653 /** 2654 * This function clears the request list as sequence numbers are passed. 2655 */ 2656 void 2657 i915_gem_retire_requests_ring(struct intel_engine_cs *ring) 2658 { 2659 if (list_empty(&ring->request_list)) 2660 return; 2661 2662 WARN_ON(i915_verify_lists(ring->dev)); 2663 2664 /* Retire requests first as we use it above for the early return. 2665 * If we retire requests last, we may use a later seqno and so clear 2666 * the requests lists without clearing the active list, leading to 2667 * confusion. 2668 */ 2669 while (!list_empty(&ring->request_list)) { 2670 struct drm_i915_gem_request *request; 2671 2672 request = list_first_entry(&ring->request_list, 2673 struct drm_i915_gem_request, 2674 list); 2675 2676 if (!i915_gem_request_completed(request, true)) 2677 break; 2678 2679 trace_i915_gem_request_retire(request); 2680 2681 /* We know the GPU must have read the request to have 2682 * sent us the seqno + interrupt, so use the position 2683 * of tail of the request to update the last known position 2684 * of the GPU head. 2685 */ 2686 request->ringbuf->last_retired_head = request->postfix; 2687 2688 i915_gem_free_request(request); 2689 } 2690 2691 /* Move any buffers on the active list that are no longer referenced 2692 * by the ringbuffer to the flushing/inactive lists as appropriate, 2693 * before we free the context associated with the requests. 2694 */ 2695 while (!list_empty(&ring->active_list)) { 2696 struct drm_i915_gem_object *obj; 2697 2698 obj = list_first_entry(&ring->active_list, 2699 struct drm_i915_gem_object, 2700 ring_list); 2701 2702 if (!i915_gem_request_completed(obj->last_read_req, true)) 2703 break; 2704 2705 i915_gem_object_move_to_inactive(obj); 2706 } 2707 2708 if (unlikely(ring->trace_irq_req && 2709 i915_gem_request_completed(ring->trace_irq_req, true))) { 2710 ring->irq_put(ring); 2711 i915_gem_request_assign(&ring->trace_irq_req, NULL); 2712 } 2713 2714 WARN_ON(i915_verify_lists(ring->dev)); 2715 } 2716 2717 bool 2718 i915_gem_retire_requests(struct drm_device *dev) 2719 { 2720 struct drm_i915_private *dev_priv = dev->dev_private; 2721 struct intel_engine_cs *ring; 2722 bool idle = true; 2723 int i; 2724 2725 for_each_ring(ring, dev_priv, i) { 2726 i915_gem_retire_requests_ring(ring); 2727 idle &= list_empty(&ring->request_list); 2728 if (i915.enable_execlists) { 2729 unsigned long flags; 2730 2731 spin_lock_irqsave(&ring->execlist_lock, flags); 2732 idle &= list_empty(&ring->execlist_queue); 2733 spin_unlock_irqrestore(&ring->execlist_lock, flags); 2734 2735 intel_execlists_retire_requests(ring); 2736 } 2737 } 2738 2739 if (idle) 2740 mod_delayed_work(dev_priv->wq, 2741 &dev_priv->mm.idle_work, 2742 msecs_to_jiffies(100)); 2743 2744 return idle; 2745 } 2746 2747 static void 2748 i915_gem_retire_work_handler(struct work_struct *work) 2749 { 2750 struct drm_i915_private *dev_priv = 2751 container_of(work, typeof(*dev_priv), mm.retire_work.work); 2752 struct drm_device *dev = dev_priv->dev; 2753 bool idle; 2754 2755 /* Come back later if the device is busy... 
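 * Note that we only trylock struct_mutex; if it is contended we simply re-arm the work below rather than stall the workqueue.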
*/ 2756 idle = false; 2757 if (mutex_trylock(&dev->struct_mutex)) { 2758 idle = i915_gem_retire_requests(dev); 2759 mutex_unlock(&dev->struct_mutex); 2760 } 2761 if (!idle) 2762 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 2763 round_jiffies_up_relative(HZ)); 2764 } 2765 2766 static void 2767 i915_gem_idle_work_handler(struct work_struct *work) 2768 { 2769 struct drm_i915_private *dev_priv = 2770 container_of(work, typeof(*dev_priv), mm.idle_work.work); 2771 2772 intel_mark_idle(dev_priv->dev); 2773 } 2774 2775 /** 2776 * Ensures that an object will eventually get non-busy by flushing any required 2777 * write domains, emitting any outstanding lazy request and retiring any 2778 * completed requests. 2779 */ 2780 static int 2781 i915_gem_object_flush_active(struct drm_i915_gem_object *obj) 2782 { 2783 struct intel_engine_cs *ring; 2784 int ret; 2785 2786 if (obj->active) { 2787 ring = i915_gem_request_get_ring(obj->last_read_req); 2788 2789 ret = i915_gem_check_olr(obj->last_read_req); 2790 if (ret) 2791 return ret; 2792 2793 i915_gem_retire_requests_ring(ring); 2794 } 2795 2796 return 0; 2797 } 2798 2799 /** 2800 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT 2801 * @DRM_IOCTL_ARGS: standard ioctl arguments 2802 * 2803 * Returns 0 if successful, else an error is returned with the remaining time in 2804 * the timeout parameter. 2805 * -ETIME: object is still busy after timeout 2806 * -ERESTARTSYS: signal interrupted the wait 2807 * -ENOENT: object doesn't exist 2808 * Also possible, but rare: 2809 * -EAGAIN: GPU wedged 2810 * -ENOMEM: damn 2811 * -ENODEV: Internal IRQ fail 2812 * -E?: The add request failed 2813 * 2814 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any 2815 * non-zero timeout parameter the wait ioctl will wait for the given number of 2816 * nanoseconds on an object becoming unbusy. Since the wait itself does so 2817 * without holding struct_mutex the object may become re-busied before this 2818 * function completes. A similar but shorter race condition exists in the busy 2819 * ioctl. 2820 */ 2821 int 2822 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 2823 { 2824 struct drm_i915_private *dev_priv = dev->dev_private; 2825 struct drm_i915_gem_wait *args = data; 2826 struct drm_i915_gem_object *obj; 2827 struct drm_i915_gem_request *req; 2828 unsigned reset_counter; 2829 int ret = 0; 2830 2831 if (args->flags != 0) 2832 return -EINVAL; 2833 2834 ret = i915_mutex_lock_interruptible(dev); 2835 if (ret) 2836 return ret; 2837 2838 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle)); 2839 if (&obj->base == NULL) { 2840 mutex_unlock(&dev->struct_mutex); 2841 return -ENOENT; 2842 } 2843 2844 /* Need to make sure the object gets inactive eventually. */ 2845 ret = i915_gem_object_flush_active(obj); 2846 if (ret) 2847 goto out; 2848 2849 if (!obj->active || !obj->last_read_req) 2850 goto out; 2851 2852 req = obj->last_read_req; 2853 2854 /* Do this after OLR check to make sure we make forward progress polling 2855 * on this IOCTL with a timeout == 0 (like busy ioctl) 2856 */ 2857 if (args->timeout_ns == 0) { 2858 ret = -ETIME; 2859 goto out; 2860 } 2861 2862 drm_gem_object_unreference(&obj->base); 2863 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 2864 i915_gem_request_reference(req); 2865 mutex_unlock(&dev->struct_mutex); 2866 2867 ret = __i915_wait_request(req, reset_counter, true, 2868 args->timeout_ns > 0 ?
&args->timeout_ns : NULL, 2869 file->driver_priv); 2870 mutex_lock(&dev->struct_mutex); 2871 i915_gem_request_unreference(req); 2872 mutex_unlock(&dev->struct_mutex); 2873 return ret; 2874 2875 out: 2876 drm_gem_object_unreference(&obj->base); 2877 mutex_unlock(&dev->struct_mutex); 2878 return ret; 2879 } 2880 2881 /** 2882 * i915_gem_object_sync - sync an object to a ring. 2883 * 2884 * @obj: object which may be in use on another ring. 2885 * @to: ring we wish to use the object on. May be NULL. 2886 * 2887 * This code is meant to abstract object synchronization with the GPU. 2888 * Calling with NULL implies synchronizing the object with the CPU 2889 * rather than a particular GPU ring. 2890 * 2891 * Returns 0 if successful, else propagates up the lower layer error. 2892 */ 2893 int 2894 i915_gem_object_sync(struct drm_i915_gem_object *obj, 2895 struct intel_engine_cs *to) 2896 { 2897 struct intel_engine_cs *from; 2898 u32 seqno; 2899 int ret, idx; 2900 2901 from = i915_gem_request_get_ring(obj->last_read_req); 2902 2903 if (from == NULL || to == from) 2904 return 0; 2905 2906 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev)) 2907 return i915_gem_object_wait_rendering(obj, false); 2908 2909 idx = intel_ring_sync_index(from, to); 2910 2911 seqno = i915_gem_request_get_seqno(obj->last_read_req); 2912 /* Optimization: Avoid semaphore sync when we are sure we already 2913 * waited for an object with higher seqno */ 2914 if (seqno <= from->semaphore.sync_seqno[idx]) 2915 return 0; 2916 2917 ret = i915_gem_check_olr(obj->last_read_req); 2918 if (ret) 2919 return ret; 2920 2921 trace_i915_gem_ring_sync_to(from, to, obj->last_read_req); 2922 ret = to->semaphore.sync_to(to, from, seqno); 2923 if (!ret) 2924 /* We use last_read_req because sync_to() 2925 * might have just caused seqno wrap under 2926 * the radar. 2927 */ 2928 from->semaphore.sync_seqno[idx] = 2929 i915_gem_request_get_seqno(obj->last_read_req); 2930 2931 return ret; 2932 } 2933 2934 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) 2935 { 2936 u32 old_write_domain, old_read_domains; 2937 2938 /* Force a pagefault for domain tracking on next user access */ 2939 i915_gem_release_mmap(obj); 2940 2941 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 2942 return; 2943 2944 /* Wait for any direct GTT access to complete */ 2945 mb(); 2946 2947 old_read_domains = obj->base.read_domains; 2948 old_write_domain = obj->base.write_domain; 2949 2950 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT; 2951 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT; 2952 2953 trace_i915_gem_object_change_domain(obj, 2954 old_read_domains, 2955 old_write_domain); 2956 } 2957 2958 int i915_vma_unbind(struct i915_vma *vma) 2959 { 2960 struct drm_i915_gem_object *obj = vma->obj; 2961 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2962 int ret; 2963 2964 if (list_empty(&vma->vma_link)) 2965 return 0; 2966 2967 if (!drm_mm_node_allocated(&vma->node)) { 2968 i915_gem_vma_destroy(vma); 2969 return 0; 2970 } 2971 2972 if (vma->pin_count) 2973 return -EBUSY; 2974 2975 BUG_ON(obj->pages == NULL); 2976 2977 ret = i915_gem_object_finish_gpu(obj); 2978 if (ret) 2979 return ret; 2980 /* Continue on if we fail due to EIO, the GPU is hung so we 2981 * should be safe and we need to cleanup or else we might 2982 * cause memory corruption through use-after-free. 
2983 */ 2984 2985 if (i915_is_ggtt(vma->vm) && 2986 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { 2987 i915_gem_object_finish_gtt(obj); 2988 2989 /* release the fence reg _after_ flushing */ 2990 ret = i915_gem_object_put_fence(obj); 2991 if (ret) 2992 return ret; 2993 } 2994 2995 trace_i915_vma_unbind(vma); 2996 2997 vma->unbind_vma(vma); 2998 2999 list_del_init(&vma->mm_list); 3000 if (i915_is_ggtt(vma->vm)) { 3001 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { 3002 obj->map_and_fenceable = false; 3003 } else if (vma->ggtt_view.pages) { 3004 sg_free_table(vma->ggtt_view.pages); 3005 kfree(vma->ggtt_view.pages); 3006 vma->ggtt_view.pages = NULL; 3007 } 3008 } 3009 3010 drm_mm_remove_node(&vma->node); 3011 i915_gem_vma_destroy(vma); 3012 3013 /* Since the unbound list is global, only move to that list if 3014 * no more VMAs exist. */ 3015 if (list_empty(&obj->vma_list)) { 3016 /* Throw away the active reference before 3017 * moving to the unbound list. */ 3018 i915_gem_object_retire(obj); 3019 3020 i915_gem_gtt_finish_object(obj); 3021 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); 3022 } 3023 3024 /* And finally now the object is completely decoupled from this vma, 3025 * we can drop its hold on the backing storage and allow it to be 3026 * reaped by the shrinker. 3027 */ 3028 i915_gem_object_unpin_pages(obj); 3029 3030 return 0; 3031 } 3032 3033 int i915_gpu_idle(struct drm_device *dev) 3034 { 3035 struct drm_i915_private *dev_priv = dev->dev_private; 3036 struct intel_engine_cs *ring; 3037 int ret, i; 3038 3039 /* Flush everything onto the inactive list. */ 3040 for_each_ring(ring, dev_priv, i) { 3041 if (!i915.enable_execlists) { 3042 ret = i915_switch_context(ring, ring->default_context); 3043 if (ret) 3044 return ret; 3045 } 3046 3047 ret = intel_ring_idle(ring); 3048 if (ret) 3049 return ret; 3050 } 3051 3052 return 0; 3053 } 3054 3055 static void i965_write_fence_reg(struct drm_device *dev, int reg, 3056 struct drm_i915_gem_object *obj) 3057 { 3058 struct drm_i915_private *dev_priv = dev->dev_private; 3059 int fence_reg; 3060 int fence_pitch_shift; 3061 3062 if (INTEL_INFO(dev)->gen >= 6) { 3063 fence_reg = FENCE_REG_SANDYBRIDGE_0; 3064 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT; 3065 } else { 3066 fence_reg = FENCE_REG_965_0; 3067 fence_pitch_shift = I965_FENCE_PITCH_SHIFT; 3068 } 3069 3070 fence_reg += reg * 8; 3071 3072 /* To w/a incoherency with non-atomic 64-bit register updates, 3073 * we split the 64-bit update into two 32-bit writes. In order 3074 * for a partial fence not to be evaluated between writes, we 3075 * precede the update with write to turn off the fence register, 3076 * and only enable the fence as the last step. 3077 * 3078 * For extra levels of paranoia, we make sure each step lands 3079 * before applying the next step. 3080 */ 3081 I915_WRITE(fence_reg, 0); 3082 POSTING_READ(fence_reg); 3083 3084 if (obj) { 3085 u32 size = i915_gem_obj_ggtt_size(obj); 3086 uint64_t val; 3087 3088 /* Adjust fence size to match tiled area */ 3089 if (obj->tiling_mode != I915_TILING_NONE) { 3090 uint32_t row_size = obj->stride * 3091 (obj->tiling_mode == I915_TILING_Y ? 
32 : 8); 3092 size = (size / row_size) * row_size; 3093 } 3094 3095 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & 3096 0xfffff000) << 32; 3097 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; 3098 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; 3099 if (obj->tiling_mode == I915_TILING_Y) 3100 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 3101 val |= I965_FENCE_REG_VALID; 3102 3103 I915_WRITE(fence_reg + 4, val >> 32); 3104 POSTING_READ(fence_reg + 4); 3105 3106 I915_WRITE(fence_reg + 0, val); 3107 POSTING_READ(fence_reg); 3108 } else { 3109 I915_WRITE(fence_reg + 4, 0); 3110 POSTING_READ(fence_reg + 4); 3111 } 3112 } 3113 3114 static void i915_write_fence_reg(struct drm_device *dev, int reg, 3115 struct drm_i915_gem_object *obj) 3116 { 3117 struct drm_i915_private *dev_priv = dev->dev_private; 3118 u32 val; 3119 3120 if (obj) { 3121 u32 size = i915_gem_obj_ggtt_size(obj); 3122 int pitch_val; 3123 int tile_width; 3124 3125 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) || 3126 (size & -size) != size || 3127 (i915_gem_obj_ggtt_offset(obj) & (size - 1)), 3128 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", 3129 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size); 3130 3131 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) 3132 tile_width = 128; 3133 else 3134 tile_width = 512; 3135 3136 /* Note: pitch better be a power of two tile widths */ 3137 pitch_val = obj->stride / tile_width; 3138 pitch_val = ffs(pitch_val) - 1; 3139 3140 val = i915_gem_obj_ggtt_offset(obj); 3141 if (obj->tiling_mode == I915_TILING_Y) 3142 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 3143 val |= I915_FENCE_SIZE_BITS(size); 3144 val |= pitch_val << I830_FENCE_PITCH_SHIFT; 3145 val |= I830_FENCE_REG_VALID; 3146 } else 3147 val = 0; 3148 3149 if (reg < 8) 3150 reg = FENCE_REG_830_0 + reg * 4; 3151 else 3152 reg = FENCE_REG_945_8 + (reg - 8) * 4; 3153 3154 I915_WRITE(reg, val); 3155 POSTING_READ(reg); 3156 } 3157 3158 static void i830_write_fence_reg(struct drm_device *dev, int reg, 3159 struct drm_i915_gem_object *obj) 3160 { 3161 struct drm_i915_private *dev_priv = dev->dev_private; 3162 uint32_t val; 3163 3164 if (obj) { 3165 u32 size = i915_gem_obj_ggtt_size(obj); 3166 uint32_t pitch_val; 3167 3168 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) || 3169 (size & -size) != size || 3170 (i915_gem_obj_ggtt_offset(obj) & (size - 1)), 3171 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n", 3172 i915_gem_obj_ggtt_offset(obj), size); 3173 3174 pitch_val = obj->stride / 128; 3175 pitch_val = ffs(pitch_val) - 1; 3176 3177 val = i915_gem_obj_ggtt_offset(obj); 3178 if (obj->tiling_mode == I915_TILING_Y) 3179 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 3180 val |= I830_FENCE_SIZE_BITS(size); 3181 val |= pitch_val << I830_FENCE_PITCH_SHIFT; 3182 val |= I830_FENCE_REG_VALID; 3183 } else 3184 val = 0; 3185 3186 I915_WRITE(FENCE_REG_830_0 + reg * 4, val); 3187 POSTING_READ(FENCE_REG_830_0 + reg * 4); 3188 } 3189 3190 inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj) 3191 { 3192 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT; 3193 } 3194 3195 static void i915_gem_write_fence(struct drm_device *dev, int reg, 3196 struct drm_i915_gem_object *obj) 3197 { 3198 struct drm_i915_private *dev_priv = dev->dev_private; 3199 3200 /* Ensure that all CPU reads are completed before installing a fence 3201 * and all writes before removing the fence. 
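 * In practice that means an mb() before touching the register whilst the outgoing object may still have GTT reads in flight, and another one afterwards so that no access to the newly fenced region is reordered ahead of the update.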
3202 */ 3203 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) 3204 mb(); 3205 3206 WARN(obj && (!obj->stride || !obj->tiling_mode), 3207 "bogus fence setup with stride: 0x%x, tiling mode: %i\n", 3208 obj->stride, obj->tiling_mode); 3209 3210 if (IS_GEN2(dev)) 3211 i830_write_fence_reg(dev, reg, obj); 3212 else if (IS_GEN3(dev)) 3213 i915_write_fence_reg(dev, reg, obj); 3214 else if (INTEL_INFO(dev)->gen >= 4) 3215 i965_write_fence_reg(dev, reg, obj); 3216 3217 /* And similarly be paranoid that no direct access to this region 3218 * is reordered to before the fence is installed. 3219 */ 3220 if (i915_gem_object_needs_mb(obj)) 3221 mb(); 3222 } 3223 3224 static inline int fence_number(struct drm_i915_private *dev_priv, 3225 struct drm_i915_fence_reg *fence) 3226 { 3227 return fence - dev_priv->fence_regs; 3228 } 3229 3230 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, 3231 struct drm_i915_fence_reg *fence, 3232 bool enable) 3233 { 3234 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3235 int reg = fence_number(dev_priv, fence); 3236 3237 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL); 3238 3239 if (enable) { 3240 obj->fence_reg = reg; 3241 fence->obj = obj; 3242 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); 3243 } else { 3244 obj->fence_reg = I915_FENCE_REG_NONE; 3245 fence->obj = NULL; 3246 list_del_init(&fence->lru_list); 3247 } 3248 obj->fence_dirty = false; 3249 } 3250 3251 static int 3252 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) 3253 { 3254 if (obj->last_fenced_req) { 3255 int ret = i915_wait_request(obj->last_fenced_req); 3256 if (ret) 3257 return ret; 3258 3259 i915_gem_request_assign(&obj->last_fenced_req, NULL); 3260 } 3261 3262 return 0; 3263 } 3264 3265 int 3266 i915_gem_object_put_fence(struct drm_i915_gem_object *obj) 3267 { 3268 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3269 struct drm_i915_fence_reg *fence; 3270 int ret; 3271 3272 ret = i915_gem_object_wait_fence(obj); 3273 if (ret) 3274 return ret; 3275 3276 if (obj->fence_reg == I915_FENCE_REG_NONE) 3277 return 0; 3278 3279 fence = &dev_priv->fence_regs[obj->fence_reg]; 3280 3281 if (WARN_ON(fence->pin_count)) 3282 return -EBUSY; 3283 3284 i915_gem_object_fence_lost(obj); 3285 i915_gem_object_update_fence(obj, fence, false); 3286 3287 return 0; 3288 } 3289 3290 static struct drm_i915_fence_reg * 3291 i915_find_fence_reg(struct drm_device *dev) 3292 { 3293 struct drm_i915_private *dev_priv = dev->dev_private; 3294 struct drm_i915_fence_reg *reg, *avail; 3295 int i; 3296 3297 /* First try to find a free reg */ 3298 avail = NULL; 3299 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { 3300 reg = &dev_priv->fence_regs[i]; 3301 if (!reg->obj) 3302 return reg; 3303 3304 if (!reg->pin_count) 3305 avail = reg; 3306 } 3307 3308 if (avail == NULL) 3309 goto deadlock; 3310 3311 /* None available, try to steal one or wait for a user to finish */ 3312 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { 3313 if (reg->pin_count) 3314 continue; 3315 3316 return reg; 3317 } 3318 3319 deadlock: 3320 /* Wait for completion of pending flips which consume fences */ 3321 if (intel_has_pending_fb_unpin(dev)) 3322 return ERR_PTR(-EAGAIN); 3323 3324 return ERR_PTR(-EDEADLK); 3325 } 3326 3327 /** 3328 * i915_gem_object_get_fence - set up fencing for an object 3329 * @obj: object to map through a fence reg 3330 * 3331 * When mapping objects through the GTT, userspace wants to be able to write 
* 3332 * to them without having to worry about swizzling if the object is tiled. 3333 * This function walks the fence regs looking for a free one for @obj, 3334 * stealing one if it can't find any. 3335 * 3336 * It then sets up the reg based on the object's properties: address, pitch 3337 * and tiling format. 3338 * 3339 * For an untiled surface, this removes any existing fence. 3340 */ 3341 int 3342 i915_gem_object_get_fence(struct drm_i915_gem_object *obj) 3343 { 3344 struct drm_device *dev = obj->base.dev; 3345 struct drm_i915_private *dev_priv = dev->dev_private; 3346 bool enable = obj->tiling_mode != I915_TILING_NONE; 3347 struct drm_i915_fence_reg *reg; 3348 int ret; 3349 3350 /* Have we updated the tiling parameters upon the object and so 3351 * will need to serialise the write to the associated fence register? 3352 */ 3353 if (obj->fence_dirty) { 3354 ret = i915_gem_object_wait_fence(obj); 3355 if (ret) 3356 return ret; 3357 } 3358 3359 /* Just update our place in the LRU if our fence is getting reused. */ 3360 if (obj->fence_reg != I915_FENCE_REG_NONE) { 3361 reg = &dev_priv->fence_regs[obj->fence_reg]; 3362 if (!obj->fence_dirty) { 3363 list_move_tail(&reg->lru_list, 3364 &dev_priv->mm.fence_list); 3365 return 0; 3366 } 3367 } else if (enable) { 3368 if (WARN_ON(!obj->map_and_fenceable)) 3369 return -EINVAL; 3370 3371 reg = i915_find_fence_reg(dev); 3372 if (IS_ERR(reg)) 3373 return PTR_ERR(reg); 3374 3375 if (reg->obj) { 3376 struct drm_i915_gem_object *old = reg->obj; 3377 3378 ret = i915_gem_object_wait_fence(old); 3379 if (ret) 3380 return ret; 3381 3382 i915_gem_object_fence_lost(old); 3383 } 3384 } else 3385 return 0; 3386 3387 i915_gem_object_update_fence(obj, reg, enable); 3388 3389 return 0; 3390 } 3391 3392 static bool i915_gem_valid_gtt_space(struct i915_vma *vma, 3393 unsigned long cache_level) 3394 { 3395 struct drm_mm_node *gtt_space = &vma->node; 3396 struct drm_mm_node *other; 3397 3398 /* 3399 * On some machines we have to be careful when putting differing types 3400 * of snoopable memory together to avoid the prefetcher crossing memory 3401 * domains and dying. During vm initialisation, we decide whether or not 3402 * these constraints apply and set the drm_mm.color_adjust 3403 * appropriately. 3404 */ 3405 if (vma->vm->mm.color_adjust == NULL) 3406 return true; 3407 3408 if (!drm_mm_node_allocated(gtt_space)) 3409 return true; 3410 3411 if (list_empty(&gtt_space->node_list)) 3412 return true; 3413 3414 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list); 3415 if (other->allocated && !other->hole_follows && other->color != cache_level) 3416 return false; 3417 3418 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list); 3419 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level) 3420 return false; 3421 3422 return true; 3423 } 3424 3425 /** 3426 * Finds free space in the GTT aperture and binds the object there. 3427 */ 3428 static struct i915_vma * 3429 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, 3430 struct i915_address_space *vm, 3431 const struct i915_ggtt_view *ggtt_view, 3432 unsigned alignment, 3433 uint64_t flags) 3434 { 3435 struct drm_device *dev = obj->base.dev; 3436 struct drm_i915_private *dev_priv = dev->dev_private; 3437 u32 size, fence_size, fence_alignment, unfenced_alignment; 3438 unsigned long start = 3439 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; 3440 unsigned long end = 3441 flags & PIN_MAPPABLE ?
dev_priv->gtt.mappable_end : vm->total; 3442 struct i915_vma *vma; 3443 int ret; 3444 3445 if(WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) 3446 return ERR_PTR(-EINVAL); 3447 3448 fence_size = i915_gem_get_gtt_size(dev, 3449 obj->base.size, 3450 obj->tiling_mode); 3451 fence_alignment = i915_gem_get_gtt_alignment(dev, 3452 obj->base.size, 3453 obj->tiling_mode, true); 3454 unfenced_alignment = 3455 i915_gem_get_gtt_alignment(dev, 3456 obj->base.size, 3457 obj->tiling_mode, false); 3458 3459 if (alignment == 0) 3460 alignment = flags & PIN_MAPPABLE ? fence_alignment : 3461 unfenced_alignment; 3462 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) { 3463 DRM_DEBUG("Invalid object alignment requested %u\n", alignment); 3464 return ERR_PTR(-EINVAL); 3465 } 3466 3467 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size; 3468 3469 /* If the object is bigger than the entire aperture, reject it early 3470 * before evicting everything in a vain attempt to find space. 3471 */ 3472 if (obj->base.size > end) { 3473 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n", 3474 obj->base.size, 3475 flags & PIN_MAPPABLE ? "mappable" : "total", 3476 end); 3477 return ERR_PTR(-E2BIG); 3478 } 3479 3480 ret = i915_gem_object_get_pages(obj); 3481 if (ret) 3482 return ERR_PTR(ret); 3483 3484 i915_gem_object_pin_pages(obj); 3485 3486 vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) : 3487 i915_gem_obj_lookup_or_create_vma(obj, vm); 3488 3489 if (IS_ERR(vma)) 3490 goto err_unpin; 3491 3492 search_free: 3493 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, 3494 size, alignment, 3495 obj->cache_level, 3496 start, end, 3497 DRM_MM_SEARCH_DEFAULT, 3498 DRM_MM_CREATE_DEFAULT); 3499 if (ret) { 3500 ret = i915_gem_evict_something(dev, vm, size, alignment, 3501 obj->cache_level, 3502 start, end, 3503 flags); 3504 if (ret == 0) 3505 goto search_free; 3506 3507 goto err_free_vma; 3508 } 3509 if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) { 3510 ret = -EINVAL; 3511 goto err_remove_node; 3512 } 3513 3514 ret = i915_gem_gtt_prepare_object(obj); 3515 if (ret) 3516 goto err_remove_node; 3517 3518 /* allocate before insert / bind */ 3519 if (vma->vm->allocate_va_range) { 3520 trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size, 3521 VM_TO_TRACE_NAME(vma->vm)); 3522 ret = vma->vm->allocate_va_range(vma->vm, 3523 vma->node.start, 3524 vma->node.size); 3525 if (ret) 3526 goto err_remove_node; 3527 } 3528 3529 trace_i915_vma_bind(vma, flags); 3530 ret = i915_vma_bind(vma, obj->cache_level, 3531 flags & PIN_GLOBAL ? GLOBAL_BIND : 0); 3532 if (ret) 3533 goto err_finish_gtt; 3534 3535 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); 3536 list_add_tail(&vma->mm_list, &vm->inactive_list); 3537 3538 return vma; 3539 3540 err_finish_gtt: 3541 i915_gem_gtt_finish_object(obj); 3542 err_remove_node: 3543 drm_mm_remove_node(&vma->node); 3544 err_free_vma: 3545 i915_gem_vma_destroy(vma); 3546 vma = ERR_PTR(ret); 3547 err_unpin: 3548 i915_gem_object_unpin_pages(obj); 3549 return vma; 3550 } 3551 3552 bool 3553 i915_gem_clflush_object(struct drm_i915_gem_object *obj, 3554 bool force) 3555 { 3556 /* If we don't have a page list set up, then we're not pinned 3557 * to GPU, and we can ignore the cache flush because it'll happen 3558 * again at bind time. 
3559 */ 3560 if (obj->pages == NULL) 3561 return false; 3562 3563 /* 3564 * Stolen memory is always coherent with the GPU as it is explicitly 3565 * marked as wc by the system, or the system is cache-coherent. 3566 */ 3567 if (obj->stolen || obj->phys_handle) 3568 return false; 3569 3570 /* If the GPU is snooping the contents of the CPU cache, 3571 * we do not need to manually clear the CPU cache lines. However, 3572 * the caches are only snooped when the render cache is 3573 * flushed/invalidated. As we always have to emit invalidations 3574 * and flushes when moving into and out of the RENDER domain, correct 3575 * snooping behaviour occurs naturally as the result of our domain 3576 * tracking. 3577 */ 3578 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) { 3579 obj->cache_dirty = true; 3580 return false; 3581 } 3582 3583 trace_i915_gem_object_clflush(obj); 3584 drm_clflush_sg(obj->pages); 3585 obj->cache_dirty = false; 3586 3587 return true; 3588 } 3589 3590 /** Flushes the GTT write domain for the object if it's dirty. */ 3591 static void 3592 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) 3593 { 3594 uint32_t old_write_domain; 3595 3596 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) 3597 return; 3598 3599 /* No actual flushing is required for the GTT write domain. Writes 3600 * to it immediately go to main memory as far as we know, so there's 3601 * no chipset flush. It also doesn't land in render cache. 3602 * 3603 * However, we do have to enforce the order so that all writes through 3604 * the GTT land before any writes to the device, such as updates to 3605 * the GATT itself. 3606 */ 3607 wmb(); 3608 3609 old_write_domain = obj->base.write_domain; 3610 obj->base.write_domain = 0; 3611 3612 intel_fb_obj_flush(obj, false); 3613 3614 trace_i915_gem_object_change_domain(obj, 3615 obj->base.read_domains, 3616 old_write_domain); 3617 } 3618 3619 /** Flushes the CPU write domain for the object if it's dirty. */ 3620 static void 3621 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) 3622 { 3623 uint32_t old_write_domain; 3624 3625 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) 3626 return; 3627 3628 if (i915_gem_clflush_object(obj, obj->pin_display)) 3629 i915_gem_chipset_flush(obj->base.dev); 3630 3631 old_write_domain = obj->base.write_domain; 3632 obj->base.write_domain = 0; 3633 3634 intel_fb_obj_flush(obj, false); 3635 3636 trace_i915_gem_object_change_domain(obj, 3637 obj->base.read_domains, 3638 old_write_domain); 3639 } 3640 3641 /** 3642 * Moves a single object to the GTT read, and possibly write domain. 3643 * 3644 * This function returns when the move is complete, including waiting on 3645 * flushes to occur. 3646 */ 3647 int 3648 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 3649 { 3650 uint32_t old_write_domain, old_read_domains; 3651 struct i915_vma *vma; 3652 int ret; 3653 3654 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3655 return 0; 3656 3657 ret = i915_gem_object_wait_rendering(obj, !write); 3658 if (ret) 3659 return ret; 3660 3661 i915_gem_object_retire(obj); 3662 3663 /* Flush and acquire obj->pages so that we are coherent through 3664 * direct access in memory with previous cached writes through 3665 * shmemfs and that our cache domain tracking remains valid. 
3666 * For example, if the obj->filp was moved to swap without us 3667 * being notified and releasing the pages, we would mistakenly 3668 * continue to assume that the obj remained out of the CPU cached 3669 * domain. 3670 */ 3671 ret = i915_gem_object_get_pages(obj); 3672 if (ret) 3673 return ret; 3674 3675 i915_gem_object_flush_cpu_write_domain(obj); 3676 3677 /* Serialise direct access to this object with the barriers for 3678 * coherent writes from the GPU, by effectively invalidating the 3679 * GTT domain upon first access. 3680 */ 3681 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 3682 mb(); 3683 3684 old_write_domain = obj->base.write_domain; 3685 old_read_domains = obj->base.read_domains; 3686 3687 /* It should now be out of any other write domains, and we can update 3688 * the domain values for our changes. 3689 */ 3690 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 3691 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3692 if (write) { 3693 obj->base.read_domains = I915_GEM_DOMAIN_GTT; 3694 obj->base.write_domain = I915_GEM_DOMAIN_GTT; 3695 obj->dirty = 1; 3696 } 3697 3698 if (write) 3699 intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT); 3700 3701 trace_i915_gem_object_change_domain(obj, 3702 old_read_domains, 3703 old_write_domain); 3704 3705 /* And bump the LRU for this access */ 3706 vma = i915_gem_obj_to_ggtt(obj); 3707 if (vma && drm_mm_node_allocated(&vma->node) && !obj->active) 3708 list_move_tail(&vma->mm_list, 3709 &to_i915(obj->base.dev)->gtt.base.inactive_list); 3710 3711 return 0; 3712 } 3713 3714 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3715 enum i915_cache_level cache_level) 3716 { 3717 struct drm_device *dev = obj->base.dev; 3718 struct i915_vma *vma, *next; 3719 int ret; 3720 3721 if (obj->cache_level == cache_level) 3722 return 0; 3723 3724 if (i915_gem_obj_is_pinned(obj)) { 3725 DRM_DEBUG("can not change the cache level of pinned objects\n"); 3726 return -EBUSY; 3727 } 3728 3729 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { 3730 if (!i915_gem_valid_gtt_space(vma, cache_level)) { 3731 ret = i915_vma_unbind(vma); 3732 if (ret) 3733 return ret; 3734 } 3735 } 3736 3737 if (i915_gem_obj_bound_any(obj)) { 3738 ret = i915_gem_object_finish_gpu(obj); 3739 if (ret) 3740 return ret; 3741 3742 i915_gem_object_finish_gtt(obj); 3743 3744 /* Before SandyBridge, you could not use tiling or fence 3745 * registers with snooped memory, so relinquish any fences 3746 * currently pointing to our region in the aperture. 
3747 */ 3748 if (INTEL_INFO(dev)->gen < 6) { 3749 ret = i915_gem_object_put_fence(obj); 3750 if (ret) 3751 return ret; 3752 } 3753 3754 list_for_each_entry(vma, &obj->vma_list, vma_link) 3755 if (drm_mm_node_allocated(&vma->node)) { 3756 ret = i915_vma_bind(vma, cache_level, 3757 vma->bound & GLOBAL_BIND); 3758 if (ret) 3759 return ret; 3760 } 3761 } 3762 3763 list_for_each_entry(vma, &obj->vma_list, vma_link) 3764 vma->node.color = cache_level; 3765 obj->cache_level = cache_level; 3766 3767 if (obj->cache_dirty && 3768 obj->base.write_domain != I915_GEM_DOMAIN_CPU && 3769 cpu_write_needs_clflush(obj)) { 3770 if (i915_gem_clflush_object(obj, true)) 3771 i915_gem_chipset_flush(obj->base.dev); 3772 } 3773 3774 return 0; 3775 } 3776 3777 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 3778 struct drm_file *file) 3779 { 3780 struct drm_i915_gem_caching *args = data; 3781 struct drm_i915_gem_object *obj; 3782 int ret; 3783 3784 ret = i915_mutex_lock_interruptible(dev); 3785 if (ret) 3786 return ret; 3787 3788 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 3789 if (&obj->base == NULL) { 3790 ret = -ENOENT; 3791 goto unlock; 3792 } 3793 3794 switch (obj->cache_level) { 3795 case I915_CACHE_LLC: 3796 case I915_CACHE_L3_LLC: 3797 args->caching = I915_CACHING_CACHED; 3798 break; 3799 3800 case I915_CACHE_WT: 3801 args->caching = I915_CACHING_DISPLAY; 3802 break; 3803 3804 default: 3805 args->caching = I915_CACHING_NONE; 3806 break; 3807 } 3808 3809 drm_gem_object_unreference(&obj->base); 3810 unlock: 3811 mutex_unlock(&dev->struct_mutex); 3812 return ret; 3813 } 3814 3815 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 3816 struct drm_file *file) 3817 { 3818 struct drm_i915_gem_caching *args = data; 3819 struct drm_i915_gem_object *obj; 3820 enum i915_cache_level level; 3821 int ret; 3822 3823 switch (args->caching) { 3824 case I915_CACHING_NONE: 3825 level = I915_CACHE_NONE; 3826 break; 3827 case I915_CACHING_CACHED: 3828 level = I915_CACHE_LLC; 3829 break; 3830 case I915_CACHING_DISPLAY: 3831 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE; 3832 break; 3833 default: 3834 return -EINVAL; 3835 } 3836 3837 ret = i915_mutex_lock_interruptible(dev); 3838 if (ret) 3839 return ret; 3840 3841 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 3842 if (&obj->base == NULL) { 3843 ret = -ENOENT; 3844 goto unlock; 3845 } 3846 3847 ret = i915_gem_object_set_cache_level(obj, level); 3848 3849 drm_gem_object_unreference(&obj->base); 3850 unlock: 3851 mutex_unlock(&dev->struct_mutex); 3852 return ret; 3853 } 3854 3855 static bool is_pin_display(struct drm_i915_gem_object *obj) 3856 { 3857 struct i915_vma *vma; 3858 3859 vma = i915_gem_obj_to_ggtt(obj); 3860 if (!vma) 3861 return false; 3862 3863 /* There are 2 sources that pin objects: 3864 * 1. The display engine (scanouts, sprites, cursors); 3865 * 2. Reservations for execbuffer; 3866 * 3867 * We can ignore reservations as we hold the struct_mutex and 3868 * are only called outside of the reservation path. 3869 */ 3870 return vma->pin_count; 3871 } 3872 3873 /* 3874 * Prepare buffer for display plane (scanout, cursors, etc). 3875 * Can be called from an uninterruptible phase (modesetting) and allows 3876 * any flushes to be pipelined (for pageflips). 
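 *
 * The expected pairing from the display side is roughly (sketch only):
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
 *						    pipelined, view);
 *	if (ret == 0) {
 *		... scan out from the pinned GGTT view ...
 *		i915_gem_object_unpin_from_display_plane(obj, view);
 *	}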
3877 */ 3878 int 3879 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3880 u32 alignment, 3881 struct intel_engine_cs *pipelined, 3882 const struct i915_ggtt_view *view) 3883 { 3884 u32 old_read_domains, old_write_domain; 3885 bool was_pin_display; 3886 int ret; 3887 3888 if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) { 3889 ret = i915_gem_object_sync(obj, pipelined); 3890 if (ret) 3891 return ret; 3892 } 3893 3894 /* Mark the pin_display early so that we account for the 3895 * display coherency whilst setting up the cache domains. 3896 */ 3897 was_pin_display = obj->pin_display; 3898 obj->pin_display = true; 3899 3900 /* The display engine is not coherent with the LLC cache on gen6. As 3901 * a result, we make sure that the pinning that is about to occur is 3902 * done with uncached PTEs. This is lowest common denominator for all 3903 * chipsets. 3904 * 3905 * However for gen6+, we could do better by using the GFDT bit instead 3906 * of uncaching, which would allow us to flush all the LLC-cached data 3907 * with that bit in the PTE to main memory with just one PIPE_CONTROL. 3908 */ 3909 ret = i915_gem_object_set_cache_level(obj, 3910 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE); 3911 if (ret) 3912 goto err_unpin_display; 3913 3914 /* As the user may map the buffer once pinned in the display plane 3915 * (e.g. libkms for the bootup splash), we have to ensure that we 3916 * always use map_and_fenceable for all scanout buffers. 3917 */ 3918 ret = i915_gem_object_ggtt_pin(obj, view, alignment, 3919 view->type == I915_GGTT_VIEW_NORMAL ? 3920 PIN_MAPPABLE : 0); 3921 if (ret) 3922 goto err_unpin_display; 3923 3924 i915_gem_object_flush_cpu_write_domain(obj); 3925 3926 old_write_domain = obj->base.write_domain; 3927 old_read_domains = obj->base.read_domains; 3928 3929 /* It should now be out of any other write domains, and we can update 3930 * the domain values for our changes. 3931 */ 3932 obj->base.write_domain = 0; 3933 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3934 3935 trace_i915_gem_object_change_domain(obj, 3936 old_read_domains, 3937 old_write_domain); 3938 3939 return 0; 3940 3941 err_unpin_display: 3942 WARN_ON(was_pin_display != is_pin_display(obj)); 3943 obj->pin_display = was_pin_display; 3944 return ret; 3945 } 3946 3947 void 3948 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, 3949 const struct i915_ggtt_view *view) 3950 { 3951 i915_gem_object_ggtt_unpin_view(obj, view); 3952 3953 obj->pin_display = is_pin_display(obj); 3954 } 3955 3956 int 3957 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) 3958 { 3959 int ret; 3960 3961 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) 3962 return 0; 3963 3964 ret = i915_gem_object_wait_rendering(obj, false); 3965 if (ret) 3966 return ret; 3967 3968 /* Ensure that we invalidate the GPU's caches and TLBs. */ 3969 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; 3970 return 0; 3971 } 3972 3973 /** 3974 * Moves a single object to the CPU read, and possibly write domain. 3975 * 3976 * This function returns when the move is complete, including waiting on 3977 * flushes to occur. 
3978 */ 3979 int 3980 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) 3981 { 3982 uint32_t old_write_domain, old_read_domains; 3983 int ret; 3984 3985 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) 3986 return 0; 3987 3988 ret = i915_gem_object_wait_rendering(obj, !write); 3989 if (ret) 3990 return ret; 3991 3992 i915_gem_object_retire(obj); 3993 i915_gem_object_flush_gtt_write_domain(obj); 3994 3995 old_write_domain = obj->base.write_domain; 3996 old_read_domains = obj->base.read_domains; 3997 3998 /* Flush the CPU cache if it's still invalid. */ 3999 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { 4000 i915_gem_clflush_object(obj, false); 4001 4002 obj->base.read_domains |= I915_GEM_DOMAIN_CPU; 4003 } 4004 4005 /* It should now be out of any other write domains, and we can update 4006 * the domain values for our changes. 4007 */ 4008 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); 4009 4010 /* If we're writing through the CPU, then the GPU read domains will 4011 * need to be invalidated at next use. 4012 */ 4013 if (write) { 4014 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 4015 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 4016 } 4017 4018 if (write) 4019 intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); 4020 4021 trace_i915_gem_object_change_domain(obj, 4022 old_read_domains, 4023 old_write_domain); 4024 4025 return 0; 4026 } 4027 4028 /* Throttle our rendering by waiting until the ring has completed our requests 4029 * emitted over 20 msec ago. 4030 * 4031 * Note that if we were to use the current jiffies each time around the loop, 4032 * we wouldn't escape the function with any frames outstanding if the time to 4033 * render a frame was over 20ms. 4034 * 4035 * This should get us reasonable parallelism between CPU and GPU but also 4036 * relatively low latency when blocking on a particular request to finish. 
4037 */ 4038 static int 4039 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) 4040 { 4041 struct drm_i915_private *dev_priv = dev->dev_private; 4042 struct drm_i915_file_private *file_priv = file->driver_priv; 4043 unsigned long recent_enough = jiffies - msecs_to_jiffies(20); 4044 struct drm_i915_gem_request *request, *target = NULL; 4045 unsigned reset_counter; 4046 int ret; 4047 4048 ret = i915_gem_wait_for_error(&dev_priv->gpu_error); 4049 if (ret) 4050 return ret; 4051 4052 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false); 4053 if (ret) 4054 return ret; 4055 4056 spin_lock(&file_priv->mm.lock); 4057 list_for_each_entry(request, &file_priv->mm.request_list, client_list) { 4058 if (time_after_eq(request->emitted_jiffies, recent_enough)) 4059 break; 4060 4061 target = request; 4062 } 4063 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 4064 if (target) 4065 i915_gem_request_reference(target); 4066 spin_unlock(&file_priv->mm.lock); 4067 4068 if (target == NULL) 4069 return 0; 4070 4071 ret = __i915_wait_request(target, reset_counter, true, NULL, NULL); 4072 if (ret == 0) 4073 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); 4074 4075 mutex_lock(&dev->struct_mutex); 4076 i915_gem_request_unreference(target); 4077 mutex_unlock(&dev->struct_mutex); 4078 4079 return ret; 4080 } 4081 4082 static bool 4083 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) 4084 { 4085 struct drm_i915_gem_object *obj = vma->obj; 4086 4087 if (alignment && 4088 vma->node.start & (alignment - 1)) 4089 return true; 4090 4091 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable) 4092 return true; 4093 4094 if (flags & PIN_OFFSET_BIAS && 4095 vma->node.start < (flags & PIN_OFFSET_MASK)) 4096 return true; 4097 4098 return false; 4099 } 4100 4101 static int 4102 i915_gem_object_do_pin(struct drm_i915_gem_object *obj, 4103 struct i915_address_space *vm, 4104 const struct i915_ggtt_view *ggtt_view, 4105 uint32_t alignment, 4106 uint64_t flags) 4107 { 4108 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 4109 struct i915_vma *vma; 4110 unsigned bound; 4111 int ret; 4112 4113 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base)) 4114 return -ENODEV; 4115 4116 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm))) 4117 return -EINVAL; 4118 4119 if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE)) 4120 return -EINVAL; 4121 4122 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) 4123 return -EINVAL; 4124 4125 vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) : 4126 i915_gem_obj_to_vma(obj, vm); 4127 4128 if (IS_ERR(vma)) 4129 return PTR_ERR(vma); 4130 4131 if (vma) { 4132 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) 4133 return -EBUSY; 4134 4135 if (i915_vma_misplaced(vma, alignment, flags)) { 4136 unsigned long offset; 4137 offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) : 4138 i915_gem_obj_offset(obj, vm); 4139 WARN(vma->pin_count, 4140 "bo is already pinned in %s with incorrect alignment:" 4141 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," 4142 " obj->map_and_fenceable=%d\n", 4143 ggtt_view ? "ggtt" : "ppgtt", 4144 offset, 4145 alignment, 4146 !!(flags & PIN_MAPPABLE), 4147 obj->map_and_fenceable); 4148 ret = i915_vma_unbind(vma); 4149 if (ret) 4150 return ret; 4151 4152 vma = NULL; 4153 } 4154 } 4155 4156 bound = vma ? 
vma->bound : 0; 4157 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) { 4158 /* In true PPGTT, bind has possibly changed PDEs, which 4159 * means we must do a context switch before the GPU can 4160 * accurately read some of the VMAs. 4161 */ 4162 vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment, 4163 flags); 4164 if (IS_ERR(vma)) 4165 return PTR_ERR(vma); 4166 } 4167 4168 if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) { 4169 ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND); 4170 if (ret) 4171 return ret; 4172 } 4173 4174 if ((bound ^ vma->bound) & GLOBAL_BIND) { 4175 bool mappable, fenceable; 4176 u32 fence_size, fence_alignment; 4177 4178 fence_size = i915_gem_get_gtt_size(obj->base.dev, 4179 obj->base.size, 4180 obj->tiling_mode); 4181 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, 4182 obj->base.size, 4183 obj->tiling_mode, 4184 true); 4185 4186 fenceable = (vma->node.size == fence_size && 4187 (vma->node.start & (fence_alignment - 1)) == 0); 4188 4189 mappable = (vma->node.start + fence_size <= 4190 dev_priv->gtt.mappable_end); 4191 4192 obj->map_and_fenceable = mappable && fenceable; 4193 } 4194 4195 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); 4196 4197 vma->pin_count++; 4198 if (flags & PIN_MAPPABLE) 4199 obj->pin_mappable |= true; 4200 4201 return 0; 4202 } 4203 4204 int 4205 i915_gem_object_pin(struct drm_i915_gem_object *obj, 4206 struct i915_address_space *vm, 4207 uint32_t alignment, 4208 uint64_t flags) 4209 { 4210 return i915_gem_object_do_pin(obj, vm, 4211 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL, 4212 alignment, flags); 4213 } 4214 4215 int 4216 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 4217 const struct i915_ggtt_view *view, 4218 uint32_t alignment, 4219 uint64_t flags) 4220 { 4221 if (WARN_ONCE(!view, "no view specified")) 4222 return -EINVAL; 4223 4224 return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view, 4225 alignment, flags | PIN_GLOBAL); 4226 } 4227 4228 void 4229 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, 4230 const struct i915_ggtt_view *view) 4231 { 4232 struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view); 4233 4234 BUG_ON(!vma); 4235 WARN_ON(vma->pin_count == 0); 4236 WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view)); 4237 4238 if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL) 4239 obj->pin_mappable = false; 4240 } 4241 4242 bool 4243 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) 4244 { 4245 if (obj->fence_reg != I915_FENCE_REG_NONE) { 4246 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 4247 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj); 4248 4249 WARN_ON(!ggtt_vma || 4250 dev_priv->fence_regs[obj->fence_reg].pin_count > 4251 ggtt_vma->pin_count); 4252 dev_priv->fence_regs[obj->fence_reg].pin_count++; 4253 return true; 4254 } else 4255 return false; 4256 } 4257 4258 void 4259 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) 4260 { 4261 if (obj->fence_reg != I915_FENCE_REG_NONE) { 4262 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 4263 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0); 4264 dev_priv->fence_regs[obj->fence_reg].pin_count--; 4265 } 4266 } 4267 4268 int 4269 i915_gem_busy_ioctl(struct drm_device *dev, void *data, 4270 struct drm_file *file) 4271 { 4272 struct drm_i915_gem_busy *args = data; 4273 struct drm_i915_gem_object *obj; 4274 int ret; 4275 4276 ret = i915_mutex_lock_interruptible(dev); 4277 if (ret) 4278 return ret; 4279 4280 
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 4281 if (&obj->base == NULL) { 4282 ret = -ENOENT; 4283 goto unlock; 4284 } 4285 4286 /* Count all active objects as busy, even if they are currently not used 4287 * by the gpu. Users of this interface expect objects to eventually 4288 * become non-busy without any further actions, therefore emit any 4289 * necessary flushes here. 4290 */ 4291 ret = i915_gem_object_flush_active(obj); 4292 4293 args->busy = obj->active; 4294 if (obj->last_read_req) { 4295 struct intel_engine_cs *ring; 4296 BUILD_BUG_ON(I915_NUM_RINGS > 16); 4297 ring = i915_gem_request_get_ring(obj->last_read_req); 4298 args->busy |= intel_ring_flag(ring) << 16; 4299 } 4300 4301 drm_gem_object_unreference(&obj->base); 4302 unlock: 4303 mutex_unlock(&dev->struct_mutex); 4304 return ret; 4305 } 4306 4307 int 4308 i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 4309 struct drm_file *file_priv) 4310 { 4311 return i915_gem_ring_throttle(dev, file_priv); 4312 } 4313 4314 int 4315 i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 4316 struct drm_file *file_priv) 4317 { 4318 struct drm_i915_private *dev_priv = dev->dev_private; 4319 struct drm_i915_gem_madvise *args = data; 4320 struct drm_i915_gem_object *obj; 4321 int ret; 4322 4323 switch (args->madv) { 4324 case I915_MADV_DONTNEED: 4325 case I915_MADV_WILLNEED: 4326 break; 4327 default: 4328 return -EINVAL; 4329 } 4330 4331 ret = i915_mutex_lock_interruptible(dev); 4332 if (ret) 4333 return ret; 4334 4335 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); 4336 if (&obj->base == NULL) { 4337 ret = -ENOENT; 4338 goto unlock; 4339 } 4340 4341 if (i915_gem_obj_is_pinned(obj)) { 4342 ret = -EINVAL; 4343 goto out; 4344 } 4345 4346 if (obj->pages && 4347 obj->tiling_mode != I915_TILING_NONE && 4348 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 4349 if (obj->madv == I915_MADV_WILLNEED) 4350 i915_gem_object_unpin_pages(obj); 4351 if (args->madv == I915_MADV_WILLNEED) 4352 i915_gem_object_pin_pages(obj); 4353 } 4354 4355 if (obj->madv != __I915_MADV_PURGED) 4356 obj->madv = args->madv; 4357 4358 /* if the object is no longer attached, discard its backing storage */ 4359 if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL) 4360 i915_gem_object_truncate(obj); 4361 4362 args->retained = obj->madv != __I915_MADV_PURGED; 4363 4364 out: 4365 drm_gem_object_unreference(&obj->base); 4366 unlock: 4367 mutex_unlock(&dev->struct_mutex); 4368 return ret; 4369 } 4370 4371 void i915_gem_object_init(struct drm_i915_gem_object *obj, 4372 const struct drm_i915_gem_object_ops *ops) 4373 { 4374 INIT_LIST_HEAD(&obj->global_list); 4375 INIT_LIST_HEAD(&obj->ring_list); 4376 INIT_LIST_HEAD(&obj->obj_exec_link); 4377 INIT_LIST_HEAD(&obj->vma_list); 4378 INIT_LIST_HEAD(&obj->batch_pool_list); 4379 4380 obj->ops = ops; 4381 4382 obj->fence_reg = I915_FENCE_REG_NONE; 4383 obj->madv = I915_MADV_WILLNEED; 4384 4385 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size); 4386 } 4387 4388 static const struct drm_i915_gem_object_ops i915_gem_object_ops = { 4389 .get_pages = i915_gem_object_get_pages_gtt, 4390 .put_pages = i915_gem_object_put_pages_gtt, 4391 }; 4392 4393 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 4394 size_t size) 4395 { 4396 struct drm_i915_gem_object *obj; 4397 struct address_space *mapping; 4398 gfp_t mask; 4399 4400 obj = i915_gem_object_alloc(dev); 4401 if (obj == NULL) 4402 return NULL; 4403 4404 if (drm_gem_object_init(dev, &obj->base, size) 
!= 0) { 4405 i915_gem_object_free(obj); 4406 return NULL; 4407 } 4408 4409 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; 4410 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) { 4411 /* 965gm cannot relocate objects above 4GiB. */ 4412 mask &= ~__GFP_HIGHMEM; 4413 mask |= __GFP_DMA32; 4414 } 4415 4416 mapping = file_inode(obj->base.filp)->i_mapping; 4417 mapping_set_gfp_mask(mapping, mask); 4418 4419 i915_gem_object_init(obj, &i915_gem_object_ops); 4420 4421 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 4422 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 4423 4424 if (HAS_LLC(dev)) { 4425 /* On some devices, we can have the GPU use the LLC (the CPU 4426 * cache) for about a 10% performance improvement 4427 * compared to uncached. Graphics requests other than 4428 * display scanout are coherent with the CPU in 4429 * accessing this cache. This means in this mode we 4430 * don't need to clflush on the CPU side, and on the 4431 * GPU side we only need to flush internal caches to 4432 * get data visible to the CPU. 4433 * 4434 * However, we maintain the display planes as UC, and so 4435 * need to rebind when first used as such. 4436 */ 4437 obj->cache_level = I915_CACHE_LLC; 4438 } else 4439 obj->cache_level = I915_CACHE_NONE; 4440 4441 trace_i915_gem_object_create(obj); 4442 4443 return obj; 4444 } 4445 4446 static bool discard_backing_storage(struct drm_i915_gem_object *obj) 4447 { 4448 /* If we are the last user of the backing storage (be it shmemfs 4449 * pages or stolen etc), we know that the pages are going to be 4450 * immediately released. In this case, we can then skip copying 4451 * back the contents from the GPU. 4452 */ 4453 4454 if (obj->madv != I915_MADV_WILLNEED) 4455 return false; 4456 4457 if (obj->base.filp == NULL) 4458 return true; 4459 4460 /* At first glance, this looks racy, but then again so would be 4461 * userspace racing mmap against close. However, the first external 4462 * reference to the filp can only be obtained through the 4463 * i915_gem_mmap_ioctl() which safeguards us against the user 4464 * acquiring such a reference whilst we are in the middle of 4465 * freeing the object. 4466 */ 4467 return atomic_long_read(&obj->base.filp->f_count) == 1; 4468 } 4469 4470 void i915_gem_free_object(struct drm_gem_object *gem_obj) 4471 { 4472 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 4473 struct drm_device *dev = obj->base.dev; 4474 struct drm_i915_private *dev_priv = dev->dev_private; 4475 struct i915_vma *vma, *next; 4476 4477 intel_runtime_pm_get(dev_priv); 4478 4479 trace_i915_gem_object_destroy(obj); 4480 4481 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { 4482 int ret; 4483 4484 vma->pin_count = 0; 4485 ret = i915_vma_unbind(vma); 4486 if (WARN_ON(ret == -ERESTARTSYS)) { 4487 bool was_interruptible; 4488 4489 was_interruptible = dev_priv->mm.interruptible; 4490 dev_priv->mm.interruptible = false; 4491 4492 WARN_ON(i915_vma_unbind(vma)); 4493 4494 dev_priv->mm.interruptible = was_interruptible; 4495 } 4496 } 4497 4498 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up 4499 * before progressing. 
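	 * (Stolen memory has no shmemfs backing store, so its pages are kept
	 * pinned for the object's entire lifetime; drop that pin here.)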
	 */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	WARN_ON(obj->frontbuffer_bits);

	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
	    obj->tiling_mode != I915_TILING_NONE)
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	if (discard_backing_storage(obj))
		obj->madv = I915_MADV_DONTNEED;
	i915_gem_object_put_pages(obj);
	i915_gem_object_free_mmap_offset(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	if (obj->ops->release)
		obj->ops->release(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);

	intel_runtime_pm_put(dev_priv);
}

struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (i915_is_ggtt(vma->vm) &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma;
	}
	return NULL;
}

struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
					   const struct i915_ggtt_view *view)
{
	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
	struct i915_vma *vma;

	if (WARN_ONCE(!view, "no view specified"))
		return ERR_PTR(-EINVAL);

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->vm == ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma;
	return NULL;
}

void i915_gem_vma_destroy(struct i915_vma *vma)
{
	struct i915_address_space *vm = NULL;
	WARN_ON(vma->node.allocated);

	/* Keep the vma as a placeholder in the execbuffer reservation lists */
	if (!list_empty(&vma->exec_list))
		return;

	vm = vma->vm;

	if (!i915_is_ggtt(vm))
		i915_ppgtt_put(i915_vm_to_ppgtt(vm));

	list_del(&vma->vma_link);

	kfree(vma);
}

static void
i915_gem_stop_ringbuffers(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		dev_priv->gt.stop_ring(ring);
}

int
i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gpu_idle(dev);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev);

	i915_gem_stop_ringbuffers(dev);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	flush_delayed_work(&dev_priv->mm.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
4616 */ 4617 WARN_ON(dev_priv->mm.busy); 4618 4619 return 0; 4620 4621 err: 4622 mutex_unlock(&dev->struct_mutex); 4623 return ret; 4624 } 4625 4626 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice) 4627 { 4628 struct drm_device *dev = ring->dev; 4629 struct drm_i915_private *dev_priv = dev->dev_private; 4630 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200); 4631 u32 *remap_info = dev_priv->l3_parity.remap_info[slice]; 4632 int i, ret; 4633 4634 if (!HAS_L3_DPF(dev) || !remap_info) 4635 return 0; 4636 4637 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3); 4638 if (ret) 4639 return ret; 4640 4641 /* 4642 * Note: We do not worry about the concurrent register cacheline hang 4643 * here because no other code should access these registers other than 4644 * at initialization time. 4645 */ 4646 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { 4647 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 4648 intel_ring_emit(ring, reg_base + i); 4649 intel_ring_emit(ring, remap_info[i/4]); 4650 } 4651 4652 intel_ring_advance(ring); 4653 4654 return ret; 4655 } 4656 4657 void i915_gem_init_swizzling(struct drm_device *dev) 4658 { 4659 struct drm_i915_private *dev_priv = dev->dev_private; 4660 4661 if (INTEL_INFO(dev)->gen < 5 || 4662 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) 4663 return; 4664 4665 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 4666 DISP_TILE_SURFACE_SWIZZLING); 4667 4668 if (IS_GEN5(dev)) 4669 return; 4670 4671 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); 4672 if (IS_GEN6(dev)) 4673 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); 4674 else if (IS_GEN7(dev)) 4675 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); 4676 else if (IS_GEN8(dev)) 4677 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); 4678 else 4679 BUG(); 4680 } 4681 4682 static bool 4683 intel_enable_blt(struct drm_device *dev) 4684 { 4685 if (!HAS_BLT(dev)) 4686 return false; 4687 4688 /* The blitter was dysfunctional on early prototypes */ 4689 if (IS_GEN6(dev) && dev->pdev->revision < 8) { 4690 DRM_INFO("BLT not supported on this pre-production hardware;" 4691 " graphics performance will be degraded.\n"); 4692 return false; 4693 } 4694 4695 return true; 4696 } 4697 4698 static void init_unused_ring(struct drm_device *dev, u32 base) 4699 { 4700 struct drm_i915_private *dev_priv = dev->dev_private; 4701 4702 I915_WRITE(RING_CTL(base), 0); 4703 I915_WRITE(RING_HEAD(base), 0); 4704 I915_WRITE(RING_TAIL(base), 0); 4705 I915_WRITE(RING_START(base), 0); 4706 } 4707 4708 static void init_unused_rings(struct drm_device *dev) 4709 { 4710 if (IS_I830(dev)) { 4711 init_unused_ring(dev, PRB1_BASE); 4712 init_unused_ring(dev, SRB0_BASE); 4713 init_unused_ring(dev, SRB1_BASE); 4714 init_unused_ring(dev, SRB2_BASE); 4715 init_unused_ring(dev, SRB3_BASE); 4716 } else if (IS_GEN2(dev)) { 4717 init_unused_ring(dev, SRB0_BASE); 4718 init_unused_ring(dev, SRB1_BASE); 4719 } else if (IS_GEN3(dev)) { 4720 init_unused_ring(dev, PRB1_BASE); 4721 init_unused_ring(dev, PRB2_BASE); 4722 } 4723 } 4724 4725 int i915_gem_init_rings(struct drm_device *dev) 4726 { 4727 struct drm_i915_private *dev_priv = dev->dev_private; 4728 int ret; 4729 4730 ret = intel_init_render_ring_buffer(dev); 4731 if (ret) 4732 return ret; 4733 4734 if (HAS_BSD(dev)) { 4735 ret = intel_init_bsd_ring_buffer(dev); 4736 if (ret) 4737 goto cleanup_render_ring; 4738 } 4739 4740 if (intel_enable_blt(dev)) { 4741 ret = intel_init_blt_ring_buffer(dev); 4742 if (ret) 4743 goto cleanup_bsd_ring; 4744 } 
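
	/*
	 * The remaining engines are optional: only initialise VEBOX and the
	 * second BSD ring where the hardware actually has them, unwinding
	 * through the cleanup labels below if any step fails.
	 */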
	if (HAS_VEBOX(dev)) {
		ret = intel_init_vebox_ring_buffer(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = intel_init_bsd2_ring_buffer(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);

	return ret;
}

int
i915_gem_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i;

	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
		return -EIO;

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (dev_priv->ellc_size)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev)) {
		if (IS_IVYBRIDGE(dev)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_INFO(dev)->gen >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (i.e. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev);

	for_each_ring(ring, dev_priv, i) {
		ret = ring->init_hw(ring);
		if (ret)
			goto out;
	}

	for (i = 0; i < NUM_L3_SLICES(dev); i++)
		i915_gem_l3_remap(&dev_priv->ring[RCS], i);

	ret = i915_ppgtt_init_hw(dev);
	if (ret && ret != -EIO) {
		DRM_ERROR("PPGTT enable failed %d\n", ret);
		i915_gem_cleanup_ringbuffer(dev);
	}

	ret = i915_gem_context_enable(dev_priv);
	if (ret && ret != -EIO) {
		DRM_ERROR("Context enable failed %d\n", ret);
		i915_gem_cleanup_ringbuffer(dev);

		goto out;
	}

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;
}

int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	i915.enable_execlists = intel_sanitize_enable_execlists(dev,
			i915.enable_execlists);

	mutex_lock(&dev->struct_mutex);

	if (IS_VALLEYVIEW(dev)) {
		/* VLVA0 (potential hack), BIOS isn't actually waking us */
		I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
			      VLV_GTLC_ALLOWWAKEACK), 10))
			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
	}

	if (!i915.enable_execlists) {
		dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
		dev_priv->gt.init_rings = i915_gem_init_rings;
		dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
		dev_priv->gt.stop_ring = intel_stop_ring_buffer;
	} else {
		dev_priv->gt.do_execbuf = intel_execlists_submission;
		dev_priv->gt.init_rings = intel_logical_rings_init;
		dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
		dev_priv->gt.stop_ring = intel_logical_ring_stop;
	}

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	ret = i915_gem_init_userptr(dev);
	if (ret)
		goto out_unlock;

	i915_gem_init_global_gtt(dev);

	ret = i915_gem_context_init(dev);
	if (ret)
		goto out_unlock;

	ret = dev_priv->gt.init_rings(dev);
	if (ret)
		goto out_unlock;

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow ring initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failures, such as an allocation failure, bail.
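		 * Setting I915_WEDGED below makes subsequent GEM calls fail
		 * with -EIO rather than wait on a GPU that will never recover.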
4905 */ 4906 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n"); 4907 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter); 4908 ret = 0; 4909 } 4910 4911 out_unlock: 4912 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4913 mutex_unlock(&dev->struct_mutex); 4914 4915 return ret; 4916 } 4917 4918 void 4919 i915_gem_cleanup_ringbuffer(struct drm_device *dev) 4920 { 4921 struct drm_i915_private *dev_priv = dev->dev_private; 4922 struct intel_engine_cs *ring; 4923 int i; 4924 4925 for_each_ring(ring, dev_priv, i) 4926 dev_priv->gt.cleanup_ring(ring); 4927 } 4928 4929 static void 4930 init_ring_lists(struct intel_engine_cs *ring) 4931 { 4932 INIT_LIST_HEAD(&ring->active_list); 4933 INIT_LIST_HEAD(&ring->request_list); 4934 } 4935 4936 void i915_init_vm(struct drm_i915_private *dev_priv, 4937 struct i915_address_space *vm) 4938 { 4939 if (!i915_is_ggtt(vm)) 4940 drm_mm_init(&vm->mm, vm->start, vm->total); 4941 vm->dev = dev_priv->dev; 4942 INIT_LIST_HEAD(&vm->active_list); 4943 INIT_LIST_HEAD(&vm->inactive_list); 4944 INIT_LIST_HEAD(&vm->global_link); 4945 list_add_tail(&vm->global_link, &dev_priv->vm_list); 4946 } 4947 4948 void 4949 i915_gem_load(struct drm_device *dev) 4950 { 4951 struct drm_i915_private *dev_priv = dev->dev_private; 4952 int i; 4953 4954 dev_priv->slab = 4955 kmem_cache_create("i915_gem_object", 4956 sizeof(struct drm_i915_gem_object), 0, 4957 SLAB_HWCACHE_ALIGN, 4958 NULL); 4959 4960 INIT_LIST_HEAD(&dev_priv->vm_list); 4961 i915_init_vm(dev_priv, &dev_priv->gtt.base); 4962 4963 INIT_LIST_HEAD(&dev_priv->context_list); 4964 INIT_LIST_HEAD(&dev_priv->mm.unbound_list); 4965 INIT_LIST_HEAD(&dev_priv->mm.bound_list); 4966 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4967 for (i = 0; i < I915_NUM_RINGS; i++) 4968 init_ring_lists(&dev_priv->ring[i]); 4969 for (i = 0; i < I915_MAX_NUM_FENCES; i++) 4970 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 4971 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 4972 i915_gem_retire_work_handler); 4973 INIT_DELAYED_WORK(&dev_priv->mm.idle_work, 4974 i915_gem_idle_work_handler); 4975 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 4976 4977 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; 4978 4979 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) 4980 dev_priv->num_fence_regs = 32; 4981 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 4982 dev_priv->num_fence_regs = 16; 4983 else 4984 dev_priv->num_fence_regs = 8; 4985 4986 if (intel_vgpu_active(dev)) 4987 dev_priv->num_fence_regs = 4988 I915_READ(vgtif_reg(avail_rs.fence_num)); 4989 4990 /* Initialize fence registers to zero */ 4991 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 4992 i915_gem_restore_fences(dev); 4993 4994 i915_gem_detect_bit_6_swizzle(dev); 4995 init_waitqueue_head(&dev_priv->pending_flip_queue); 4996 4997 dev_priv->mm.interruptible = true; 4998 4999 i915_gem_shrinker_init(dev_priv); 5000 5001 i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool); 5002 5003 mutex_init(&dev_priv->fb_tracking.lock); 5004 } 5005 5006 void i915_gem_release(struct drm_device *dev, struct drm_file *file) 5007 { 5008 struct drm_i915_file_private *file_priv = file->driver_priv; 5009 5010 cancel_delayed_work_sync(&file_priv->mm.idle_work); 5011 5012 /* Clean up our request list when the client is going away, so that 5013 * later retire_requests won't dereference our soon-to-be-gone 5014 * file_priv. 
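	 * The requests themselves are not freed here; we only break the link
	 * by clearing request->file_priv under the spinlock below.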
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_file_private *file_priv =
		container_of(work, typeof(*file_priv), mm.idle_work.work);

	atomic_set(&file_priv->rps_wait_boost, false);
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);
	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
			  i915_gem_file_idle_work_handler);

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	if (old) {
		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}

	if (new) {
		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
		new->frontbuffer_bits |= frontbuffer_bits;
	}
}

/* All the new VM stuff */
unsigned long
i915_gem_obj_offset(struct drm_i915_gem_object *o,
		    struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (i915_is_ggtt(vma->vm) &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "%s vma for this object not found.\n",
	     i915_is_ggtt(vm) ?
"global" : "ppgtt"); 5111 return -1; 5112 } 5113 5114 unsigned long 5115 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, 5116 const struct i915_ggtt_view *view) 5117 { 5118 struct i915_address_space *ggtt = i915_obj_to_ggtt(o); 5119 struct i915_vma *vma; 5120 5121 list_for_each_entry(vma, &o->vma_list, vma_link) 5122 if (vma->vm == ggtt && 5123 i915_ggtt_view_equal(&vma->ggtt_view, view)) 5124 return vma->node.start; 5125 5126 WARN(1, "global vma for this object not found.\n"); 5127 return -1; 5128 } 5129 5130 bool i915_gem_obj_bound(struct drm_i915_gem_object *o, 5131 struct i915_address_space *vm) 5132 { 5133 struct i915_vma *vma; 5134 5135 list_for_each_entry(vma, &o->vma_list, vma_link) { 5136 if (i915_is_ggtt(vma->vm) && 5137 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) 5138 continue; 5139 if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) 5140 return true; 5141 } 5142 5143 return false; 5144 } 5145 5146 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, 5147 const struct i915_ggtt_view *view) 5148 { 5149 struct i915_address_space *ggtt = i915_obj_to_ggtt(o); 5150 struct i915_vma *vma; 5151 5152 list_for_each_entry(vma, &o->vma_list, vma_link) 5153 if (vma->vm == ggtt && 5154 i915_ggtt_view_equal(&vma->ggtt_view, view) && 5155 drm_mm_node_allocated(&vma->node)) 5156 return true; 5157 5158 return false; 5159 } 5160 5161 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) 5162 { 5163 struct i915_vma *vma; 5164 5165 list_for_each_entry(vma, &o->vma_list, vma_link) 5166 if (drm_mm_node_allocated(&vma->node)) 5167 return true; 5168 5169 return false; 5170 } 5171 5172 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, 5173 struct i915_address_space *vm) 5174 { 5175 struct drm_i915_private *dev_priv = o->base.dev->dev_private; 5176 struct i915_vma *vma; 5177 5178 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); 5179 5180 BUG_ON(list_empty(&o->vma_list)); 5181 5182 list_for_each_entry(vma, &o->vma_list, vma_link) { 5183 if (i915_is_ggtt(vma->vm) && 5184 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) 5185 continue; 5186 if (vma->vm == vm) 5187 return vma->node.size; 5188 } 5189 return 0; 5190 } 5191 5192 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) 5193 { 5194 struct i915_vma *vma; 5195 list_for_each_entry(vma, &obj->vma_list, vma_link) { 5196 if (i915_is_ggtt(vma->vm) && 5197 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) 5198 continue; 5199 if (vma->pin_count > 0) 5200 return true; 5201 } 5202 return false; 5203 } 5204 5205