/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_mm.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_gem_ttm.h"
#include "i915_vma.h"

static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
	      unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on;
 * hiding the mmap call in a driver-private ioctl will break that. The i915
 * driver only does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	/*
	 * mmap ioctl is disallowed for all discrete platforms,
	 * and for all platforms with GRAPHICS_VER > 12.
	 */
	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
		return -EOPNOTSUPP;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !pat_enabled())
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		addr = -ENXIO;
		goto err;
	}

	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
		addr = -EINVAL;
		goto err;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (IS_ERR_VALUE(addr))
		goto err;

	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (mmap_write_lock_killable(mm)) {
			addr = -EINTR;
			goto err;
		}
		vma = find_vma(mm, addr);
		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		mmap_write_unlock(mm);
		if (IS_ERR_VALUE(addr))
			goto err;
	}
	i915_gem_object_put(obj);

	args->addr_ptr = (u64)addr;
	return 0;

err:
	i915_gem_object_put(obj);
	return addr;
}
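
/*
 * For reference only: a minimal sketch of the modern path described above,
 * assuming a userspace process that already holds an open DRM fd and a GEM
 * handle. The object is exposed through DRM_IOCTL_I915_GEM_MMAP_OFFSET and
 * then mapped with a plain mmap() on the DRM fd:
 *
 *	struct drm_i915_gem_mmap_offset mo = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mo);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, mo.offset);
 *
 * The fake offset returned in mo.offset is resolved back to the object in
 * i915_gem_mmap() below.
 */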

static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
 *     pagefault; swapin remains transparent.
 *
 * 4 - Support multiple fault handlers per object depending on object's
 *     backing storage (a.k.a. MMAP_OFFSET).
 *
 * Restrictions:
 *
 * * snoopable objects cannot be accessed via the GTT. It can cause machine
 *   hangs on some architectures, corruption on others. An attempt to service
 *   a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 * * the object must be able to fit into RAM (physical memory, though not
 *   limited to the mappable aperture).
 *
 * Caveats:
 *
 * * a new GTT page fault will synchronize rendering from the GPU and flush
 *   all data to system memory. Subsequent access will not be synchronized.
 *
 * * all mappings are revoked on runtime device suspend.
 *
 * * there are only 8, 16 or 32 fence registers to share between all users
 *   (older machines require a fence register for display and blitter access
 *   as well). Contention of the fence registers will cause the previous users
 *   to be unmapped and any new access will generate new page faults.
 *
 * * running out of memory while servicing a fault may generate a SIGBUS,
 *   rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 4;
}
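
/*
 * Worked example (illustrative, assuming 4KiB pages): with
 * MIN_CHUNK_PAGES = SZ_1M >> PAGE_SHIFT = 256, a fault at page_offset
 * 0x12345 of an untiled object maps to a partial view starting at
 * rounddown(0x12345, 256) = 0x12300 and covering up to 256 pages (1MiB),
 * clamped to the remaining size of the object. Only if the chunk covers
 * the whole object do we fall back to a normal (full) view.
 */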

static inline struct i915_gtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_gtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj) ?: 1);

	view.type = I915_GTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GTT_VIEW_NORMAL;

	return view;
}

static vm_fault_t i915_error_to_vmf_fault(int err)
{
	switch (err) {
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		fallthrough;
	case -EIO: /* shmemfs failure from swap device */
	case -EFAULT: /* purged object */
	case -ENODEV: /* bad object, how did you get here! */
	case -ENXIO: /* unable to access backing store (on device) */
		return VM_FAULT_SIGBUS;

	case -ENOMEM: /* our allocation failure */
		return VM_FAULT_OOM;

	case 0:
	case -EAGAIN:
	case -ENOSPC: /* transient failure to evict? */
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	}
}

static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	resource_size_t iomap;
	int err;

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	if (i915_gem_object_lock_interruptible(obj, NULL))
		return VM_FAULT_NOPAGE;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	iomap = -1;
	if (!i915_gem_object_has_struct_page(obj)) {
		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;
	}

	/* PTEs are revoked in obj->ops->put_pages() */
	err = remap_io_sg(area,
			  area->vm_start, area->vm_end - area->vm_start,
			  obj->mm.pages->sgl, iomap);

	if (area->vm_flags & VM_WRITE) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);

out:
	i915_gem_object_unlock(obj);
	return i915_error_to_vmf_fault(err);
}
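
/*
 * Rough overview of the GTT fault path below: the faulting page is
 * translated into a GGTT binding of the object (using a partial view if
 * the whole object cannot fit in the mappable aperture), any required
 * fence register is pinned, and the relevant aperture pages are remapped
 * into the faulting vma with remap_io_mapping(). The whole sequence runs
 * under a runtime-pm wakeref, the ww object lock and the GT reset srcu
 * lock, and the mapping is registered for later revocation on suspend or
 * fence contention.
 */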

static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	struct i915_gem_ww_ctx ww;
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	int srcu;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_rpm;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write) {
		ret = -EFAULT;
		goto err_rpm;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_rpm;

	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_pages;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE |
					  PIN_NONBLOCK /* NOWARN */ |
					  PIN_NOEVICT);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_gtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */

		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}

		/*
		 * The entire mappable GGTT is pinned? Unexpected!
		 * Try to evict the object we locked too, as normally we skip it
		 * due to lack of short term pinning inside execbuf.
		 */
		if (vma == ERR_PTR(-ENOSPC)) {
			ret = mutex_lock_interruptible(&ggtt->vm.mutex);
			if (!ret) {
				ret = i915_gem_evict_vm(&ggtt->vm, &ww);
				mutex_unlock(&ggtt->vm.mutex);
			}
			if (ret)
				goto err_reset;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->iomap);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);

	/* Mark as being mmapped into userspace for later revocation */
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);

	/* Track the mmo associated with the fenced vma */
	vma->mmo = mmo;

	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
		intel_wakeref_auto(&to_gt(i915)->userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
	i915_gem_object_unpin_pages(obj);
err_rpm:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_runtime_pm_put(rpm, wakeref);
	return i915_error_to_vmf_fault(ret);
}

static int
vm_access(struct vm_area_struct *area, unsigned long addr,
	  void *buf, int len, int write)
{
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct i915_gem_ww_ctx ww;
	void *vaddr;
	int err = 0;

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	addr -= area->vm_start;
	if (range_overflows_t(u64, addr, len, obj->base.size))
		return -EINVAL;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out;

	/* As this is primarily for debugging, let's focus on simplicity */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out;
	}

	if (write) {
		memcpy(vaddr + addr, buf, len);
		__i915_gem_object_flush_map(obj, addr, len);
	} else {
		memcpy(buf, vaddr + addr, len);
	}

	i915_gem_object_unpin_map(obj);
out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return err;

	return len;
}

void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	for_each_ggtt_vma(vma, obj)
		i915_vma_revoke_mmap(vma);

	GEM_BUG_ON(obj->userfault_count);
}

/*
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by vm_fault_gtt().
 */
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;

	/*
	 * Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates this somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap_gtt(obj);

	/*
	 * Ensure that the CPU's PTE are revoked and there are no outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

out:
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);

	if (obj->userfault_count) {
		/* rpm wakeref provides exclusive access */
		list_del(&obj->userfault_link);
		obj->userfault_count = 0;
	}
}

void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo, *mn;

	if (obj->ops->unmap_virtual)
		obj->ops->unmap_virtual(obj);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
		 */
		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
			continue;

		spin_unlock(&obj->mmo.lock);
		drm_vma_node_unmap(&mmo->vma_node,
				   obj->base.dev->anon_inode->i_mapping);
		spin_lock(&obj->mmo.lock);
	}
	spin_unlock(&obj->mmo.lock);

	if (obj->userfault_count) {
		mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
		list_del(&obj->userfault_link);
		mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
		obj->userfault_count = 0;
	}
}
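
/*
 * Each object keeps its mmap offsets in a small rbtree (obj->mmo.offsets),
 * keyed by mmap_type and protected by obj->mmo.lock, so that at most one
 * fake offset exists per object per mapping type. lookup_mmo() walks the
 * tree, and insert_mmo() discards a freshly allocated node in favour of a
 * concurrently inserted one for the same type.
 */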

static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;
		}

		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}

static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node *rb, **p;

	spin_lock(&obj->mmo.lock);
	rb = NULL;
	p = &obj->mmo.offsets.rb_node;
	while (*p) {
		struct i915_mmap_offset *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), offset);

		if (pos->mmap_type == mmo->mmap_type) {
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (pos->mmap_type < mmo->mmap_type)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&mmo->offset, rb, p);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}

static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
		   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_mmap_offset *mmo;
	int err;

	GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);

	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (likely(!err))
		goto insert;

	/* Attempt to reap some mmap space from dead objects */
	err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT,
					       NULL);
	if (err)
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow(&mmo->vma_node, file);
	return mmo;

err:
	kfree(mmo);
	return ERR_PTR(err);
}

static int
__assign_mmap_offset(struct drm_i915_gem_object *obj,
		     enum i915_mmap_type mmap_type,
		     u64 *offset, struct drm_file *file)
{
	struct i915_mmap_offset *mmo;

	if (i915_gem_object_never_mmap(obj))
		return -ENODEV;

	if (obj->ops->mmap_offset) {
		if (mmap_type != I915_MMAP_TYPE_FIXED)
			return -ENODEV;

		*offset = obj->ops->mmap_offset(obj);
		return 0;
	}

	if (mmap_type == I915_MMAP_TYPE_FIXED)
		return -ENODEV;

	if (mmap_type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return -ENODEV;

	mmo = mmap_offset_attach(obj, mmap_type, file);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
	return 0;
}

static int
__assign_mmap_offset_handle(struct drm_file *file,
			    u32 handle,
			    enum i915_mmap_type mmap_type,
			    u64 *offset)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out_put;
	err = __assign_mmap_offset(obj, mmap_type, offset, file);
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);
	return err;
}
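
/*
 * Pick the mapping type for dumb buffers: FIXED on devices with local
 * memory, WC when PAT is available, otherwise fall back to the GTT
 * aperture (or fail if there is none).
 */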

int
i915_gem_dumb_mmap_offset(struct drm_file *file,
			  struct drm_device *dev,
			  u32 handle,
			  u64 *offset)
{
	struct drm_i915_private *i915 = to_i915(dev);
	enum i915_mmap_type mmap_type;

	if (HAS_LMEM(to_i915(dev)))
		mmap_type = I915_MMAP_TYPE_FIXED;
	else if (pat_enabled())
		mmap_type = I915_MMAP_TYPE_WC;
	else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return -ENODEV;
	else
		mmap_type = I915_MMAP_TYPE_GTT;

	return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
}

/**
 * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap_offset *args = data;
	enum i915_mmap_type type;
	int err;

	/*
	 * Historically we failed to check args.pad and args.offset
	 * and so we cannot use those fields for user input and we cannot
	 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
	 * may be feeding in garbage in those fields.
	 *
	 * if (args->pad) return -EINVAL; is verboten!
	 */

	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   NULL, 0, NULL);
	if (err)
		return err;

	switch (args->flags) {
	case I915_MMAP_OFFSET_GTT:
		if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
			return -ENODEV;
		type = I915_MMAP_TYPE_GTT;
		break;

	case I915_MMAP_OFFSET_WC:
		if (!pat_enabled())
			return -ENODEV;
		type = I915_MMAP_TYPE_WC;
		break;

	case I915_MMAP_OFFSET_WB:
		type = I915_MMAP_TYPE_WB;
		break;

	case I915_MMAP_OFFSET_UC:
		if (!pat_enabled())
			return -ENODEV;
		type = I915_MMAP_TYPE_UC;
		break;

	case I915_MMAP_OFFSET_FIXED:
		type = I915_MMAP_TYPE_FIXED;
		break;

	default:
		return -EINVAL;
	}

	return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
}

static void vm_open(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void vm_close(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_gtt = {
	.fault = vm_fault_gtt,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static const struct vm_operations_struct vm_ops_cpu = {
	.fault = vm_fault_cpu,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static int singleton_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = file->private_data;

	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
	drm_dev_put(&i915->drm);

	return 0;
}

static const struct file_operations singleton_fops = {
	.owner = THIS_MODULE,
	.release = singleton_release,
};
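
/*
 * All GEM mmaps are backed by one per-device anonymous file so that every
 * mapping shares a single address_space. That shared mapping is what lets
 * drm_vma_node_unmap() revoke the userspace PTEs for an object in one
 * place, while the object reference itself is held via the mmo (see the
 * comment in i915_gem_mmap() below).
 */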

static struct file *mmap_singleton(struct drm_i915_private *i915)
{
	struct file *file;

	rcu_read_lock();
	file = READ_ONCE(i915->gem.mmap_singleton);
	if (file && !get_file_rcu(file))
		file = NULL;
	rcu_read_unlock();
	if (file)
		return file;

	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
	if (IS_ERR(file))
		return file;

	/* Everyone shares a single global address space */
	file->f_mapping = i915->drm.anon_inode->i_mapping;

	smp_store_mb(i915->gem.mmap_singleton, file);
	drm_dev_get(&i915->drm);

	return file;
}

/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data, since we need to be able to
 * resolve multiple mmap offsets which could be tied to a single gem object.
 */
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;
	struct file *anon;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as they are in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */
		if (!node->driver_private) {
			mmo = container_of(node, struct i915_mmap_offset, vma_node);
			obj = i915_gem_object_get_rcu(mmo->obj);

			GEM_BUG_ON(obj && obj->ops->mmap_ops);
		} else {
			obj = i915_gem_object_get_rcu
				(container_of(node, struct drm_i915_gem_object,
					      base.vma_node));

			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
		}
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return node ? -EACCES : -EINVAL;

	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE) {
			i915_gem_object_put(obj);
			return -EINVAL;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	anon = mmap_singleton(to_i915(dev));
	if (IS_ERR(anon)) {
		i915_gem_object_put(obj);
		return PTR_ERR(anon);
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;

	/*
	 * We keep the ref on mmo->obj, not vm_file, but we require
	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
	 * Our userspace is accustomed to having per-file resource cleanup
	 * (i.e. contexts, objects and requests) on their close(fd), which
	 * requires avoiding extraneous references to their filp, hence why
	 * we prefer to use an anonymous file for their mmaps.
	 */
	vma_set_file(vma, anon);
	/* Drop the initial creation reference, the vma is now holding one. */
	fput(anon);
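
	/*
	 * Objects that implement their own mmap_ops (e.g. the TTM-backed
	 * ones) take over fault handling entirely; everything else picks a
	 * page protection and vm_ops based on the requested mapping type.
	 */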
	if (obj->ops->mmap_ops) {
		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = obj->ops->mmap_ops;
		vma->vm_private_data = node->driver_private;
		return 0;
	}

	vma->vm_private_data = mmo;

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_FIXED:
		GEM_WARN_ON(1);
		fallthrough;
	case I915_MMAP_TYPE_WB:
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_UC:
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_GTT:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_gtt;
		break;
	}
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif