/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
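
/*
 * ttm_bo_vm_fault_idle - quiesce a pipelined move before servicing a fault.
 *
 * Waits for the buffer object's move fence (bo->moving), if any. Returns 0
 * once the buffer is idle, VM_FAULT_RETRY when the fault should be retried
 * (dropping mmap_sem to wait for the fence if the fault flags allow it),
 * VM_FAULT_NOPAGE when an ordinary wait was interrupted by a signal, or
 * VM_FAULT_SIGBUS on other errors.
 */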
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_fault *vmf)
{
	vm_fault_t ret = 0;
	int err = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		ttm_bo_get(bo);
		up_read(&vmf->vma->vm_mm->mmap_sem);
		(void) dma_fence_wait(bo->moving, true);
		dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	err = dma_fence_wait(bo->moving, true);
	if (unlikely(err != 0)) {
		ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}

static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
				       unsigned long page_offset)
{
	struct ttm_bo_device *bdev = bo->bdev;

	if (bdev->driver->io_mem_pfn)
		return bdev->driver->io_mem_pfn(bo, page_offset);

	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
		+ page_offset;
}

/**
 * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
 * @bo: The buffer object
 * @vmf: The fault structure handed to the callback
 *
 * vm callbacks like fault() and *_mkwrite() allow for the mmap_sem to be
 * dropped during long waits, and after the wait the callback will be
 * restarted. This allows other threads using the same virtual memory space
 * to concurrently map() and unmap() completely unrelated buffer objects.
 * TTM buffer object reservations sometimes wait for GPU and should therefore
 * be considered long waits. This function reserves the buffer object
 * interruptibly, taking this into account. Starvation is avoided by the vm
 * system not allowing too many repeated restarts.
 * This function is intended to be used in customized fault() and _mkwrite()
 * handlers.
 *
 * Return:
 *   0 on success and the bo was reserved.
 *   VM_FAULT_RETRY if blocking wait.
 *   VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
 */
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf)
{
	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_get(bo);
				up_read(&vmf->vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
				ttm_bo_put(bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we'd want to change locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
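
/*
 * Example (illustrative sketch only, not an in-tree driver): a customized
 * fault() handler would typically pair ttm_bo_vm_reserve() with
 * ttm_bo_vm_fault_reserved() below, much like the generic ttm_bo_vm_fault()
 * helper further down in this file, with any driver-specific validation done
 * between the reserve and the fault helper while the reservation is held.
 * The my_driver_fault name is hypothetical:
 *
 *	static vm_fault_t my_driver_fault(struct vm_fault *vmf)
 *	{
 *		struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
 *		vm_fault_t ret;
 *
 *		ret = ttm_bo_vm_reserve(bo, vmf);
 *		if (ret)
 *			return ret;
 *
 *		ret = ttm_bo_vm_fault_reserved(vmf,
 *					       vm_get_page_prot(vmf->vma->vm_flags),
 *					       TTM_BO_VM_NUM_PREFAULT);
 *		if (ret == VM_FAULT_RETRY &&
 *		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 *			return ret;
 *
 *		dma_resv_unlock(bo->base.resv);
 *		return ret;
 *	}
 */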

/**
 * ttm_bo_vm_fault_reserved - TTM fault helper
 * @vmf: The struct vm_fault given as argument to the fault callback
 * @prot: The page protection to be used for this memory area.
 * @num_prefault: Maximum number of prefault pages. The caller may want to
 * specify this based on madvise settings and the size of the GPU object
 * backed by the memory.
 *
 * This function inserts one or more page table entries pointing to the
 * memory backing the buffer object, and then returns a return code
 * instructing the caller to retry the page access.
 *
 * Return:
 *   VM_FAULT_NOPAGE on success or pending signal
 *   VM_FAULT_SIGBUS on unspecified error
 *   VM_FAULT_OOM on out-of-memory
 *   VM_FAULT_RETRY if retryable wait
 */
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vm_area_struct cvma = *vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int err;
	pgoff_t i;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address = vmf->address;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
		return VM_FAULT_SIGBUS;

	if (bdev->driver->fault_reserve_notify) {
		struct dma_fence *moving = dma_fence_get(bo->moving);

		err = bdev->driver->fault_reserve_notify(bo);
		switch (err) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			return VM_FAULT_NOPAGE;
		default:
			return VM_FAULT_SIGBUS;
		}

		if (bo->moving != moving) {
			spin_lock(&ttm_bo_glob.lru_lock);
			ttm_bo_move_to_lru_tail(bo, NULL);
			spin_unlock(&ttm_bo_glob.lru_lock);
		}
		dma_fence_put(moving);
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0))
		return ret;

	err = ttm_mem_io_lock(man, true);
	if (unlikely(err != 0))
		return VM_FAULT_NOPAGE;
	err = ttm_mem_io_reserve_vm(bo);
	if (unlikely(err != 0)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->base.vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
	if (!bo->mem.bus.is_iomem) {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC
		};

		ttm = bo->ttm;
		if (ttm_tt_populate(bo->ttm, &ctx)) {
			ret = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	} else {
		/* Iomem should not be marked encrypted */
		cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < num_prefault; ++i) {
		if (bo->mem.bus.is_iomem) {
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				ret = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->index = drm_vma_node_start(&bo->base.vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vmf_insert_mixed(&cvma, address,
					       __pfn_to_pfn_t(pfn, PFN_DEV));
		else
			ret = vmf_insert_pfn(&cvma, address, pfn);

		/* Never error on prefaulted PTEs */
		if (unlikely((ret & VM_FAULT_ERROR))) {
			if (i == 0)
				goto out_io_unlock;
			else
				break;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	ret = VM_FAULT_NOPAGE;
out_io_unlock:
	ttm_mem_io_unlock(man);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);

static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t prot;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	prot = vm_get_page_prot(vma->vm_flags);
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);

	return ret;
}

void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	ttm_bo_get(bo);
}
EXPORT_SYMBOL(ttm_bo_vm_open);

void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	ttm_bo_put(bo);
	vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);
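
/*
 * Helper for ttm_bo_vm_access(): copy @len bytes between @buf and the memory
 * backing @bo starting at byte @offset, kmapping one page at a time with
 * ttm_bo_kmap() so that no long-lived virtual mapping of the whole object is
 * needed. Returns @len on success or a negative error code from
 * ttm_bo_kmap().
 */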
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, that way no extra virtual address
	 * mapping is needed
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}

static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
			    void *buf, int len, int write)
{
	unsigned long offset = addr - vma->vm_start;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	int ret;

	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->mem.mem_type) {
	case TTM_PL_SYSTEM:
		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
			ret = ttm_tt_swapin(bo->ttm);
			if (unlikely(ret != 0))
				break;
		}
		/* fall through */
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->driver->access_memory)
			ret = bo->bdev->driver->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

	ttm_bo_unreserve(bo);

	return ret;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object,
				  base.vma_node);
		bo = ttm_bo_get_unless_zero(bo);
	}

	drm_vma_offset_unlock_lookup(bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}

static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
{
	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
}

int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
		return -EINVAL;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	ttm_bo_mmap_vma_setup(bo, vma);
	return 0;
out_unref:
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	ttm_bo_get(bo);

	/*
	 * FIXME: &drm_gem_object_funcs.mmap is called with the fake offset
	 * removed. Add it back here until the rest of TTM works without it.
	 */
	vma->vm_pgoff += drm_vma_node_start(&bo->base.vma_node);

	ttm_bo_mmap_vma_setup(bo, vma);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);
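
/*
 * Example (illustrative sketch only): drivers reach the vm_ops installed by
 * ttm_bo_mmap_vma_setup() either through their file_operations .mmap
 * callback, e.g.
 *
 *	static int my_driver_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct my_driver_device *mdev = my_driver_from_file(filp);
 *
 *		return ttm_bo_mmap(filp, vma, &mdev->bdev);
 *	}
 *
 * or, for GEM-based objects, by calling ttm_bo_mmap_obj() on the embedded
 * ttm_buffer_object from a &drm_gem_object_funcs.mmap implementation. The
 * my_driver_* names above are hypothetical.
 */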