/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <asm/pgtable.h>
#include "drm_internal.h"
#include "drm_legacy.h"

struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp = pgprot_noncached_wc(tmp);
#endif
	return tmp;
}
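/*
 * Illustrative sketch (not driver code): how the protection helpers above
 * are typically consumed when wiring up an I/O mapping.  The map and vma
 * below are placeholders for whatever the caller already holds.
 *
 *	vma->vm_page_prot = drm_io_prot(map, vma);
 *	io_remap_pfn_range(vma, vma->vm_start, map->offset >> PAGE_SHIFT,
 *			   vma->vm_end - vma->vm_start, vma->vm_page_prot);
 *
 * drm_mmap_locked() below does exactly this for _DRM_REGISTERS and
 * _DRM_FRAME_BUFFER maps; the point is that the pgprot must be computed
 * before the pages are inserted, since the PTEs inherit it.
 */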
/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if IS_ENABLED(CONFIG_AGP)
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}
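/*
 * Worked example (illustrative only): an _DRM_SHM map whose handle came
 * from vmalloc().  A fault at vma->vm_start + 0x3000 resolves as
 *
 *	offset = 0x3000;
 *	page   = vmalloc_to_page((void *)((unsigned long)map->handle + 0x3000));
 *
 * i.e. the fourth page of the vmalloc area backing the map.  vmalloc
 * memory is physically discontiguous, which is why each page must be
 * looked up individually at fault time instead of being inserted up
 * front with remap_pfn_range().
 */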
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_legacy_pci_free(dev, &dmah);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;	/* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}
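/*
 * Worked example (illustrative only): with a 16KiB DMA mapping and a
 * fault 9KiB into it,
 *
 *	offset  = 0x2400;
 *	page_nr = 0x2400 >> PAGE_SHIFT;		(2 on 4KiB pages)
 *
 * so the handler returns the struct page behind dma->pagelist[2].  This
 * only works because the DMA path is entered with vm_pgoff == 0, so the
 * offset into the vma is also the offset into the pagelist.
 */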
/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

static void drm_vm_open_locked(struct drm_device *dev,
			       struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

static void drm_vm_close_locked(struct drm_device *dev,
				struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}
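/*
 * Illustrative note: the open/close pair above exists because the core VM
 * duplicates vmas behind the driver's back.  For instance, after
 *
 *	pid_t child = fork();
 *
 * the child inherits every mapping and the VM calls ->open() on each
 * copied vma; drm_vm_open_locked() then records a second drm_vma_entry
 * with the child's pid, so dev->vmalist keeps one entry per live vma
 * rather than one per original mmap() call.
 */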
/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}
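/*
 * Illustrative sketch: what the generic read-only fallback above amounts
 * to.  On x86 clearing _PAGE_RW is a single mask; the portable spelling
 * round-trips the pgprot through a pte so the architecture's
 * pte_wrprotect() can clear whatever its PTE format requires:
 *
 *	pte_t pte = __pte(pgprot_val(vma->vm_page_prot));
 *	pte = pte_wrprotect(pte);
 *	vma->vm_page_prot = __pgprot(pte_val(pte));
 *
 * Both forms leave the vma readable but fault on writes, which is the
 * point of _DRM_DMA_USE_PCI_RO for unprivileged clients.
 */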
/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the
			 * CPU, so for memory of type DRM_AGP, we'll deal with sorting
			 * out the real physical pages and mappings in fault()
			 */
#if defined(__powerpc__)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end,
			  (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}
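/*
 * Illustrative sketch (userspace side, not kernel code): the vm_pgoff
 * looked up above is the token that drm_legacy_addmap() registered in
 * dev->map_hash.  A legacy client typically does something like
 *
 *	struct drm_map req = { .offset = phys, .size = sz, .type = _DRM_SHM };
 *	ioctl(fd, DRM_IOCTL_ADD_MAP, &req);	// kernel fills req.handle
 *	void *ptr = mmap(NULL, req.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, (off_t)(unsigned long)req.handle);
 *
 * so the offset passed to mmap(2) is not a physical address but the
 * hash key under which the map was registered.
 */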
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);

void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}
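/*
 * Illustrative sketch (not part of this file): how a legacy driver wires
 * drm_legacy_mmap() into its file_operations.  The "foo" names are
 * placeholders.
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_legacy_mmap,
 *		.poll		= drm_poll,
 *	};
 *
 * Every mmap() on the device node then funnels through the map-type
 * dispatch in drm_mmap_locked() above.
 */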