/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/nospec.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/shmparam.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "drm_legacy.h"


static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

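/*
 * Compute the user-visible handle (hash key) for a map.  When the token fits
 * in 32 bits and no hashed handle was requested, the page number is used
 * directly; otherwise a free key is taken from the map hash table, keeping
 * the SHMLBA-relevant low bits for _DRM_SHM maps so that the later mmap()
 * does not create cache aliases.
 */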
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}

/*
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

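	/* Resolve the final offset and kernel handle according to the map
	 * type: register maps are ioremap()ed, _DRM_SHM is vmalloc()ed,
	 * AGP and scatter/gather offsets are rebased onto their aperture,
	 * and _DRM_CONSISTENT memory comes from dma_alloc_coherent().
	 */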
	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. the i810 driver). So this loop will get
		 * skipped and we double-check that dev->agp->memory is
		 * actually set, as well as being invalid, before EPERM'ing.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64-bit variable first.
		 */
		map->handle = dma_alloc_coherent(&dev->pdev->dev,
						 map->size,
						 &map->offset,
						 GFP_KERNEL);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->master;
	*maplist = list;
	return 0;
}

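/*
 * In-kernel counterpart of the addmap ioctl: creates (or reuses) a map and
 * returns the resulting drm_local_map through @map_ptr.
 */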
int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);

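/* Look up a map by the user token that was assigned when it was created. */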
struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
					 unsigned int token)
{
	struct drm_map_list *_entry;
	list_for_each_entry(_entry, &dev->maplist, head)
		if (_entry->user_token == token)
			return _entry->map;
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_findmap);

/*
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit; this casting isn't very nice, but the API is already set, so it's too late */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it.  Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 *  it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}

/*
 * Get mapping information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping with the specified offset and copies its
 * information into userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *r_list = NULL;
	struct list_head *list;
	int idx;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	idx = map->offset;
	if (idx < 0)
		return -EINVAL;

	i = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist) {
		if (i == idx) {
			r_list = list_entry(list, struct drm_map_list, head);
			break;
		}
		i++;
	}
	if (!r_list || !r_list->map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	map->offset = r_list->map->offset;
	map->size = r_list->map->size;
	map->type = r_list->map->type;
	map->flags = r_list->map->flags;
	map->handle = (void *)(unsigned long) r_list->user_token;
	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/*
 * Remove a map from the list and deallocate its resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is being used, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dma_free_coherent(&dev->pdev->dev,
				  map->size,
				  map->handle,
				  map->offset);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);

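/*
 * Locking wrapper around drm_legacy_rmmap_locked(): takes dev->struct_mutex
 * before removing the map.
 */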
void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);

void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
	struct drm_map_list *r_list, *list_temp;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

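/* Remove every map still on the device's maplist. */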
void drm_legacy_rmmaps(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_legacy_rmmap(dev, r_list->map);
}

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/*
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if IS_ENABLED(CONFIG_AGP)
/*
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

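	/* Carve the requested AGP range into 'count' buffers spaced
	 * 'alignment' bytes apart; both bus_address and address refer to
	 * the aperture address agp_offset + offset.
	 */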
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */

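/*
 * Add PCI (DMA-coherent) buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * Allocates DMA-coherent segments with drm_pci_alloc(), carves them into
 * buffers of the requested size order and appends them to the device's
 * buffer and page lists.
 */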
int drm_legacy_addbufs_pci(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
				      sizeof(*dma->pagelist),
				      GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

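	/* Allocate one coherent segment of PAGE_SIZE << page_order bytes at a
	 * time and split it into buffers, recording every page in the
	 * temporary pagelist.
	 */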
	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;

}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);

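/*
 * Add scatter-gather buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * Like drm_legacy_addbufs_agp(), but the buffers live inside the previously
 * allocated scatter-gather area at dev->sg.
 */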
static int drm_legacy_addbufs_sg(struct drm_device *dev,
				 struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

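	/* Create the buffers; each address is an offset from request->agp_start
	 * within the scatter-gather area mapped at dev->sg->virtual.
	 */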
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/*
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

#if IS_ENABLED(CONFIG_AGP)
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_legacy_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_legacy_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_legacy_addbufs_pci(dev, request);

	return ret;
}

/*
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int __drm_legacy_infobufs(struct drm_device *dev,
			  void *data, int *p,
			  int (*f)(void *, int, struct drm_buf_entry *))
{
	struct drm_device_dma *dma = dev->dma;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (*p >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			struct drm_buf_entry *from = &dma->bufs[i];
			if (from->buf_count) {
				if (f(data, count, from) < 0)
					return -EFAULT;
				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	*p = count;

	return 0;
}

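/* Callback for __drm_legacy_infobufs(): copy one drm_buf_desc to user space. */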
static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
{
	struct drm_buf_info *request = data;
	struct drm_buf_desc __user *to = &request->list[count];
	struct drm_buf_desc v = {.count = from->buf_count,
				 .size = from->buf_size,
				 .low_mark = from->low_mark,
				 .high_mark = from->high_mark};

	if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
		return -EFAULT;
	return 0;
}

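/* Ioctl wrapper: report the populated buffer size orders through struct drm_buf_info. */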
int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_buf_info *request = data;
	return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}

/*
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}

/*
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		idx = array_index_nospec(idx, dma->buf_count);
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_legacy_free_buffer(dev, buf);
	}

	return 0;
}

/*
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
			 void __user **v,
			 int (*f)(void *, int, unsigned long,
				  struct drm_buf *),
			 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	unsigned long virtual;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	if (*p >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		*v = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (f(data, i, virtual, dma->buflist[i]) < 0) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
done:
	*p = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);

	return retcode;
}

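/* Callback for __drm_legacy_mapbufs(): copy one buffer's index, size and
 * user-space address into the request list.
 */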
static int map_one_buf(void *data, int idx, unsigned long virtual,
		       struct drm_buf *buf)
{
	struct drm_buf_map *request = data;
	unsigned long address = virtual + buf->offset;	/* *** */

	if (copy_to_user(&request->list[idx].idx, &buf->idx,
			 sizeof(request->list[0].idx)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].total, &buf->total,
			 sizeof(request->list[0].total)))
		return -EFAULT;
	if (clear_user(&request->list[idx].used, sizeof(int)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].address, &address,
			 sizeof(address)))
		return -EFAULT;
	return 0;
}

int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_map *request = data;
	return __drm_legacy_mapbufs(dev, data, &request->count,
				    &request->virtual, map_one_buf,
				    file_priv);
}

int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}

struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);