/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/export.h>
#include <asm/shmparam.h>
#include <drm/drmP.h>
#include "drm_legacy.h"

#include <linux/nospec.h>

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;

		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
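
/*
 * Illustrative example of the _DRM_SHM handle arithmetic above (the concrete
 * numbers are assumptions for the sake of the example: PAGE_SHIFT == 12 and
 * SHMLBA == 4 * PAGE_SIZE, as on some virtually indexed cache platforms):
 *
 *	bits  = ilog2(SHMLBA >> PAGE_SHIFT) + 1 = ilog2(4) + 1 = 3
 *	shift = 3
 *	add   = (DRM_MAP_HASH_OFFSET >> PAGE_SHIFT) |
 *		((user_token >> PAGE_SHIFT) & 0x7)
 *
 * so drm_ht_just_insert_please() is asked for a key whose low three bits
 * match those of the kernel virtual pgoff, keeping the user token that is
 * later passed to mmap() on the same SHMLBA colour as the kernel mapping.
 */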
/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver).  So this loop will get
		 * skipped and we double-check that dev->agp->memory is
		 * actually set, as well as being invalid, before returning
		 * -EPERM.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first.
		 */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->master;
	*maplist = list;
	return 0;
}

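/*
 * Illustrative usage sketch (hypothetical driver code, not taken from any
 * in-tree driver): a legacy UMS driver would typically create its register
 * and SAREA mappings at load time and look them up again later by user token:
 *
 *	struct drm_local_map *regs, *sarea;
 *	int ret;
 *
 *	ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
 *				pci_resource_len(pdev, 0),
 *				_DRM_REGISTERS, _DRM_WRITE_COMBINING, &regs);
 *	if (!ret)
 *		ret = drm_legacy_addmap(dev, 0, PAGE_SIZE, _DRM_SHM,
 *					_DRM_CONTAINS_LOCK, &sarea);
 *
 * "pdev" is assumed to be the driver's struct pci_dev; the map types and
 * flags used here are the ones handled by drm_addmap_core() above.
 */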
int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);

struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
					 unsigned int token)
{
	struct drm_map_list *_entry;

	list_for_each_entry(_entry, &dev->maplist, head)
		if (_entry->user_token == token)
			return _entry->map;
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_findmap);

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit; this casting isn't very nice, but the API is set, so it's too late to change */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it.  Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 * it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}

/*
 * Get mapping information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping with the specified offset and copies its
 * information into userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *r_list = NULL;
	struct list_head *list;
	int idx;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	idx = map->offset;
	if (idx < 0)
		return -EINVAL;

	i = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist) {
		if (i == idx) {
			r_list = list_entry(list, struct drm_map_list, head);
			break;
		}
		i++;
	}
	if (!r_list || !r_list->map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	map->offset = r_list->map->offset;
	map->size = r_list->map->size;
	map->type = r_list->map->type;
	map->flags = r_list->map->flags;
	map->handle = (void *)(unsigned long) r_list->user_token;
	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Remove a map private from the list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still in use, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_legacy_pci_free(dev, &dmah);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);

void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);

void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
	struct drm_map_list *r_list, *list_temp;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if IS_ENABLED(CONFIG_AGP)
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

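	/*
	 * Worked example of the sizing math above (illustrative only; the
	 * numbers assume PAGE_SHIFT == 12, i.e. 4 KiB pages):  for
	 * request->size == 65536 with _DRM_PAGE_ALIGN set,
	 *
	 *	order      = order_base_2(65536) = 16
	 *	size       = 1 << 16             = 65536
	 *	alignment  = PAGE_ALIGN(65536)   = 65536
	 *	page_order = 16 - 12             = 4
	 *	total      = PAGE_SIZE << 4      = 65536
	 *
	 * so each buffer occupies a whole number of pages and byte_count
	 * below advances in "total"-sized steps.
	 */
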
	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */

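/**
 * Add consistent PCI memory buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * Allocates page-order sized segments with drm_pci_alloc() and carves them
 * into buffers of the requested size, only replacing the device pagelist
 * once every allocation has succeeded.  Requires CAP_SYS_ADMIN and a driver
 * with DRIVER_PCI_DMA set.
 */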
int drm_legacy_addbufs_pci(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
				      sizeof(*dma->pagelist),
				      GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;

}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);

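/**
 * Add scatter-gather buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * Like the AGP variant, but the buffers are carved out of the scatter-gather
 * area previously set up by the driver (dev->sg), at request->agp_start
 * within that area.  Requires CAP_SYS_ADMIN and a driver with DRIVER_SG set.
 */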
static int drm_legacy_addbufs_sg(struct drm_device *dev,
				 struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

#if IS_ENABLED(CONFIG_AGP)
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_legacy_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_legacy_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_legacy_addbufs_pci(dev, request);

	return ret;
}

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int __drm_legacy_infobufs(struct drm_device *dev,
			  void *data, int *p,
			  int (*f)(void *, int, struct drm_buf_entry *))
{
	struct drm_device_dma *dma = dev->dma;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (*p >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			struct drm_buf_entry *from = &dma->bufs[i];

			if (from->buf_count) {
				if (f(data, count, from) < 0)
					return -EFAULT;
				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	*p = count;

	return 0;
}

static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
{
	struct drm_buf_info *request = data;
	struct drm_buf_desc __user *to = &request->list[count];
	struct drm_buf_desc v = {.count = from->buf_count,
				 .size = from->buf_size,
				 .low_mark = from->low_mark,
				 .high_mark = from->high_mark};

	return copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags));
}

int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_buf_info *request = data;

	return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		idx = array_index_nospec(idx, dma->buf_count);
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_legacy_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
			 void __user **v,
			 int (*f)(void *, int, unsigned long,
				  struct drm_buf *),
			 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	unsigned long virtual;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	if (*p >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		*v = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (f(data, i, virtual, dma->buflist[i]) < 0) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	*p = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);

	return retcode;
}

static int map_one_buf(void *data, int idx, unsigned long virtual,
		       struct drm_buf *buf)
{
	struct drm_buf_map *request = data;
	unsigned long address = virtual + buf->offset;	/* *** */

	if (copy_to_user(&request->list[idx].idx, &buf->idx,
			 sizeof(request->list[0].idx)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].total, &buf->total,
			 sizeof(request->list[0].total)))
		return -EFAULT;
	if (clear_user(&request->list[idx].used, sizeof(int)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].address, &address,
			 sizeof(address)))
		return -EFAULT;
	return 0;
}

int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_map *request = data;

	return __drm_legacy_mapbufs(dev, data, &request->count,
				    &request->virtual, map_one_buf,
				    file_priv);
}

int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}

struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);