/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/export.h>
#include <asm/shmparam.h>
#include <drm/drmP.h>
#include "drm_legacy.h"

#include <linux/nospec.h>

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap(). Otherwise we run into cache aliasing problems
		 * on some platforms. On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
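
/*
 * Worked example (illustrative only, assuming PAGE_SHIFT == 12): a register
 * map at bus address 0xe0000000 on a 64-bit kernel is below 4 GiB and not
 * forced into the hash, so drm_map_handle() simply stores
 * hash->key = 0xe0000000 >> 12 = 0xe0000, and drm_addmap_core() below hands
 * userspace the token 0xe0000 << 12 = 0xe0000000 -- the token round-trips to
 * the original offset. Offsets above 4 GiB (and _DRM_SHM maps, whose kernel
 * virtual address never fits in 32 bits on 64-bit) instead get a key picked
 * by drm_ht_just_insert_please(), biased by DRM_MAP_HASH_OFFSET, so their
 * token is an opaque cookie that must be resolved through dev->map_hash.
 */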

/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size-1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it. Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable. So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double-check that dev->agp->memory is
		 * actually set as well as being invalid before returning -EPERM.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->master;
	*maplist = list;
	return 0;
}

int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);

struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
					 unsigned int token)
{
	struct drm_map_list *_entry;
	list_for_each_entry(_entry, &dev->maplist, head)
		if (_entry->user_token == token)
			return _entry->map;
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_findmap);
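
/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical and not
 * part of any real driver): a legacy driver would typically register its MMIO
 * range and its SAREA from the load callback and keep the returned
 * drm_local_map pointers around, then resolve tokens with drm_legacy_findmap()
 * when needed:
 *
 *	static int foo_driver_load(struct drm_device *dev, unsigned long flags)
 *	{
 *		struct drm_local_map *mmio, *sarea;
 *		int ret;
 *
 *		ret = drm_legacy_addmap(dev, pci_resource_start(dev->pdev, 0),
 *					pci_resource_len(dev->pdev, 0),
 *					_DRM_REGISTERS, _DRM_READ_ONLY, &mmio);
 *		if (ret)
 *			return ret;
 *
 *		return drm_legacy_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
 *					 _DRM_CONTAINS_LOCK, &sarea);
 *	}
 */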

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param dev DRM device.
 * \param data pointer to a struct drm_map structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it. Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 * it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}

/*
 * Get mapping information.
 *
 * \param dev DRM device.
 * \param data user argument, pointing to a struct drm_map structure.
 * \param file_priv DRM file private.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping with the specified offset and copies its
 * information into userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *r_list = NULL;
	struct list_head *list;
	int idx;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	idx = map->offset;
	if (idx < 0)
		return -EINVAL;

	i = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist) {
		if (i == idx) {
			r_list = list_entry(list, struct drm_map_list, head);
			break;
		}
		i++;
	}
	if (!r_list || !r_list->map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	map->offset = r_list->map->offset;
	map->size = r_list->map->size;
	map->type = r_list->map->type;
	map->flags = r_list->map->flags;
	map->handle = (void *)(unsigned long) r_list->user_token;
	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
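
/*
 * Userspace view (illustrative sketch, not part of this file): the ->handle
 * filled in by DRM_IOCTL_ADD_MAP / DRM_IOCTL_GET_MAP is exactly the token a
 * client then passes to mmap() as the file offset when mapping the range,
 * which is essentially what libdrm's drmMap() does with the value returned by
 * drmAddMap():
 *
 *	struct drm_map req = { .offset = idx };	// idx'th map for GET_MAP
 *	ioctl(fd, DRM_IOCTL_GET_MAP, &req);
 *	void *ptr = mmap(NULL, req.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, (off_t)(unsigned long)req.handle);
 */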

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches the map on drm_device::maplist, removes it from the list, sees if
 * it's being used, and frees any associated resources (such as MTRRs) if it's
 * not in use.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_legacy_pci_free(dev, &dmah);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);

void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);

void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
	struct drm_map_list *r_list, *list_temp;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

void drm_legacy_rmmaps(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_legacy_rmmap(dev, r_list->map);
}
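
/*
 * Illustrative call sites (outside this file): the core tears maps down in
 * two stages -- per-master maps such as the SAREA go away with their master,
 * and whatever is left goes away with the device:
 *
 *	drm_legacy_master_rmmaps(dev, master);	// e.g. on master teardown
 *	drm_legacy_rmmaps(dev);			// e.g. on final device cleanup
 *
 * Individual drivers normally only call drm_legacy_rmmap() on maps they added
 * themselves with drm_legacy_addmap().
 */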

/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly. Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about. This seems
 * unlikely.
 *
 * \param dev DRM device.
 * \param data pointer to a struct drm_map structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if IS_ENABLED(CONFIG_AGP)
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */

int drm_legacy_addbufs_pci(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
				      sizeof(*dma->pagelist),
				      GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;

}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);

static int drm_legacy_addbufs_sg(struct drm_device *dev,
				 struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param dev DRM device.
 * \param data pointer to a struct drm_buf_desc request.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

#if IS_ENABLED(CONFIG_AGP)
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_legacy_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_legacy_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_legacy_addbufs_pci(dev, request);

	return ret;
}
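
/*
 * Dispatch example (illustrative, from the userspace side): a client asking
 * for 32 PCI DMA buffers of 64 KiB each fills a struct drm_buf_desc and
 * issues DRM_IOCTL_ADD_BUFS; with neither _DRM_AGP_BUFFER nor _DRM_SG_BUFFER
 * set, the request lands in drm_legacy_addbufs_pci() above:
 *
 *	struct drm_buf_desc desc = {
 *		.count = 32,
 *		.size  = 65536,
 *		.flags = _DRM_PAGE_ALIGN,
 *	};
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 *	// on return, desc.count and desc.size reflect what was allocated
 *
 * The same structure with _DRM_AGP_BUFFER set (plus an agp_start offset)
 * would be routed to drm_legacy_addbufs_agp() instead.
 */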

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param dev DRM device.
 * \param data pointer to a drm_buf_info structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int __drm_legacy_infobufs(struct drm_device *dev,
			  void *data, int *p,
			  int (*f)(void *, int, struct drm_buf_entry *))
{
	struct drm_device_dma *dma = dev->dma;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (*p >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			struct drm_buf_entry *from = &dma->bufs[i];
			if (from->buf_count) {
				if (f(data, count, from) < 0)
					return -EFAULT;
				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	*p = count;

	return 0;
}

static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
{
	struct drm_buf_info *request = data;
	struct drm_buf_desc __user *to = &request->list[count];
	struct drm_buf_desc v = {.count = from->buf_count,
				 .size = from->buf_size,
				 .low_mark = from->low_mark,
				 .high_mark = from->high_mark};
	return copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags));
}

int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_buf_info *request = data;
	return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param dev DRM device.
 * \param data a pointer to a drm_buf_desc structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
1366 */ 1367 int drm_legacy_markbufs(struct drm_device *dev, void *data, 1368 struct drm_file *file_priv) 1369 { 1370 struct drm_device_dma *dma = dev->dma; 1371 struct drm_buf_desc *request = data; 1372 int order; 1373 struct drm_buf_entry *entry; 1374 1375 if (!drm_core_check_feature(dev, DRIVER_LEGACY)) 1376 return -EOPNOTSUPP; 1377 1378 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1379 return -EOPNOTSUPP; 1380 1381 if (!dma) 1382 return -EINVAL; 1383 1384 DRM_DEBUG("%d, %d, %d\n", 1385 request->size, request->low_mark, request->high_mark); 1386 order = order_base_2(request->size); 1387 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) 1388 return -EINVAL; 1389 entry = &dma->bufs[order]; 1390 1391 if (request->low_mark < 0 || request->low_mark > entry->buf_count) 1392 return -EINVAL; 1393 if (request->high_mark < 0 || request->high_mark > entry->buf_count) 1394 return -EINVAL; 1395 1396 entry->low_mark = request->low_mark; 1397 entry->high_mark = request->high_mark; 1398 1399 return 0; 1400 } 1401 1402 /** 1403 * Unreserve the buffers in list, previously reserved using drmDMA. 1404 * 1405 * \param inode device inode. 1406 * \param file_priv DRM file private. 1407 * \param cmd command. 1408 * \param arg pointer to a drm_buf_free structure. 1409 * \return zero on success or a negative number on failure. 1410 * 1411 * Calls free_buffer() for each used buffer. 1412 * This function is primarily used for debugging. 1413 */ 1414 int drm_legacy_freebufs(struct drm_device *dev, void *data, 1415 struct drm_file *file_priv) 1416 { 1417 struct drm_device_dma *dma = dev->dma; 1418 struct drm_buf_free *request = data; 1419 int i; 1420 int idx; 1421 struct drm_buf *buf; 1422 1423 if (!drm_core_check_feature(dev, DRIVER_LEGACY)) 1424 return -EOPNOTSUPP; 1425 1426 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1427 return -EOPNOTSUPP; 1428 1429 if (!dma) 1430 return -EINVAL; 1431 1432 DRM_DEBUG("%d\n", request->count); 1433 for (i = 0; i < request->count; i++) { 1434 if (copy_from_user(&idx, &request->list[i], sizeof(idx))) 1435 return -EFAULT; 1436 if (idx < 0 || idx >= dma->buf_count) { 1437 DRM_ERROR("Index %d (of %d max)\n", 1438 idx, dma->buf_count - 1); 1439 return -EINVAL; 1440 } 1441 idx = array_index_nospec(idx, dma->buf_count); 1442 buf = dma->buflist[idx]; 1443 if (buf->file_priv != file_priv) { 1444 DRM_ERROR("Process %d freeing buffer not owned\n", 1445 task_pid_nr(current)); 1446 return -EINVAL; 1447 } 1448 drm_legacy_free_buffer(dev, buf); 1449 } 1450 1451 return 0; 1452 } 1453 1454 /** 1455 * Maps all of the DMA buffers into client-virtual space (ioctl). 1456 * 1457 * \param inode device inode. 1458 * \param file_priv DRM file private. 1459 * \param cmd command. 1460 * \param arg pointer to a drm_buf_map structure. 1461 * \return zero on success or a negative number on failure. 1462 * 1463 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information 1464 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with 1465 * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls 1466 * drm_mmap_dma(). 
1467 */ 1468 int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p, 1469 void __user **v, 1470 int (*f)(void *, int, unsigned long, 1471 struct drm_buf *), 1472 struct drm_file *file_priv) 1473 { 1474 struct drm_device_dma *dma = dev->dma; 1475 int retcode = 0; 1476 unsigned long virtual; 1477 int i; 1478 1479 if (!drm_core_check_feature(dev, DRIVER_LEGACY)) 1480 return -EOPNOTSUPP; 1481 1482 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 1483 return -EOPNOTSUPP; 1484 1485 if (!dma) 1486 return -EINVAL; 1487 1488 spin_lock(&dev->buf_lock); 1489 if (atomic_read(&dev->buf_alloc)) { 1490 spin_unlock(&dev->buf_lock); 1491 return -EBUSY; 1492 } 1493 dev->buf_use++; /* Can't allocate more after this call */ 1494 spin_unlock(&dev->buf_lock); 1495 1496 if (*p >= dma->buf_count) { 1497 if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP)) 1498 || (drm_core_check_feature(dev, DRIVER_SG) 1499 && (dma->flags & _DRM_DMA_USE_SG))) { 1500 struct drm_local_map *map = dev->agp_buffer_map; 1501 unsigned long token = dev->agp_buffer_token; 1502 1503 if (!map) { 1504 retcode = -EINVAL; 1505 goto done; 1506 } 1507 virtual = vm_mmap(file_priv->filp, 0, map->size, 1508 PROT_READ | PROT_WRITE, 1509 MAP_SHARED, 1510 token); 1511 } else { 1512 virtual = vm_mmap(file_priv->filp, 0, dma->byte_count, 1513 PROT_READ | PROT_WRITE, 1514 MAP_SHARED, 0); 1515 } 1516 if (virtual > -1024UL) { 1517 /* Real error */ 1518 retcode = (signed long)virtual; 1519 goto done; 1520 } 1521 *v = (void __user *)virtual; 1522 1523 for (i = 0; i < dma->buf_count; i++) { 1524 if (f(data, i, virtual, dma->buflist[i]) < 0) { 1525 retcode = -EFAULT; 1526 goto done; 1527 } 1528 } 1529 } 1530 done: 1531 *p = dma->buf_count; 1532 DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode); 1533 1534 return retcode; 1535 } 1536 1537 static int map_one_buf(void *data, int idx, unsigned long virtual, 1538 struct drm_buf *buf) 1539 { 1540 struct drm_buf_map *request = data; 1541 unsigned long address = virtual + buf->offset; /* *** */ 1542 1543 if (copy_to_user(&request->list[idx].idx, &buf->idx, 1544 sizeof(request->list[0].idx))) 1545 return -EFAULT; 1546 if (copy_to_user(&request->list[idx].total, &buf->total, 1547 sizeof(request->list[0].total))) 1548 return -EFAULT; 1549 if (clear_user(&request->list[idx].used, sizeof(int))) 1550 return -EFAULT; 1551 if (copy_to_user(&request->list[idx].address, &address, 1552 sizeof(address))) 1553 return -EFAULT; 1554 return 0; 1555 } 1556 1557 int drm_legacy_mapbufs(struct drm_device *dev, void *data, 1558 struct drm_file *file_priv) 1559 { 1560 struct drm_buf_map *request = data; 1561 return __drm_legacy_mapbufs(dev, data, &request->count, 1562 &request->virtual, map_one_buf, 1563 file_priv); 1564 } 1565 1566 int drm_legacy_dma_ioctl(struct drm_device *dev, void *data, 1567 struct drm_file *file_priv) 1568 { 1569 if (!drm_core_check_feature(dev, DRIVER_LEGACY)) 1570 return -EOPNOTSUPP; 1571 1572 if (dev->driver->dma_ioctl) 1573 return dev->driver->dma_ioctl(dev, data, file_priv); 1574 else 1575 return -EINVAL; 1576 } 1577 1578 struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev) 1579 { 1580 struct drm_map_list *entry; 1581 1582 list_for_each_entry(entry, &dev->maplist, head) { 1583 if (entry->map && entry->map->type == _DRM_SHM && 1584 (entry->map->flags & _DRM_CONTAINS_LOCK)) { 1585 return entry->map; 1586 } 1587 } 1588 return NULL; 1589 } 1590 EXPORT_SYMBOL(drm_legacy_getsarea); 1591