/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/export.h>
#include <asm/shmparam.h>
#include <drm/drmP.h>
#include "drm_legacy.h"

#include <linux/nospec.h>

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

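	/*
	 * Fast path: try to use the page-shifted user token directly as the
	 * hash key, and fall back to a hashed handle only if that key is
	 * already taken (or the token does not fit in 32 bits, see above).
	 */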
	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap(). Otherwise we run into cache aliasing problems
		 * on some platforms. On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}

/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type. Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it. Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable. So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * That is not always the case, as AGP can be under the
		 * control of user space (i.e. the i810 driver). In that case
		 * this loop is skipped, so before returning -EPERM we double
		 * check both that dev->agp->memory is actually set and that
		 * the range really is invalid.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
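	/*
	 * Scatter/gather maps are expressed as offsets into the SG area set
	 * up earlier (via the legacy SG alloc ioctl); translate the offset
	 * into a kernel virtual address inside that area.
	 */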
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->master;
	*maplist = list;
	return 0;
}

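/*
 * Illustrative use only (not taken from any particular driver): a legacy
 * driver would typically create its register map at load time roughly like
 *
 *	struct drm_local_map *map;
 *	int ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
 *				    pci_resource_len(pdev, 0),
 *				    _DRM_REGISTERS, _DRM_WRITE_COMBINING,
 *				    &map);
 *
 * where "pdev" stands for the driver's struct pci_dev.
 */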
int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* Avoid a warning on 64-bit: this cast isn't very nice, but the API
	 * is already set, so it's too late to change it. */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it. Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 * it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}

/*
 * Get mapping information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping with the specified offset and copies its
 * information into userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *r_list = NULL;
	struct list_head *list;
	int idx;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	idx = map->offset;
	if (idx < 0)
		return -EINVAL;

	i = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist) {
		if (i == idx) {
			r_list = list_entry(list, struct drm_map_list, head);
			break;
		}
		i++;
	}
	if (!r_list || !r_list->map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	map->offset = r_list->map->offset;
	map->size = r_list->map->size;
	map->type = r_list->map->type;
	map->flags = r_list->map->flags;
	map->handle = (void *)(unsigned long) r_list->user_token;
	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Remove a map private from the list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still in use, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_legacy_pci_free(dev, &dmah);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);

void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);

void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
	struct drm_map_list *r_list, *list_temp;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly. Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about. This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
	    !drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list has wrapped around to the head pointer, or it's empty,
	 * and we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if IS_ENABLED(CONFIG_AGP)
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

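	/*
	 * Buffer sizes are rounded up to a power of two; dma->bufs[] is
	 * indexed by this order, and each order may only be populated once
	 * per device.
	 */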
	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */

int drm_legacy_addbufs_pci(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

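	/*
	 * Each segment allocated below is one physically contiguous
	 * drm_pci_alloc() chunk of PAGE_SIZE << page_order bytes, which is
	 * then carved into 'alignment'-sized buffers.
	 */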
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
				      sizeof(*dma->pagelist),
				      GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;

}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);

static int drm_legacy_addbufs_sg(struct drm_device *dev,
				 struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

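	/*
	 * Unlike the AGP case above, buf->address here is a kernel virtual
	 * address inside the scatter/gather area (dev->sg->virtual), not a
	 * bus address.
	 */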
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

#if IS_ENABLED(CONFIG_AGP)
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_legacy_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_legacy_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_legacy_addbufs_pci(dev, request);

	return ret;
}

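/*
 * Note: __drm_legacy_infobufs() and __drm_legacy_mapbufs() below take a
 * per-buffer copy-out callback so that the 32-bit compat ioctls (see
 * drm_ioc32.c) can reuse the same core walk with a different user-space
 * struct layout.
 */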
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int __drm_legacy_infobufs(struct drm_device *dev,
			  void *data, int *p,
			  int (*f)(void *, int, struct drm_buf_entry *))
{
	struct drm_device_dma *dma = dev->dma;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (*p >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			struct drm_buf_entry *from = &dma->bufs[i];
			if (from->buf_count) {
				if (f(data, count, from) < 0)
					return -EFAULT;
				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	*p = count;

	return 0;
}

static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
{
	struct drm_buf_info *request = data;
	struct drm_buf_desc __user *to = &request->list[count];
	struct drm_buf_desc v = {.count = from->buf_count,
				 .size = from->buf_size,
				 .low_mark = from->low_mark,
				 .high_mark = from->high_mark};
	return copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags));
}

int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_buf_info *request = data;
	return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the low and high water marks of the respective drm_device_dma::bufs
 * entry.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		idx = array_index_nospec(idx, dma->buf_count);
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_legacy_free_buffer(dev, buf);
	}

	return 0;
}

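/*
 * Illustrative flow (not part of this file): user space typically calls
 * drmMapBufs(), which issues DRM_IOCTL_MAP_BUFS and lands in
 * drm_legacy_mapbufs() below; the per-buffer addresses returned there are
 * then used with drmDMA() or a driver-specific DMA ioctl.
 */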
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
			 void __user **v,
			 int (*f)(void *, int, unsigned long,
				  struct drm_buf *),
			 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	unsigned long virtual;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	if (*p >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		*v = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (f(data, i, virtual, dma->buflist[i]) < 0) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
done:
	*p = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);

	return retcode;
}

static int map_one_buf(void *data, int idx, unsigned long virtual,
		       struct drm_buf *buf)
{
	struct drm_buf_map *request = data;
	unsigned long address = virtual + buf->offset;	/* *** */

	if (copy_to_user(&request->list[idx].idx, &buf->idx,
			 sizeof(request->list[0].idx)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].total, &buf->total,
			 sizeof(request->list[0].total)))
		return -EFAULT;
	if (clear_user(&request->list[idx].used, sizeof(int)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].address, &address,
			 sizeof(address)))
		return -EFAULT;
	return 0;
}

int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_map *request = data;
	return __drm_legacy_mapbufs(dev, data, &request->count,
				    &request->virtual, map_one_buf,
				    file_priv);
}

int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}

struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);