/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <asm/shmparam.h>
#include "drmP.h"

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we ignore the map
		 * offset for maps of type _DRM_FRAME_BUFFER or _DRM_REGISTERS.
		 * It is assumed that each driver will have only one resource of
		 * each type.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}

/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list ** maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size-1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS) {
			map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
				  dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_GEM:
		DRM_ERROR("tried to addmap GEM object\n");
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->primary->master;
	*maplist = list;
	return 0;
}

int drm_addmap(struct drm_device * dev, resource_size_t offset,
	       unsigned int size, enum drm_map_type type,
	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}

EXPORT_SYMBOL(drm_addmap);

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 */
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit; this cast isn't very nice, but the API
	 * is already set, so it's too late to change it */
	map->handle = (void *)(unsigned long)maplist->user_token;
	return 0;
}

/**
 * Remove a map from the list and deallocate its resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is in use, and frees any associated resources (such as
 * MTRRs) if it is not.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	case _DRM_GEM:
		DRM_ERROR("tried to rmmap GEM object\n");
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);

int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */

/**
 * Add consistent PCI memory buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 */
int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
				sizeof(*dma->pagelist), GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;

}
EXPORT_SYMBOL(drm_addbufs_pci);

/**
 * Add scatter-gather buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 */
static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}


/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if __OS_HAS_AGP
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, request);
	else
		ret = drm_addbufs_pci(dev, request);

	return ret;
}

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				struct drm_freelist *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request->low_mark;
	entry->freelist.high_mark = request->high_mark;

	return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (request->count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);