xref: /openbmc/linux/drivers/gpu/drm/drm_bufs.c (revision 82ced6fd)
1 /**
2  * \file drm_bufs.c
3  * Generic buffer template
4  *
5  * \author Rickard E. (Rik) Faith <faith@valinux.com>
6  * \author Gareth Hughes <gareth@valinux.com>
7  */
8 
9 /*
10  * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
11  *
12  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14  * All Rights Reserved.
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a
17  * copy of this software and associated documentation files (the "Software"),
18  * to deal in the Software without restriction, including without limitation
19  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20  * and/or sell copies of the Software, and to permit persons to whom the
21  * Software is furnished to do so, subject to the following conditions:
22  *
23  * The above copyright notice and this permission notice (including the next
24  * paragraph) shall be included in all copies or substantial portions of the
25  * Software.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
30  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33  * OTHER DEALINGS IN THE SOFTWARE.
34  */
35 
36 #include <linux/vmalloc.h>
37 #include <linux/log2.h>
38 #include <asm/shmparam.h>
39 #include "drmP.h"
40 
41 resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
42 {
43 	return pci_resource_start(dev->pdev, resource);
44 }
45 EXPORT_SYMBOL(drm_get_resource_start);
46 
47 resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
48 {
49 	return pci_resource_len(dev->pdev, resource);
50 }
51 
52 EXPORT_SYMBOL(drm_get_resource_len);
53 
54 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
55 						  struct drm_local_map *map)
56 {
57 	struct drm_map_list *entry;
58 	list_for_each_entry(entry, &dev->maplist, head) {
59 		/*
60 		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
61 		 * while PCI resources may live above that, we ignore the map
62 		 * offset for maps of type _DRM_FRAME_BUFFER or _DRM_REGISTERS.
63 		 * It is assumed that each driver will have only one resource of
64 		 * each type.
65 		 */
66 		if (!entry->map ||
67 		    map->type != entry->map->type ||
68 		    entry->master != dev->primary->master)
69 			continue;
70 		switch (map->type) {
71 		case _DRM_SHM:
72 			if (map->flags != _DRM_CONTAINS_LOCK)
73 				break;
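			/* fall through - an SHM map that carries the lock matches on type alone */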
74 		case _DRM_REGISTERS:
75 		case _DRM_FRAME_BUFFER:
76 			return entry;
77 		default: /* Make gcc happy */
78 			;
79 		}
80 		if (entry->map->offset == map->offset)
81 			return entry;
82 	}
83 
84 	return NULL;
85 }
86 
87 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
88 			  unsigned long user_token, int hashed_handle, int shm)
89 {
90 	int use_hashed_handle, shift;
91 	unsigned long add;
92 
93 #if (BITS_PER_LONG == 64)
94 	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
95 #elif (BITS_PER_LONG == 32)
96 	use_hashed_handle = hashed_handle;
97 #else
98 #error Unsupported long size. Neither 64 nor 32 bits.
99 #endif
100 
101 	if (!use_hashed_handle) {
102 		int ret;
103 		hash->key = user_token >> PAGE_SHIFT;
104 		ret = drm_ht_insert_item(&dev->map_hash, hash);
105 		if (ret != -EINVAL)
106 			return ret;
107 	}
108 
109 	shift = 0;
110 	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
111 	if (shm && (SHMLBA > PAGE_SIZE)) {
112 		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
113 
114 		/* For shared memory, we have to preserve the SHMLBA
115 		 * bits of the eventual vma->vm_pgoff value during
116 		 * mmap().  Otherwise we run into cache aliasing problems
117 		 * on some platforms.  On these platforms, the pgoff of
118 		 * a mmap() request is used to pick a suitable virtual
119 		 * address for the mmap() region such that it will not
120 		 * cause cache aliasing problems.
121 		 *
122 		 * Therefore, make sure the SHMLBA relevant bits of the
123 		 * hash value we use are equal to those in the original
124 		 * kernel virtual address.
125 		 */
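		/* e.g. with 4 KiB pages and SHMLBA == 16 KiB: bits == ilog2(4) + 1 == 3,
		 * so the three low-order page bits of the kernel address are carried
		 * over into the generated handle.
		 */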
126 		shift = bits;
127 		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
128 	}
129 
130 	return drm_ht_just_insert_please(&dev->map_hash, hash,
131 					 user_token, 32 - PAGE_SHIFT - 3,
132 					 shift, add);
133 }
134 
135 /**
136  * Core function to create a range of memory available for mapping by a
137  * non-root process.
138  *
139  * Adjusts the memory offset to its absolute value according to the mapping
140  * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
141  * applicable and if supported by the kernel.
142  */
143 static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
144 			   unsigned int size, enum drm_map_type type,
145 			   enum drm_map_flags flags,
146 			   struct drm_map_list ** maplist)
147 {
148 	struct drm_local_map *map;
149 	struct drm_map_list *list;
150 	drm_dma_handle_t *dmah;
151 	unsigned long user_token;
152 	int ret;
153 
154 	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
155 	if (!map)
156 		return -ENOMEM;
157 
158 	map->offset = offset;
159 	map->size = size;
160 	map->flags = flags;
161 	map->type = type;
162 
163 	/* Only allow shared memory to be removable since we only keep enough
164 	 * bookkeeping information about shared memory to allow for removal
165 	 * when processes fork.
166 	 */
167 	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
168 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
169 		return -EINVAL;
170 	}
171 	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
172 		  (unsigned long long)map->offset, map->size, map->type);
173 
174 	/* page-align _DRM_SHM maps. They are allocated here so no security
175 	 * hole is created, and it works around various broken drivers that use
176 	 * a non-aligned quantity to map the SAREA. --BenH
177 	 */
178 	if (map->type == _DRM_SHM)
179 		map->size = PAGE_ALIGN(map->size);
180 
181 	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
182 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
183 		return -EINVAL;
184 	}
185 	map->mtrr = -1;
186 	map->handle = NULL;
187 
188 	switch (map->type) {
189 	case _DRM_REGISTERS:
190 	case _DRM_FRAME_BUFFER:
191 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
192 		if (map->offset + (map->size-1) < map->offset ||
193 		    map->offset < virt_to_phys(high_memory)) {
194 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
195 			return -EINVAL;
196 		}
197 #endif
198 #ifdef __alpha__
199 		map->offset += dev->hose->mem_space->start;
200 #endif
201 		/* Some drivers preinitialize some maps, without the X Server
202 		 * needing to be aware of it.  Therefore, we just return success
203 		 * when the server tries to create a duplicate map.
204 		 */
205 		list = drm_find_matching_map(dev, map);
206 		if (list != NULL) {
207 			if (list->map->size != map->size) {
208 				DRM_DEBUG("Matching maps of type %d with "
209 					  "mismatched sizes, (%ld vs %ld)\n",
210 					  map->type, map->size,
211 					  list->map->size);
212 				list->map->size = map->size;
213 			}
214 
215 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
216 			*maplist = list;
217 			return 0;
218 		}
219 
220 		if (drm_core_has_MTRR(dev)) {
221 			if (map->type == _DRM_FRAME_BUFFER ||
222 			    (map->flags & _DRM_WRITE_COMBINING)) {
223 				map->mtrr = mtrr_add(map->offset, map->size,
224 						     MTRR_TYPE_WRCOMB, 1);
225 			}
226 		}
227 		if (map->type == _DRM_REGISTERS) {
228 			map->handle = ioremap(map->offset, map->size);
229 			if (!map->handle) {
230 				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
231 				return -ENOMEM;
232 			}
233 		}
234 
235 		break;
236 	case _DRM_SHM:
237 		list = drm_find_matching_map(dev, map);
238 		if (list != NULL) {
239 			if (list->map->size != map->size) {
240 				DRM_DEBUG("Matching maps of type %d with "
241 					  "mismatched sizes, (%ld vs %ld)\n",
242 					  map->type, map->size, list->map->size);
243 				list->map->size = map->size;
244 			}
245 
246 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
247 			*maplist = list;
248 			return 0;
249 		}
250 		map->handle = vmalloc_user(map->size);
251 		DRM_DEBUG("%lu %d %p\n",
252 			  map->size, drm_order(map->size), map->handle);
253 		if (!map->handle) {
254 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
255 			return -ENOMEM;
256 		}
257 		map->offset = (unsigned long)map->handle;
258 		if (map->flags & _DRM_CONTAINS_LOCK) {
259 			/* Prevent a 2nd X Server from creating a 2nd lock */
260 			if (dev->primary->master->lock.hw_lock != NULL) {
261 				vfree(map->handle);
262 				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
263 				return -EBUSY;
264 			}
265 			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
266 		}
267 		break;
268 	case _DRM_AGP: {
269 		struct drm_agp_mem *entry;
270 		int valid = 0;
271 
272 		if (!drm_core_has_AGP(dev)) {
273 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
274 			return -EINVAL;
275 		}
276 #ifdef __alpha__
277 		map->offset += dev->hose->mem_space->start;
278 #endif
279 		/* In some cases (i810 driver), user space may have already
280 		 * added the AGP base itself, because dev->agp->base previously
281 		 * only got set during AGP enable.  So, only add the base
282 		 * address if the map's offset isn't already within the
283 		 * aperture.
284 		 */
285 		if (map->offset < dev->agp->base ||
286 		    map->offset > dev->agp->base +
287 		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
288 			map->offset += dev->agp->base;
289 		}
290 		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
291 
292 		/* This assumes the DRM is in total control of AGP space.
293 		 * That is not always the case, as AGP can be under the control
294 		 * of user space (i.e. the i810 driver).  In that case the loop
295 		 * below is simply skipped, so we only return -EPERM when
296 		 * dev->agp->memory is actually populated and the map is still invalid.
297 		 */
298 		list_for_each_entry(entry, &dev->agp->memory, head) {
299 			if ((map->offset >= entry->bound) &&
300 			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
301 				valid = 1;
302 				break;
303 			}
304 		}
305 		if (!list_empty(&dev->agp->memory) && !valid) {
306 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
307 			return -EPERM;
308 		}
309 		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
310 			  (unsigned long long)map->offset, map->size);
311 
312 		break;
313 	}
314 	case _DRM_GEM:
315 		DRM_ERROR("tried to addmap GEM object\n");
316 		break;
317 	case _DRM_SCATTER_GATHER:
318 		if (!dev->sg) {
319 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
320 			return -EINVAL;
321 		}
322 		map->offset += (unsigned long)dev->sg->virtual;
323 		break;
324 	case _DRM_CONSISTENT:
325 		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
326 		 * As we're limiting the address to 2^32-1 (or less),
327 		 * casting it down to 32 bits is no problem, but we
328 		 * need to point to a 64-bit variable first. */
329 		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
330 		if (!dmah) {
331 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
332 			return -ENOMEM;
333 		}
334 		map->handle = dmah->vaddr;
335 		map->offset = (unsigned long)dmah->busaddr;
336 		kfree(dmah);
337 		break;
338 	default:
339 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
340 		return -EINVAL;
341 	}
342 
343 	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
344 	if (!list) {
345 		if (map->type == _DRM_REGISTERS)
346 			iounmap(map->handle);
347 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
348 		return -EINVAL;
349 	}
350 	memset(list, 0, sizeof(*list));
351 	list->map = map;
352 
353 	mutex_lock(&dev->struct_mutex);
354 	list_add(&list->head, &dev->maplist);
355 
356 	/* Assign a 32-bit handle */
357 	/* We do it here so that dev->struct_mutex protects the increment */
358 	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
359 		map->offset;
360 	ret = drm_map_handle(dev, &list->hash, user_token, 0,
361 			     (map->type == _DRM_SHM));
362 	if (ret) {
363 		if (map->type == _DRM_REGISTERS)
364 			iounmap(map->handle);
365 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
366 		drm_free(list, sizeof(*list), DRM_MEM_MAPS);
367 		mutex_unlock(&dev->struct_mutex);
368 		return ret;
369 	}
370 
371 	list->user_token = list->hash.key << PAGE_SHIFT;
372 	mutex_unlock(&dev->struct_mutex);
373 
374 	list->master = dev->primary->master;
375 	*maplist = list;
376 	return 0;
377 }
378 
379 int drm_addmap(struct drm_device * dev, resource_size_t offset,
380 	       unsigned int size, enum drm_map_type type,
381 	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
382 {
383 	struct drm_map_list *list;
384 	int rc;
385 
386 	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
387 	if (!rc)
388 		*map_ptr = list->map;
389 	return rc;
390 }
391 
392 EXPORT_SYMBOL(drm_addmap);
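/*
 * Hypothetical usage sketch (not taken from a specific driver): a driver
 * mapping its register BAR at init time would typically do something like
 *
 *	struct drm_local_map *map;
 *	int ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *			     drm_get_resource_len(dev, 0),
 *			     _DRM_REGISTERS, _DRM_READ_ONLY, &map);
 *	if (ret)
 *		return ret;
 *
 * using whichever PCI resource index actually holds its MMIO registers.
 */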
393 
394 /**
395  * Ioctl to specify a range of memory that is available for mapping by a
396  * non-root process.
397  *
398  * \param inode device inode.
399  * \param file_priv DRM file private.
400  * \param cmd command.
401  * \param arg pointer to a drm_map structure.
402  * \return zero on success or a negative value on error.
403  *
404  */
405 int drm_addmap_ioctl(struct drm_device *dev, void *data,
406 		     struct drm_file *file_priv)
407 {
408 	struct drm_map *map = data;
409 	struct drm_map_list *maplist;
410 	int err;
411 
412 	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
413 		return -EPERM;
414 
415 	err = drm_addmap_core(dev, map->offset, map->size, map->type,
416 			      map->flags, &maplist);
417 
418 	if (err)
419 		return err;
420 
421 	/* avoid a warning on 64-bit; this cast isn't very nice, but the API is already set, so it's too late to change it */
422 	map->handle = (void *)(unsigned long)maplist->user_token;
423 	return 0;
424 }
425 
426 /**
427  * Remove a map private from the list and deallocate resources if the mapping
428  * isn't in use.
429  *
430  * Searches for the map on drm_device::maplist, removes it from the list, checks
431  * whether it is still in use, and frees any associated resources (such as MTRRs)
432  * if it is not.
433  *
434  * \sa drm_addmap
435  */
436 int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
437 {
438 	struct drm_map_list *r_list = NULL, *list_t;
439 	drm_dma_handle_t dmah;
440 	int found = 0;
441 	struct drm_master *master;
442 
443 	/* Find the list entry for the map and remove it */
444 	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
445 		if (r_list->map == map) {
446 			master = r_list->master;
447 			list_del(&r_list->head);
448 			drm_ht_remove_key(&dev->map_hash,
449 					  r_list->user_token >> PAGE_SHIFT);
450 			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
451 			found = 1;
452 			break;
453 		}
454 	}
455 
456 	if (!found)
457 		return -EINVAL;
458 
459 	switch (map->type) {
460 	case _DRM_REGISTERS:
461 		iounmap(map->handle);
462 		/* FALLTHROUGH */
463 	case _DRM_FRAME_BUFFER:
464 		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
465 			int retcode;
466 			retcode = mtrr_del(map->mtrr, map->offset, map->size);
467 			DRM_DEBUG("mtrr_del=%d\n", retcode);
468 		}
469 		break;
470 	case _DRM_SHM:
471 		vfree(map->handle);
472 		if (master) {
473 			if (dev->sigdata.lock == master->lock.hw_lock)
474 				dev->sigdata.lock = NULL;
475 			master->lock.hw_lock = NULL;   /* SHM removed */
476 			master->lock.file_priv = NULL;
477 			wake_up_interruptible_all(&master->lock.lock_queue);
478 		}
479 		break;
480 	case _DRM_AGP:
481 	case _DRM_SCATTER_GATHER:
482 		break;
483 	case _DRM_CONSISTENT:
484 		dmah.vaddr = map->handle;
485 		dmah.busaddr = map->offset;
486 		dmah.size = map->size;
487 		__drm_pci_free(dev, &dmah);
488 		break;
489 	case _DRM_GEM:
490 		DRM_ERROR("tried to rmmap GEM object\n");
491 		break;
492 	}
493 	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
494 
495 	return 0;
496 }
497 EXPORT_SYMBOL(drm_rmmap_locked);
498 
499 int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
500 {
501 	int ret;
502 
503 	mutex_lock(&dev->struct_mutex);
504 	ret = drm_rmmap_locked(dev, map);
505 	mutex_unlock(&dev->struct_mutex);
506 
507 	return ret;
508 }
509 EXPORT_SYMBOL(drm_rmmap);
510 
511 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
512  * the last close of the device, and this is necessary for cleanup when things
513  * exit uncleanly.  Therefore, having userland manually remove mappings seems
514  * like a pointless exercise since they're going away anyway.
515  *
516  * One use case might be after addmap is allowed for normal users for SHM and
517  * gets used by drivers that the server doesn't need to care about.  This seems
518  * unlikely.
519  *
520  * \param inode device inode.
521  * \param file_priv DRM file private.
522  * \param cmd command.
523  * \param arg pointer to a struct drm_map structure.
524  * \return zero on success or a negative value on error.
525  */
526 int drm_rmmap_ioctl(struct drm_device *dev, void *data,
527 		    struct drm_file *file_priv)
528 {
529 	struct drm_map *request = data;
530 	struct drm_local_map *map = NULL;
531 	struct drm_map_list *r_list;
532 	int ret;
533 
534 	mutex_lock(&dev->struct_mutex);
535 	list_for_each_entry(r_list, &dev->maplist, head) {
536 		if (r_list->map &&
537 		    r_list->user_token == (unsigned long)request->handle &&
538 		    r_list->map->flags & _DRM_REMOVABLE) {
539 			map = r_list->map;
540 			break;
541 		}
542 	}
543 
544 	/* List has wrapped around to the head pointer, or it's empty and we didn't
545 	 * find anything.
546 	 */
547 	if (list_empty(&dev->maplist) || !map) {
548 		mutex_unlock(&dev->struct_mutex);
549 		return -EINVAL;
550 	}
551 
552 	/* Register and framebuffer maps are permanent */
553 	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
554 		mutex_unlock(&dev->struct_mutex);
555 		return 0;
556 	}
557 
558 	ret = drm_rmmap_locked(dev, map);
559 
560 	mutex_unlock(&dev->struct_mutex);
561 
562 	return ret;
563 }
564 
565 /**
566  * Cleanup after an error on one of the addbufs() functions.
567  *
568  * \param dev DRM device.
569  * \param entry buffer entry where the error occurred.
570  *
571  * Frees any pages and buffers associated with the given entry.
572  */
573 static void drm_cleanup_buf_error(struct drm_device * dev,
574 				  struct drm_buf_entry * entry)
575 {
576 	int i;
577 
578 	if (entry->seg_count) {
579 		for (i = 0; i < entry->seg_count; i++) {
580 			if (entry->seglist[i]) {
581 				drm_pci_free(dev, entry->seglist[i]);
582 			}
583 		}
584 		drm_free(entry->seglist,
585 			 entry->seg_count *
586 			 sizeof(*entry->seglist), DRM_MEM_SEGS);
587 
588 		entry->seg_count = 0;
589 	}
590 
591 	if (entry->buf_count) {
592 		for (i = 0; i < entry->buf_count; i++) {
593 			if (entry->buflist[i].dev_private) {
594 				drm_free(entry->buflist[i].dev_private,
595 					 entry->buflist[i].dev_priv_size,
596 					 DRM_MEM_BUFS);
597 			}
598 		}
599 		drm_free(entry->buflist,
600 			 entry->buf_count *
601 			 sizeof(*entry->buflist), DRM_MEM_BUFS);
602 
603 		entry->buf_count = 0;
604 	}
605 }
606 
607 #if __OS_HAS_AGP
608 /**
609  * Add AGP buffers for DMA transfers.
610  *
611  * \param dev struct drm_device to which the buffers are to be added.
612  * \param request pointer to a struct drm_buf_desc describing the request.
613  * \return zero on success or a negative number on failure.
614  *
615  * After some sanity checks, creates a drm_buf structure for each buffer and
616  * reallocates the buffer list of the same size order to accommodate the new
617  * buffers.
618  */
619 int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
620 {
621 	struct drm_device_dma *dma = dev->dma;
622 	struct drm_buf_entry *entry;
623 	struct drm_agp_mem *agp_entry;
624 	struct drm_buf *buf;
625 	unsigned long offset;
626 	unsigned long agp_offset;
627 	int count;
628 	int order;
629 	int size;
630 	int alignment;
631 	int page_order;
632 	int total;
633 	int byte_count;
634 	int i, valid;
635 	struct drm_buf **temp_buflist;
636 
637 	if (!dma)
638 		return -EINVAL;
639 
640 	count = request->count;
641 	order = drm_order(request->size);
642 	size = 1 << order;
643 
644 	alignment = (request->flags & _DRM_PAGE_ALIGN)
645 	    ? PAGE_ALIGN(size) : size;
646 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
647 	total = PAGE_SIZE << page_order;
648 
649 	byte_count = 0;
650 	agp_offset = dev->agp->base + request->agp_start;
651 
652 	DRM_DEBUG("count:      %d\n", count);
653 	DRM_DEBUG("order:      %d\n", order);
654 	DRM_DEBUG("size:       %d\n", size);
655 	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
656 	DRM_DEBUG("alignment:  %d\n", alignment);
657 	DRM_DEBUG("page_order: %d\n", page_order);
658 	DRM_DEBUG("total:      %d\n", total);
659 
660 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
661 		return -EINVAL;
662 	if (dev->queue_count)
663 		return -EBUSY;	/* Not while in use */
664 
665 	/* Make sure buffers are located in AGP memory that we own */
666 	valid = 0;
667 	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
668 		if ((agp_offset >= agp_entry->bound) &&
669 		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
670 			valid = 1;
671 			break;
672 		}
673 	}
674 	if (!list_empty(&dev->agp->memory) && !valid) {
675 		DRM_DEBUG("zone invalid\n");
676 		return -EINVAL;
677 	}
678 	spin_lock(&dev->count_lock);
679 	if (dev->buf_use) {
680 		spin_unlock(&dev->count_lock);
681 		return -EBUSY;
682 	}
683 	atomic_inc(&dev->buf_alloc);
684 	spin_unlock(&dev->count_lock);
685 
686 	mutex_lock(&dev->struct_mutex);
687 	entry = &dma->bufs[order];
688 	if (entry->buf_count) {
689 		mutex_unlock(&dev->struct_mutex);
690 		atomic_dec(&dev->buf_alloc);
691 		return -ENOMEM;	/* May only call once for each order */
692 	}
693 
694 	if (count < 0 || count > 4096) {
695 		mutex_unlock(&dev->struct_mutex);
696 		atomic_dec(&dev->buf_alloc);
697 		return -EINVAL;
698 	}
699 
700 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
701 				   DRM_MEM_BUFS);
702 	if (!entry->buflist) {
703 		mutex_unlock(&dev->struct_mutex);
704 		atomic_dec(&dev->buf_alloc);
705 		return -ENOMEM;
706 	}
707 	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
708 
709 	entry->buf_size = size;
710 	entry->page_order = page_order;
711 
712 	offset = 0;
713 
714 	while (entry->buf_count < count) {
715 		buf = &entry->buflist[entry->buf_count];
716 		buf->idx = dma->buf_count + entry->buf_count;
717 		buf->total = alignment;
718 		buf->order = order;
719 		buf->used = 0;
720 
721 		buf->offset = (dma->byte_count + offset);
722 		buf->bus_address = agp_offset + offset;
723 		buf->address = (void *)(agp_offset + offset);
724 		buf->next = NULL;
725 		buf->waiting = 0;
726 		buf->pending = 0;
727 		init_waitqueue_head(&buf->dma_wait);
728 		buf->file_priv = NULL;
729 
730 		buf->dev_priv_size = dev->driver->dev_priv_size;
731 		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
732 		if (!buf->dev_private) {
733 			/* Set count correctly so we free the proper amount. */
734 			entry->buf_count = count;
735 			drm_cleanup_buf_error(dev, entry);
736 			mutex_unlock(&dev->struct_mutex);
737 			atomic_dec(&dev->buf_alloc);
738 			return -ENOMEM;
739 		}
740 		memset(buf->dev_private, 0, buf->dev_priv_size);
741 
742 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
743 
744 		offset += alignment;
745 		entry->buf_count++;
746 		byte_count += PAGE_SIZE << page_order;
747 	}
748 
749 	DRM_DEBUG("byte_count: %d\n", byte_count);
750 
751 	temp_buflist = drm_realloc(dma->buflist,
752 				   dma->buf_count * sizeof(*dma->buflist),
753 				   (dma->buf_count + entry->buf_count)
754 				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
755 	if (!temp_buflist) {
756 		/* Free the entry because it isn't valid */
757 		drm_cleanup_buf_error(dev, entry);
758 		mutex_unlock(&dev->struct_mutex);
759 		atomic_dec(&dev->buf_alloc);
760 		return -ENOMEM;
761 	}
762 	dma->buflist = temp_buflist;
763 
764 	for (i = 0; i < entry->buf_count; i++) {
765 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
766 	}
767 
768 	dma->buf_count += entry->buf_count;
769 	dma->seg_count += entry->seg_count;
770 	dma->page_count += byte_count >> PAGE_SHIFT;
771 	dma->byte_count += byte_count;
772 
773 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
774 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
775 
776 	mutex_unlock(&dev->struct_mutex);
777 
778 	request->count = entry->buf_count;
779 	request->size = size;
780 
781 	dma->flags = _DRM_DMA_USE_AGP;
782 
783 	atomic_dec(&dev->buf_alloc);
784 	return 0;
785 }
786 EXPORT_SYMBOL(drm_addbufs_agp);
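/*
 * Hypothetical usage sketch: to expose 16 buffers of 64 KiB at the start of an
 * already-bound AGP region, a driver (or the X server via the ADDBUFS ioctl)
 * would fill in a struct drm_buf_desc with .count = 16, .size = 0x10000,
 * .flags = _DRM_AGP_BUFFER and .agp_start = 0, then pass it to
 * drm_addbufs_agp(dev, &desc).
 */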
787 #endif				/* __OS_HAS_AGP */
788 
789 int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
790 {
791 	struct drm_device_dma *dma = dev->dma;
792 	int count;
793 	int order;
794 	int size;
795 	int total;
796 	int page_order;
797 	struct drm_buf_entry *entry;
798 	drm_dma_handle_t *dmah;
799 	struct drm_buf *buf;
800 	int alignment;
801 	unsigned long offset;
802 	int i;
803 	int byte_count;
804 	int page_count;
805 	unsigned long *temp_pagelist;
806 	struct drm_buf **temp_buflist;
807 
808 	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
809 		return -EINVAL;
810 
811 	if (!dma)
812 		return -EINVAL;
813 
814 	if (!capable(CAP_SYS_ADMIN))
815 		return -EPERM;
816 
817 	count = request->count;
818 	order = drm_order(request->size);
819 	size = 1 << order;
820 
821 	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
822 		  request->count, request->size, size, order, dev->queue_count);
823 
824 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
825 		return -EINVAL;
826 	if (dev->queue_count)
827 		return -EBUSY;	/* Not while in use */
828 
829 	alignment = (request->flags & _DRM_PAGE_ALIGN)
830 	    ? PAGE_ALIGN(size) : size;
831 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
832 	total = PAGE_SIZE << page_order;
833 
834 	spin_lock(&dev->count_lock);
835 	if (dev->buf_use) {
836 		spin_unlock(&dev->count_lock);
837 		return -EBUSY;
838 	}
839 	atomic_inc(&dev->buf_alloc);
840 	spin_unlock(&dev->count_lock);
841 
842 	mutex_lock(&dev->struct_mutex);
843 	entry = &dma->bufs[order];
844 	if (entry->buf_count) {
845 		mutex_unlock(&dev->struct_mutex);
846 		atomic_dec(&dev->buf_alloc);
847 		return -ENOMEM;	/* May only call once for each order */
848 	}
849 
850 	if (count < 0 || count > 4096) {
851 		mutex_unlock(&dev->struct_mutex);
852 		atomic_dec(&dev->buf_alloc);
853 		return -EINVAL;
854 	}
855 
856 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
857 				   DRM_MEM_BUFS);
858 	if (!entry->buflist) {
859 		mutex_unlock(&dev->struct_mutex);
860 		atomic_dec(&dev->buf_alloc);
861 		return -ENOMEM;
862 	}
863 	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
864 
865 	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
866 				   DRM_MEM_SEGS);
867 	if (!entry->seglist) {
868 		drm_free(entry->buflist,
869 			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
870 		mutex_unlock(&dev->struct_mutex);
871 		atomic_dec(&dev->buf_alloc);
872 		return -ENOMEM;
873 	}
874 	memset(entry->seglist, 0, count * sizeof(*entry->seglist));
875 
876 	/* Keep the original pagelist until we know all the allocations
877 	 * have succeeded
878 	 */
879 	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
880 				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
881 	if (!temp_pagelist) {
882 		drm_free(entry->buflist,
883 			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
884 		drm_free(entry->seglist,
885 			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
886 		mutex_unlock(&dev->struct_mutex);
887 		atomic_dec(&dev->buf_alloc);
888 		return -ENOMEM;
889 	}
890 	memcpy(temp_pagelist,
891 	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
892 	DRM_DEBUG("pagelist: %d entries\n",
893 		  dma->page_count + (count << page_order));
894 
895 	entry->buf_size = size;
896 	entry->page_order = page_order;
897 	byte_count = 0;
898 	page_count = 0;
899 
900 	while (entry->buf_count < count) {
901 
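		/* one segment of PAGE_SIZE << page_order bytes, 4 KiB aligned, DMA-able below 4 GiB */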
902 		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
903 
904 		if (!dmah) {
905 			/* Set count correctly so we free the proper amount. */
906 			entry->buf_count = count;
907 			entry->seg_count = count;
908 			drm_cleanup_buf_error(dev, entry);
909 			drm_free(temp_pagelist,
910 				 (dma->page_count + (count << page_order))
911 				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
912 			mutex_unlock(&dev->struct_mutex);
913 			atomic_dec(&dev->buf_alloc);
914 			return -ENOMEM;
915 		}
916 		entry->seglist[entry->seg_count++] = dmah;
917 		for (i = 0; i < (1 << page_order); i++) {
918 			DRM_DEBUG("page %d @ 0x%08lx\n",
919 				  dma->page_count + page_count,
920 				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
921 			temp_pagelist[dma->page_count + page_count++]
922 				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
923 		}
924 		for (offset = 0;
925 		     offset + size <= total && entry->buf_count < count;
926 		     offset += alignment, ++entry->buf_count) {
927 			buf = &entry->buflist[entry->buf_count];
928 			buf->idx = dma->buf_count + entry->buf_count;
929 			buf->total = alignment;
930 			buf->order = order;
931 			buf->used = 0;
932 			buf->offset = (dma->byte_count + byte_count + offset);
933 			buf->address = (void *)(dmah->vaddr + offset);
934 			buf->bus_address = dmah->busaddr + offset;
935 			buf->next = NULL;
936 			buf->waiting = 0;
937 			buf->pending = 0;
938 			init_waitqueue_head(&buf->dma_wait);
939 			buf->file_priv = NULL;
940 
941 			buf->dev_priv_size = dev->driver->dev_priv_size;
942 			buf->dev_private = drm_alloc(buf->dev_priv_size,
943 						     DRM_MEM_BUFS);
944 			if (!buf->dev_private) {
945 				/* Set count correctly so we free the proper amount. */
946 				entry->buf_count = count;
947 				entry->seg_count = count;
948 				drm_cleanup_buf_error(dev, entry);
949 				drm_free(temp_pagelist,
950 					 (dma->page_count +
951 					  (count << page_order))
952 					 * sizeof(*dma->pagelist),
953 					 DRM_MEM_PAGES);
954 				mutex_unlock(&dev->struct_mutex);
955 				atomic_dec(&dev->buf_alloc);
956 				return -ENOMEM;
957 			}
958 			memset(buf->dev_private, 0, buf->dev_priv_size);
959 
960 			DRM_DEBUG("buffer %d @ %p\n",
961 				  entry->buf_count, buf->address);
962 		}
963 		byte_count += PAGE_SIZE << page_order;
964 	}
965 
966 	temp_buflist = drm_realloc(dma->buflist,
967 				   dma->buf_count * sizeof(*dma->buflist),
968 				   (dma->buf_count + entry->buf_count)
969 				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
970 	if (!temp_buflist) {
971 		/* Free the entry because it isn't valid */
972 		drm_cleanup_buf_error(dev, entry);
973 		drm_free(temp_pagelist,
974 			 (dma->page_count + (count << page_order))
975 			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
976 		mutex_unlock(&dev->struct_mutex);
977 		atomic_dec(&dev->buf_alloc);
978 		return -ENOMEM;
979 	}
980 	dma->buflist = temp_buflist;
981 
982 	for (i = 0; i < entry->buf_count; i++) {
983 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
984 	}
985 
986 	/* No allocations failed, so now we can replace the original pagelist
987 	 * with the new one.
988 	 */
989 	if (dma->page_count) {
990 		drm_free(dma->pagelist,
991 			 dma->page_count * sizeof(*dma->pagelist),
992 			 DRM_MEM_PAGES);
993 	}
994 	dma->pagelist = temp_pagelist;
995 
996 	dma->buf_count += entry->buf_count;
997 	dma->seg_count += entry->seg_count;
998 	dma->page_count += entry->seg_count << page_order;
999 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
1000 
1001 	mutex_unlock(&dev->struct_mutex);
1002 
1003 	request->count = entry->buf_count;
1004 	request->size = size;
1005 
1006 	if (request->flags & _DRM_PCI_BUFFER_RO)
1007 		dma->flags = _DRM_DMA_USE_PCI_RO;
1008 
1009 	atomic_dec(&dev->buf_alloc);
1010 	return 0;
1011 
1012 }
1013 EXPORT_SYMBOL(drm_addbufs_pci);
1014 
1015 static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
1016 {
1017 	struct drm_device_dma *dma = dev->dma;
1018 	struct drm_buf_entry *entry;
1019 	struct drm_buf *buf;
1020 	unsigned long offset;
1021 	unsigned long agp_offset;
1022 	int count;
1023 	int order;
1024 	int size;
1025 	int alignment;
1026 	int page_order;
1027 	int total;
1028 	int byte_count;
1029 	int i;
1030 	struct drm_buf **temp_buflist;
1031 
1032 	if (!drm_core_check_feature(dev, DRIVER_SG))
1033 		return -EINVAL;
1034 
1035 	if (!dma)
1036 		return -EINVAL;
1037 
1038 	if (!capable(CAP_SYS_ADMIN))
1039 		return -EPERM;
1040 
1041 	count = request->count;
1042 	order = drm_order(request->size);
1043 	size = 1 << order;
1044 
1045 	alignment = (request->flags & _DRM_PAGE_ALIGN)
1046 	    ? PAGE_ALIGN(size) : size;
1047 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1048 	total = PAGE_SIZE << page_order;
1049 
1050 	byte_count = 0;
1051 	agp_offset = request->agp_start;
1052 
1053 	DRM_DEBUG("count:      %d\n", count);
1054 	DRM_DEBUG("order:      %d\n", order);
1055 	DRM_DEBUG("size:       %d\n", size);
1056 	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1057 	DRM_DEBUG("alignment:  %d\n", alignment);
1058 	DRM_DEBUG("page_order: %d\n", page_order);
1059 	DRM_DEBUG("total:      %d\n", total);
1060 
1061 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1062 		return -EINVAL;
1063 	if (dev->queue_count)
1064 		return -EBUSY;	/* Not while in use */
1065 
1066 	spin_lock(&dev->count_lock);
1067 	if (dev->buf_use) {
1068 		spin_unlock(&dev->count_lock);
1069 		return -EBUSY;
1070 	}
1071 	atomic_inc(&dev->buf_alloc);
1072 	spin_unlock(&dev->count_lock);
1073 
1074 	mutex_lock(&dev->struct_mutex);
1075 	entry = &dma->bufs[order];
1076 	if (entry->buf_count) {
1077 		mutex_unlock(&dev->struct_mutex);
1078 		atomic_dec(&dev->buf_alloc);
1079 		return -ENOMEM;	/* May only call once for each order */
1080 	}
1081 
1082 	if (count < 0 || count > 4096) {
1083 		mutex_unlock(&dev->struct_mutex);
1084 		atomic_dec(&dev->buf_alloc);
1085 		return -EINVAL;
1086 	}
1087 
1088 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
1089 				   DRM_MEM_BUFS);
1090 	if (!entry->buflist) {
1091 		mutex_unlock(&dev->struct_mutex);
1092 		atomic_dec(&dev->buf_alloc);
1093 		return -ENOMEM;
1094 	}
1095 	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
1096 
1097 	entry->buf_size = size;
1098 	entry->page_order = page_order;
1099 
1100 	offset = 0;
1101 
1102 	while (entry->buf_count < count) {
1103 		buf = &entry->buflist[entry->buf_count];
1104 		buf->idx = dma->buf_count + entry->buf_count;
1105 		buf->total = alignment;
1106 		buf->order = order;
1107 		buf->used = 0;
1108 
1109 		buf->offset = (dma->byte_count + offset);
1110 		buf->bus_address = agp_offset + offset;
1111 		buf->address = (void *)(agp_offset + offset
1112 					+ (unsigned long)dev->sg->virtual);
1113 		buf->next = NULL;
1114 		buf->waiting = 0;
1115 		buf->pending = 0;
1116 		init_waitqueue_head(&buf->dma_wait);
1117 		buf->file_priv = NULL;
1118 
1119 		buf->dev_priv_size = dev->driver->dev_priv_size;
1120 		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
1121 		if (!buf->dev_private) {
1122 			/* Set count correctly so we free the proper amount. */
1123 			entry->buf_count = count;
1124 			drm_cleanup_buf_error(dev, entry);
1125 			mutex_unlock(&dev->struct_mutex);
1126 			atomic_dec(&dev->buf_alloc);
1127 			return -ENOMEM;
1128 		}
1129 
1130 		memset(buf->dev_private, 0, buf->dev_priv_size);
1131 
1132 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1133 
1134 		offset += alignment;
1135 		entry->buf_count++;
1136 		byte_count += PAGE_SIZE << page_order;
1137 	}
1138 
1139 	DRM_DEBUG("byte_count: %d\n", byte_count);
1140 
1141 	temp_buflist = drm_realloc(dma->buflist,
1142 				   dma->buf_count * sizeof(*dma->buflist),
1143 				   (dma->buf_count + entry->buf_count)
1144 				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
1145 	if (!temp_buflist) {
1146 		/* Free the entry because it isn't valid */
1147 		drm_cleanup_buf_error(dev, entry);
1148 		mutex_unlock(&dev->struct_mutex);
1149 		atomic_dec(&dev->buf_alloc);
1150 		return -ENOMEM;
1151 	}
1152 	dma->buflist = temp_buflist;
1153 
1154 	for (i = 0; i < entry->buf_count; i++) {
1155 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1156 	}
1157 
1158 	dma->buf_count += entry->buf_count;
1159 	dma->seg_count += entry->seg_count;
1160 	dma->page_count += byte_count >> PAGE_SHIFT;
1161 	dma->byte_count += byte_count;
1162 
1163 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1164 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1165 
1166 	mutex_unlock(&dev->struct_mutex);
1167 
1168 	request->count = entry->buf_count;
1169 	request->size = size;
1170 
1171 	dma->flags = _DRM_DMA_USE_SG;
1172 
1173 	atomic_dec(&dev->buf_alloc);
1174 	return 0;
1175 }
1176 
1177 static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
1178 {
1179 	struct drm_device_dma *dma = dev->dma;
1180 	struct drm_buf_entry *entry;
1181 	struct drm_buf *buf;
1182 	unsigned long offset;
1183 	unsigned long agp_offset;
1184 	int count;
1185 	int order;
1186 	int size;
1187 	int alignment;
1188 	int page_order;
1189 	int total;
1190 	int byte_count;
1191 	int i;
1192 	struct drm_buf **temp_buflist;
1193 
1194 	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1195 		return -EINVAL;
1196 
1197 	if (!dma)
1198 		return -EINVAL;
1199 
1200 	if (!capable(CAP_SYS_ADMIN))
1201 		return -EPERM;
1202 
1203 	count = request->count;
1204 	order = drm_order(request->size);
1205 	size = 1 << order;
1206 
1207 	alignment = (request->flags & _DRM_PAGE_ALIGN)
1208 	    ? PAGE_ALIGN(size) : size;
1209 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1210 	total = PAGE_SIZE << page_order;
1211 
1212 	byte_count = 0;
1213 	agp_offset = request->agp_start;
1214 
1215 	DRM_DEBUG("count:      %d\n", count);
1216 	DRM_DEBUG("order:      %d\n", order);
1217 	DRM_DEBUG("size:       %d\n", size);
1218 	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1219 	DRM_DEBUG("alignment:  %d\n", alignment);
1220 	DRM_DEBUG("page_order: %d\n", page_order);
1221 	DRM_DEBUG("total:      %d\n", total);
1222 
1223 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1224 		return -EINVAL;
1225 	if (dev->queue_count)
1226 		return -EBUSY;	/* Not while in use */
1227 
1228 	spin_lock(&dev->count_lock);
1229 	if (dev->buf_use) {
1230 		spin_unlock(&dev->count_lock);
1231 		return -EBUSY;
1232 	}
1233 	atomic_inc(&dev->buf_alloc);
1234 	spin_unlock(&dev->count_lock);
1235 
1236 	mutex_lock(&dev->struct_mutex);
1237 	entry = &dma->bufs[order];
1238 	if (entry->buf_count) {
1239 		mutex_unlock(&dev->struct_mutex);
1240 		atomic_dec(&dev->buf_alloc);
1241 		return -ENOMEM;	/* May only call once for each order */
1242 	}
1243 
1244 	if (count < 0 || count > 4096) {
1245 		mutex_unlock(&dev->struct_mutex);
1246 		atomic_dec(&dev->buf_alloc);
1247 		return -EINVAL;
1248 	}
1249 
1250 	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
1251 				   DRM_MEM_BUFS);
1252 	if (!entry->buflist) {
1253 		mutex_unlock(&dev->struct_mutex);
1254 		atomic_dec(&dev->buf_alloc);
1255 		return -ENOMEM;
1256 	}
1257 	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
1258 
1259 	entry->buf_size = size;
1260 	entry->page_order = page_order;
1261 
1262 	offset = 0;
1263 
1264 	while (entry->buf_count < count) {
1265 		buf = &entry->buflist[entry->buf_count];
1266 		buf->idx = dma->buf_count + entry->buf_count;
1267 		buf->total = alignment;
1268 		buf->order = order;
1269 		buf->used = 0;
1270 
1271 		buf->offset = (dma->byte_count + offset);
1272 		buf->bus_address = agp_offset + offset;
1273 		buf->address = (void *)(agp_offset + offset);
1274 		buf->next = NULL;
1275 		buf->waiting = 0;
1276 		buf->pending = 0;
1277 		init_waitqueue_head(&buf->dma_wait);
1278 		buf->file_priv = NULL;
1279 
1280 		buf->dev_priv_size = dev->driver->dev_priv_size;
1281 		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
1282 		if (!buf->dev_private) {
1283 			/* Set count correctly so we free the proper amount. */
1284 			entry->buf_count = count;
1285 			drm_cleanup_buf_error(dev, entry);
1286 			mutex_unlock(&dev->struct_mutex);
1287 			atomic_dec(&dev->buf_alloc);
1288 			return -ENOMEM;
1289 		}
1290 		memset(buf->dev_private, 0, buf->dev_priv_size);
1291 
1292 		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1293 
1294 		offset += alignment;
1295 		entry->buf_count++;
1296 		byte_count += PAGE_SIZE << page_order;
1297 	}
1298 
1299 	DRM_DEBUG("byte_count: %d\n", byte_count);
1300 
1301 	temp_buflist = drm_realloc(dma->buflist,
1302 				   dma->buf_count * sizeof(*dma->buflist),
1303 				   (dma->buf_count + entry->buf_count)
1304 				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
1305 	if (!temp_buflist) {
1306 		/* Free the entry because it isn't valid */
1307 		drm_cleanup_buf_error(dev, entry);
1308 		mutex_unlock(&dev->struct_mutex);
1309 		atomic_dec(&dev->buf_alloc);
1310 		return -ENOMEM;
1311 	}
1312 	dma->buflist = temp_buflist;
1313 
1314 	for (i = 0; i < entry->buf_count; i++) {
1315 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1316 	}
1317 
1318 	dma->buf_count += entry->buf_count;
1319 	dma->seg_count += entry->seg_count;
1320 	dma->page_count += byte_count >> PAGE_SHIFT;
1321 	dma->byte_count += byte_count;
1322 
1323 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1324 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1325 
1326 	mutex_unlock(&dev->struct_mutex);
1327 
1328 	request->count = entry->buf_count;
1329 	request->size = size;
1330 
1331 	dma->flags = _DRM_DMA_USE_FB;
1332 
1333 	atomic_dec(&dev->buf_alloc);
1334 	return 0;
1335 }
1336 
1337 
1338 /**
1339  * Add buffers for DMA transfers (ioctl).
1340  *
1341  * \param inode device inode.
1342  * \param file_priv DRM file private.
1343  * \param cmd command.
1344  * \param arg pointer to a struct drm_buf_desc request.
1345  * \return zero on success or a negative number on failure.
1346  *
1347  * According to the memory type specified in drm_buf_desc::flags and the
1348  * build options, it dispatches the call either to addbufs_agp(),
1349  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
1350  * PCI memory respectively.
1351  */
1352 int drm_addbufs(struct drm_device *dev, void *data,
1353 		struct drm_file *file_priv)
1354 {
1355 	struct drm_buf_desc *request = data;
1356 	int ret;
1357 
1358 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1359 		return -EINVAL;
1360 
1361 #if __OS_HAS_AGP
1362 	if (request->flags & _DRM_AGP_BUFFER)
1363 		ret = drm_addbufs_agp(dev, request);
1364 	else
1365 #endif
1366 	if (request->flags & _DRM_SG_BUFFER)
1367 		ret = drm_addbufs_sg(dev, request);
1368 	else if (request->flags & _DRM_FB_BUFFER)
1369 		ret = drm_addbufs_fb(dev, request);
1370 	else
1371 		ret = drm_addbufs_pci(dev, request);
1372 
1373 	return ret;
1374 }
1375 
1376 /**
1377  * Get information about the buffer mappings.
1378  *
1379  * This was originally meant for debugging purposes, or for use by a sophisticated
1380  * client library to determine how best to use the available buffers (e.g.,
1381  * large buffers can be used for image transfer).
1382  *
1383  * \param inode device inode.
1384  * \param file_priv DRM file private.
1385  * \param cmd command.
1386  * \param arg pointer to a drm_buf_info structure.
1387  * \return zero on success or a negative number on failure.
1388  *
1389  * Increments drm_device::buf_use while holding the drm_device::count_lock
1390  * lock, preventing allocation of more buffers after this call. Information
1391  * about each requested buffer is then copied into user space.
1392  */
1393 int drm_infobufs(struct drm_device *dev, void *data,
1394 		 struct drm_file *file_priv)
1395 {
1396 	struct drm_device_dma *dma = dev->dma;
1397 	struct drm_buf_info *request = data;
1398 	int i;
1399 	int count;
1400 
1401 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1402 		return -EINVAL;
1403 
1404 	if (!dma)
1405 		return -EINVAL;
1406 
1407 	spin_lock(&dev->count_lock);
1408 	if (atomic_read(&dev->buf_alloc)) {
1409 		spin_unlock(&dev->count_lock);
1410 		return -EBUSY;
1411 	}
1412 	++dev->buf_use;		/* Can't allocate more after this call */
1413 	spin_unlock(&dev->count_lock);
1414 
1415 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1416 		if (dma->bufs[i].buf_count)
1417 			++count;
1418 	}
1419 
1420 	DRM_DEBUG("count = %d\n", count);
1421 
1422 	if (request->count >= count) {
1423 		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1424 			if (dma->bufs[i].buf_count) {
1425 				struct drm_buf_desc __user *to =
1426 				    &request->list[count];
1427 				struct drm_buf_entry *from = &dma->bufs[i];
1428 				struct drm_freelist *list = &dma->bufs[i].freelist;
1429 				if (copy_to_user(&to->count,
1430 						 &from->buf_count,
1431 						 sizeof(from->buf_count)) ||
1432 				    copy_to_user(&to->size,
1433 						 &from->buf_size,
1434 						 sizeof(from->buf_size)) ||
1435 				    copy_to_user(&to->low_mark,
1436 						 &list->low_mark,
1437 						 sizeof(list->low_mark)) ||
1438 				    copy_to_user(&to->high_mark,
1439 						 &list->high_mark,
1440 						 sizeof(list->high_mark)))
1441 					return -EFAULT;
1442 
1443 				DRM_DEBUG("%d %d %d %d %d\n",
1444 					  i,
1445 					  dma->bufs[i].buf_count,
1446 					  dma->bufs[i].buf_size,
1447 					  dma->bufs[i].freelist.low_mark,
1448 					  dma->bufs[i].freelist.high_mark);
1449 				++count;
1450 			}
1451 		}
1452 	}
1453 	request->count = count;
1454 
1455 	return 0;
1456 }
1457 
1458 /**
1459  * Specifies a low and high water mark for buffer allocation
1460  *
1461  * \param inode device inode.
1462  * \param file_priv DRM file private.
1463  * \param cmd command.
1464  * \param arg a pointer to a drm_buf_desc structure.
1465  * \return zero on success or a negative number on failure.
1466  *
1467  * Verifies that the size order falls within the admissible range and updates
1468  * the low and high water marks of the respective drm_device_dma::bufs entry.
1469  *
1470  * \note This ioctl is deprecated and mostly never used.
1471  */
1472 int drm_markbufs(struct drm_device *dev, void *data,
1473 		 struct drm_file *file_priv)
1474 {
1475 	struct drm_device_dma *dma = dev->dma;
1476 	struct drm_buf_desc *request = data;
1477 	int order;
1478 	struct drm_buf_entry *entry;
1479 
1480 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1481 		return -EINVAL;
1482 
1483 	if (!dma)
1484 		return -EINVAL;
1485 
1486 	DRM_DEBUG("%d, %d, %d\n",
1487 		  request->size, request->low_mark, request->high_mark);
1488 	order = drm_order(request->size);
1489 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1490 		return -EINVAL;
1491 	entry = &dma->bufs[order];
1492 
1493 	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1494 		return -EINVAL;
1495 	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1496 		return -EINVAL;
1497 
1498 	entry->freelist.low_mark = request->low_mark;
1499 	entry->freelist.high_mark = request->high_mark;
1500 
1501 	return 0;
1502 }
1503 
1504 /**
1505  * Unreserve the buffers in list, previously reserved using drmDMA.
1506  *
1507  * \param inode device inode.
1508  * \param file_priv DRM file private.
1509  * \param cmd command.
1510  * \param arg pointer to a drm_buf_free structure.
1511  * \return zero on success or a negative number on failure.
1512  *
1513  * Calls free_buffer() for each used buffer.
1514  * This function is primarily used for debugging.
1515  */
1516 int drm_freebufs(struct drm_device *dev, void *data,
1517 		 struct drm_file *file_priv)
1518 {
1519 	struct drm_device_dma *dma = dev->dma;
1520 	struct drm_buf_free *request = data;
1521 	int i;
1522 	int idx;
1523 	struct drm_buf *buf;
1524 
1525 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1526 		return -EINVAL;
1527 
1528 	if (!dma)
1529 		return -EINVAL;
1530 
1531 	DRM_DEBUG("%d\n", request->count);
1532 	for (i = 0; i < request->count; i++) {
1533 		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1534 			return -EFAULT;
1535 		if (idx < 0 || idx >= dma->buf_count) {
1536 			DRM_ERROR("Index %d (of %d max)\n",
1537 				  idx, dma->buf_count - 1);
1538 			return -EINVAL;
1539 		}
1540 		buf = dma->buflist[idx];
1541 		if (buf->file_priv != file_priv) {
1542 			DRM_ERROR("Process %d freeing buffer not owned\n",
1543 				  task_pid_nr(current));
1544 			return -EINVAL;
1545 		}
1546 		drm_free_buffer(dev, buf);
1547 	}
1548 
1549 	return 0;
1550 }
1551 
1552 /**
1553  * Maps all of the DMA buffers into client-virtual space (ioctl).
1554  *
1555  * \param inode device inode.
1556  * \param file_priv DRM file private.
1557  * \param cmd command.
1558  * \param arg pointer to a drm_buf_map structure.
1559  * \return zero on success or a negative number on failure.
1560  *
1561  * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
1562  * about each buffer into user space. For PCI buffers, it calls do_mmap() with
1563  * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1564  * drm_mmap_dma().
1565  */
1566 int drm_mapbufs(struct drm_device *dev, void *data,
1567 	        struct drm_file *file_priv)
1568 {
1569 	struct drm_device_dma *dma = dev->dma;
1570 	int retcode = 0;
1571 	const int zero = 0;
1572 	unsigned long virtual;
1573 	unsigned long address;
1574 	struct drm_buf_map *request = data;
1575 	int i;
1576 
1577 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1578 		return -EINVAL;
1579 
1580 	if (!dma)
1581 		return -EINVAL;
1582 
1583 	spin_lock(&dev->count_lock);
1584 	if (atomic_read(&dev->buf_alloc)) {
1585 		spin_unlock(&dev->count_lock);
1586 		return -EBUSY;
1587 	}
1588 	dev->buf_use++;		/* Can't allocate more after this call */
1589 	spin_unlock(&dev->count_lock);
1590 
1591 	if (request->count >= dma->buf_count) {
1592 		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1593 		    || (drm_core_check_feature(dev, DRIVER_SG)
1594 			&& (dma->flags & _DRM_DMA_USE_SG))
1595 		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1596 			&& (dma->flags & _DRM_DMA_USE_FB))) {
1597 			struct drm_local_map *map = dev->agp_buffer_map;
1598 			unsigned long token = dev->agp_buffer_token;
1599 
1600 			if (!map) {
1601 				retcode = -EINVAL;
1602 				goto done;
1603 			}
1604 			down_write(&current->mm->mmap_sem);
1605 			virtual = do_mmap(file_priv->filp, 0, map->size,
1606 					  PROT_READ | PROT_WRITE,
1607 					  MAP_SHARED,
1608 					  token);
1609 			up_write(&current->mm->mmap_sem);
1610 		} else {
1611 			down_write(&current->mm->mmap_sem);
1612 			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
1613 					  PROT_READ | PROT_WRITE,
1614 					  MAP_SHARED, 0);
1615 			up_write(&current->mm->mmap_sem);
1616 		}
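		/* on failure do_mmap() returns a negative errno encoded as an address */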
1617 		if (virtual > -1024UL) {
1618 			/* Real error */
1619 			retcode = (signed long)virtual;
1620 			goto done;
1621 		}
1622 		request->virtual = (void __user *)virtual;
1623 
1624 		for (i = 0; i < dma->buf_count; i++) {
1625 			if (copy_to_user(&request->list[i].idx,
1626 					 &dma->buflist[i]->idx,
1627 					 sizeof(request->list[0].idx))) {
1628 				retcode = -EFAULT;
1629 				goto done;
1630 			}
1631 			if (copy_to_user(&request->list[i].total,
1632 					 &dma->buflist[i]->total,
1633 					 sizeof(request->list[0].total))) {
1634 				retcode = -EFAULT;
1635 				goto done;
1636 			}
1637 			if (copy_to_user(&request->list[i].used,
1638 					 &zero, sizeof(zero))) {
1639 				retcode = -EFAULT;
1640 				goto done;
1641 			}
1642 			address = virtual + dma->buflist[i]->offset;	/* *** */
1643 			if (copy_to_user(&request->list[i].address,
1644 					 &address, sizeof(address))) {
1645 				retcode = -EFAULT;
1646 				goto done;
1647 			}
1648 		}
1649 	}
1650       done:
1651 	request->count = dma->buf_count;
1652 	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1653 
1654 	return retcode;
1655 }
1656 
1657 /**
1658  * Compute size order.  Returns the exponent of the smallest power of two which
1659  * is greater than or equal to the given number.
1660  *
1661  * \param size size.
1662  * \return order.
1663  *
1664  * \todo Can be made faster.
1665  */
1666 int drm_order(unsigned long size)
1667 {
1668 	int order;
1669 	unsigned long tmp;
1670 
1671 	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1672 
1673 	if (size & (size - 1))
1674 		++order;
1675 
1676 	return order;
1677 }
1678 EXPORT_SYMBOL(drm_order);
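/*
 * Examples: drm_order(1) == 0, drm_order(3) == 2, drm_order(4) == 2 and
 * drm_order(PAGE_SIZE) == PAGE_SHIFT.
 */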
1679