/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/nospec.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/shmparam.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "drm_legacy.h"

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of a given type, the lower 32 bits differ.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
			break;
		default: /* Make gcc happy */
			break;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

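	/*
	 * A user token above 4 GiB cannot be represented in the 32-bit
	 * map-offset ABI, so such maps must fall back to a hashed handle.
	 */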
#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;

		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

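	/*
	 * Probe for an unused key in the 32-bit token space; the low
	 * 'shift' bits of the resulting key come from 'add', which
	 * preserves the SHMLBA-relevant bits seeded above.
	 */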
	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}

/*
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list **maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* Page-align _DRM_SHM maps.  They are allocated here, so there is no
	 * security hole created by that, and it works around various broken
	 * drivers that use a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
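		/* vmalloc_user() returns zeroed memory that is safe to map
		 * into userspace.
		 */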
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * That's not always the case, as AGP can be in the control
		 * of user space (e.g. the i810 driver).  In that case this
		 * loop gets skipped, so double-check that dev->agp->memory
		 * is actually set, as well as the offset being invalid,
		 * before returning EPERM.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64-bit variable first.
		 */
		map->handle = dma_alloc_coherent(dev->dev,
						 map->size,
						 &map->offset,
						 GFP_KERNEL);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
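	/* The resulting token is what userspace later passes back as the
	 * mmap() offset for this map.
	 */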
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->master;
	*maplist = list;
	return 0;
}

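/*
 * Illustrative use from a legacy driver (variable names are examples only):
 *
 *	struct drm_local_map *map;
 *	int ret = drm_legacy_addmap(dev, pci_resource_start(pdev, 0),
 *				    pci_resource_len(pdev, 0),
 *				    _DRM_REGISTERS, _DRM_READ_ONLY, &map);
 */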
int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);

struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
					 unsigned int token)
{
	struct drm_map_list *_entry;

	list_for_each_entry(_entry, &dev->maplist, head)
		if (_entry->user_token == token)
			return _entry->map;
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_findmap);

/*
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* Avoid a warning on 64-bit: this cast isn't very nice, but the
	 * API was fixed long ago, so it's too late to change it now.
	 */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it.  Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 * it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}

/*
 * Get mapping information.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping with the specified offset and copies its
 * information into userspace.
 */
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *r_list = NULL;
	struct list_head *list;
	int idx;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	idx = map->offset;
	if (idx < 0)
		return -EINVAL;

	i = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each(list, &dev->maplist) {
		if (i == idx) {
			r_list = list_entry(list, struct drm_map_list, head);
			break;
		}
		i++;
	}
	if (!r_list || !r_list->map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	map->offset = r_list->map->offset;
	map->size = r_list->map->size;
	map->type = r_list->map->type;
	map->flags = r_list->map->flags;
	map->handle = (void *)(unsigned long) r_list->user_token;
	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/*
 * Remove a map private from the list and deallocate resources if the
 * mapping isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is in use, and frees any associated resources (such
 * as MTRRs) if it is not.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		fallthrough;
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dma_free_coherent(dev->dev,
				  map->size,
				  map->handle,
				  map->offset);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);

void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_legacy_rmmap);

void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
{
	struct drm_map_list *r_list, *list_temp;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_legacy_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

void drm_legacy_rmmaps(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_legacy_rmmap(dev, r_list->map);
}

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* The list has wrapped around to the head pointer, or it's empty,
	 * and we didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/*
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device *dev,
				  struct drm_buf_entry *entry)
{
	drm_dma_handle_t *dmah;
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				dmah = entry->seglist[i];
				dma_free_coherent(dev->dev,
						  dmah->size,
						  dmah->vaddr,
						  dmah->busaddr);
				kfree(dmah);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if IS_ENABLED(CONFIG_AGP)
/*
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the global buffer list to accommodate the new buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
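	/*
	 * Illustrative numbers: with 4K pages, request->size == 65536
	 * gives order == 16, size == 64 KiB, page_order == 4 and
	 * total == 64 KiB, i.e. each buffer spans 16 pages.
	 */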

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */

int drm_legacy_addbufs_pci(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
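	/* Each of the 'count' new segments contributes 1 << page_order pages. */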
	temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
				      sizeof(*dma->pagelist),
				      GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		dmah->size = total;
		dmah->vaddr = dma_alloc_coherent(dev->dev,
						 dmah->size,
						 &dmah->busaddr,
						 GFP_KERNEL);
		if (!dmah->vaddr) {
			kfree(dmah);

			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);

static int drm_legacy_addbufs_sg(struct drm_device *dev,
				 struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/*
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

#if IS_ENABLED(CONFIG_AGP)
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_legacy_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_legacy_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_legacy_addbufs_pci(dev, request);

	return ret;
}

/*
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int __drm_legacy_infobufs(struct drm_device *dev,
			  void *data, int *p,
			  int (*f)(void *, int, struct drm_buf_entry *))
{
	struct drm_device_dma *dma = dev->dma;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (*p >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			struct drm_buf_entry *from = &dma->bufs[i];

			if (from->buf_count) {
				if (f(data, count, from) < 0)
					return -EFAULT;
				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	*p = count;

	return 0;
}

static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
{
	struct drm_buf_info *request = data;
	struct drm_buf_desc __user *to = &request->list[count];
	struct drm_buf_desc v = {.count = from->buf_count,
				 .size = from->buf_size,
				 .low_mark = from->low_mark,
				 .high_mark = from->high_mark};

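	/* Copy only the fields up to (but not including) 'flags'. */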
	if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
		return -EFAULT;
	return 0;
}

int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_buf_info *request = data;

	return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}

/*
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}

/*
 * Unreserve the buffers in the list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
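		/* Clamp the index after the bounds check so speculative
		 * execution cannot run past it (Spectre v1 mitigation).
		 */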
		idx = array_index_nospec(idx, dma->buf_count);
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_legacy_free_buffer(dev, buf);
	}

	return 0;
}

/*
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
			 void __user **v,
			 int (*f)(void *, int, unsigned long,
				  struct drm_buf *),
			 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	unsigned long virtual;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EOPNOTSUPP;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	if (*p >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
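		/* vm_mmap() returns a negative errno cast to unsigned long
		 * on failure, so values in the top range are error codes.
		 */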
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		*v = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (f(data, i, virtual, dma->buflist[i]) < 0) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
done:
	*p = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);

	return retcode;
}

static int map_one_buf(void *data, int idx, unsigned long virtual,
		       struct drm_buf *buf)
{
	struct drm_buf_map *request = data;
	unsigned long address = virtual + buf->offset;	/* *** */

	if (copy_to_user(&request->list[idx].idx, &buf->idx,
			 sizeof(request->list[0].idx)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].total, &buf->total,
			 sizeof(request->list[0].total)))
		return -EFAULT;
	if (clear_user(&request->list[idx].used, sizeof(int)))
		return -EFAULT;
	if (copy_to_user(&request->list[idx].address, &address,
			 sizeof(address)))
		return -EFAULT;
	return 0;
}

int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_map *request = data;

	return __drm_legacy_mapbufs(dev, data, &request->count,
				    &request->virtual, map_one_buf,
				    file_priv);
}

int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}

struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);