/*
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"

struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

	/* We don't want graphics memory to be mapped encrypted */
	tmp = pgprot_decrypted(tmp);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
    defined(__mips__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
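
/*
 * Typical use of drm_io_prot(), as in drm_mmap_locked() below (a minimal
 * sketch, not the complete mapping path):
 *
 *	vma->vm_page_prot = drm_io_prot(map, vma);
 *	if (io_remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			       vma->vm_page_prot))
 *		return -EAGAIN;
 *
 * where "pfn" and "size" stand in for the map's page frame and length.
 */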

static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp = pgprot_noncached_wc(tmp);
#endif
	return tmp;
}
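
/*
 * drm_dma_prot() picks the protection for _DRM_CONSISTENT and
 * _DRM_SCATTER_GATHER maps in drm_mmap_locked(): plain cached pages
 * everywhere except non-cache-coherent PowerPC, where buffers must be
 * mapped write-combined/uncached to stay coherent with the device.
 */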

/*
 * \c fault method for AGP virtual memory.
 *
 * \param vmf fault details, including the faulting address and VMA.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if IS_ENABLED(CONFIG_AGP)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = vmf->address - vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

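		/*
		 * If no bound region contained baddr, list_for_each_entry()
		 * left the cursor at the list head itself, so comparing
		 * against &dev->agp->memory detects the miss.
		 */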
		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif

/*
 * \c fault method for shared virtual memory.
 *
 * \param vmf fault details, including the faulting address and VMA.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

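	/*
	 * SHM maps are backed by vmalloc()ed kernel memory (released with
	 * vfree() in drm_vm_shm_close()), so compute the faulting kernel
	 * virtual address and translate it back to its struct page.
	 */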
	offset = vmf->address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/*
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes the map information if we are the last person to close a
 * mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
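	/*
	 * One pass over the vmalist does double duty: count how many
	 * mappings still reference this map, and unlink and free our own
	 * tracking entry.
	 */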
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not,
		 * then we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dma_free_coherent(&dev->pdev->dev,
						  map->size,
						  map->handle,
						  map->offset);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/*
 * \c fault method for DMA virtual memory.
 *
 * \param vmf fault details, including the faulting address and VMA.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
					/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}
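
/*
 * Note: DMA maps only reach this handler through drm_mmap_dma(), which
 * drm_mmap_locked() selects for mappings with a zero vm_pgoff, hence the
 * assumption above that the VMA-relative page number equals vmf->pgoff.
 */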

/*
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vmf fault details, including the faulting address and VMA.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
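
/*
 * drm_mmap_locked() below selects one of these tables per map type:
 * _DRM_AGP, _DRM_FRAME_BUFFER and _DRM_REGISTERS get drm_vm_ops,
 * _DRM_SHM and _DRM_CONSISTENT get drm_vm_shm_ops, _DRM_SCATTER_GATHER
 * gets drm_vm_sg_ops, and offset-zero DMA mappings are routed through
 * drm_mmap_dma(), which installs drm_vm_dma_ops.
 */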

static void drm_vm_open_locked(struct drm_device *dev,
			       struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

static void drm_vm_close_locked(struct drm_device *dev,
				struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}
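
/*
 * Unlike drm_vm_shm_close(), this only drops the vmalist bookkeeping
 * entry; the underlying map is left alone, since non-SHM map types are
 * torn down elsewhere (e.g. drm_legacy_rmmap()).
 */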

/*
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search for the \p vma entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/*
 * mmap DMA memory.
 *
 * \param filp standard file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops and
 * tracks the mapping via drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

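/*
 * Register maps are specified bus-relative; on Alpha the CPU reaches PCI
 * memory through a dense-mapping window, so the window's base must be
 * folded into the physical address before remapping.  Everywhere else
 * the additional offset is zero.
 */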
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/*
 * mmap a DRM memory map.
 *
 * \param filp standard file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally tracks the mapping
 * via drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		fallthrough;	/* to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
		    page_to_pfn(virt_to_page(map->handle)),
		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		fallthrough;	/* to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);
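
/*
 * Legacy (non-GEM) drivers reach this entry point through the .mmap hook
 * of their file_operations.  A minimal sketch (the surrounding fields are
 * illustrative, not a complete fops definition):
 *
 *	static const struct file_operations foo_legacy_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_legacy_mmap,
 *		.poll		= drm_poll,
 *	};
 */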

#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}
#endif
672