xref: /openbmc/linux/drivers/gpu/drm/drm_memory.c (revision 151f4e2b)
/**
 * \file drm_memory.c
 * Memory management wrappers for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Feb  4 14:00:34 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/export.h>
#include <xen/xen.h>
#include <drm/drmP.h>
#include "drm_legacy.h"

#if IS_ENABLED(CONFIG_AGP)

#ifdef HAVE_PAGE_AGP
# include <asm/agp.h>
#else
# ifdef __powerpc__
#  define PAGE_AGP	pgprot_noncached_wc(PAGE_KERNEL)
# else
#  define PAGE_AGP	PAGE_KERNEL
# endif
#endif
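
/*
 * Added descriptive note: when <asm/agp.h> does not provide PAGE_AGP, the
 * fallback above uses non-cached, write-combine-capable kernel page
 * protections on powerpc and plain PAGE_KERNEL elsewhere; this is the
 * protection handed to vmap() in agp_remap() below.
 */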

static void *agp_remap(unsigned long offset, unsigned long size,
		       struct drm_device *dev)
{
	unsigned long i, num_pages =
	    PAGE_ALIGN(size) / PAGE_SIZE;
	struct drm_agp_mem *agpmem;
	struct page **page_map;
	struct page **phys_page_map;
	void *addr;

	size = PAGE_ALIGN(size);

#ifdef __alpha__
	offset -= dev->hose->mem_space->start;
#endif

	list_for_each_entry(agpmem, &dev->agp->memory, head)
		if (agpmem->bound <= offset
		    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
		    (offset + size))
			break;
	if (&agpmem->head == &dev->agp->memory)
		return NULL;

	/*
	 * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
	 * the CPU do not get remapped by the GART.  We fix this by using the kernel's
	 * page-table instead (that's probably faster anyhow...).
	 */
	/* note: use vmalloc() because num_pages could be large... */
	page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
	if (!page_map)
		return NULL;

	phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
	for (i = 0; i < num_pages; ++i)
		page_map[i] = phys_page_map[i];
	addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
	vfree(page_map);

	return addr;
}
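
/*
 * Worked example of the offset arithmetic above (illustrative numbers, not
 * taken from this driver): with an AGP binding at agpmem->bound == 0x100000
 * and a request for offset == 0x180000, the copy starts at page index
 * (0x180000 - 0x100000) / PAGE_SIZE == 128 for 4 KiB pages, and
 * PAGE_ALIGN(size) / PAGE_SIZE consecutive struct page pointers are then
 * handed to vmap() with the PAGE_AGP protection.
 */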

/** Wrapper around agp_free_memory() */
void drm_free_agp(struct agp_memory *handle, int pages)
{
	agp_free_memory(handle);
}

/** Wrapper around agp_bind_memory() */
int drm_bind_agp(struct agp_memory *handle, unsigned int start)
{
	return agp_bind_memory(handle, start);
}

/** Wrapper around agp_unbind_memory() */
int drm_unbind_agp(struct agp_memory *handle)
{
	return agp_unbind_memory(handle);
}
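
/*
 * Hedged usage sketch (handle, page_offset and num_pages are illustrative;
 * they come from the agpgart allocation paths, not from this file):
 *
 *	if (drm_bind_agp(handle, page_offset) == 0) {
 *		...use the bound aperture range...
 *		drm_unbind_agp(handle);
 *	}
 *	drm_free_agp(handle, num_pages);
 *
 * Bind and unbind follow the agpgart convention of returning 0 on success
 * and a negative errno on failure.
 */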

#else /*  CONFIG_AGP  */
static inline void *agp_remap(unsigned long offset, unsigned long size,
			      struct drm_device *dev)
{
	return NULL;
}

#endif /* CONFIG_AGP */

void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
	if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
		map->handle = agp_remap(map->offset, map->size, dev);
	else
		map->handle = ioremap(map->offset, map->size);
}
EXPORT_SYMBOL(drm_legacy_ioremap);

void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
{
	if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
		map->handle = agp_remap(map->offset, map->size, dev);
	else
		map->handle = ioremap_wc(map->offset, map->size);
}
EXPORT_SYMBOL(drm_legacy_ioremap_wc);

void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
{
	if (!map->handle || !map->size)
		return;

	if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
		vunmap(map->handle);
	else
		iounmap(map->handle);
}
EXPORT_SYMBOL(drm_legacy_ioremapfree);
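
/*
 * Hedged usage sketch (the map field values and the pdev variable are
 * illustrative, not taken from this file): a legacy driver mapping a
 * register BAR through a struct drm_local_map would do roughly
 *
 *	map->offset = pci_resource_start(pdev, 0);
 *	map->size   = pci_resource_len(pdev, 0);
 *	map->type   = _DRM_REGISTERS;
 *	drm_legacy_ioremap(map, dev);
 *	if (!map->handle)
 *		return -ENOMEM;
 *	...
 *	drm_legacy_ioremapfree(map, dev);
 *
 * drm_legacy_ioremap_wc() follows the same pattern with a write-combined
 * mapping, typically for framebuffer apertures.
 */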

bool drm_need_swiotlb(int dma_bits)
{
	struct resource *tmp;
	resource_size_t max_iomem = 0;

	/*
	 * Xen paravirtual hosts require swiotlb regardless of requested dma
	 * transfer size.
	 *
	 * NOTE: Really, what it requires is use of the dma_alloc_coherent
	 *       allocator used in ttm_dma_populate() instead of
	 *       ttm_populate_and_map_pages(), which bounce buffers so much in
	 *       Xen it leads to swiotlb buffer exhaustion.
	 */
	if (xen_pv_domain())
		return true;

	/*
	 * Enforce dma_alloc_coherent when memory encryption is active as well
	 * for the same reasons as for Xen paravirtual hosts.
	 */
	if (mem_encrypt_active())
		return true;

	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
		max_iomem = max(max_iomem, tmp->end);
	}

	return max_iomem > ((u64)1 << dma_bits);
}
EXPORT_SYMBOL(drm_need_swiotlb);
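
/*
 * Hedged usage sketch (the 40-bit limit is an illustrative value, not one
 * used in this file): a TTM-based driver whose device can address 40 bits
 * of DMA might gate its bounce-buffer handling on
 *
 *	bool need_swiotlb = drm_need_swiotlb(40);
 *
 * which is true on Xen PV domains, when memory encryption is active, or
 * when some iomem resource ends above 1ULL << 40.
 */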