/**************************************************************************
 *
 * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_driver.h>

#define VMW_PPN_SIZE (sizeof(unsigned long))
/* A future safe maximum remap size. */
#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
#define DMA_ADDR_INVALID ((dma_addr_t) 0)
#define DMA_PAGE_INVALID 0UL
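
/*
 * DMA_ADDR_INVALID and DMA_PAGE_INVALID are sentinels used by the legacy
 * (pre-GMR2) descriptor path below: a zero bus address or page number
 * marks an unmapped descriptor page or the end of a descriptor chain.
 */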
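
/**
 * vmw_gmr2_bind - Bind a GMR using the GMR2 FIFO interface.
 *
 * Reserves FIFO space for a single SVGA_CMD_DEFINE_GMR2 command followed
 * by one or more SVGA_CMD_REMAP_GMR2 commands, splitting the page number
 * list into chunks of at most VMW_PPN_PER_REMAP entries each.
 */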
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
			 struct vmw_piter *iter,
			 unsigned long num_pages,
			 int gmr_id)
{
	SVGAFifoCmdDefineGMR2 define_cmd;
	SVGAFifoCmdRemapGMR2 remap_cmd;
	uint32_t *cmd;
	uint32_t *cmd_orig;
	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP +
		((num_pages % VMW_PPN_PER_REMAP) > 0);
	uint32_t remap_size = VMW_PPN_SIZE * num_pages +
		(sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
	uint32_t remap_pos = 0;
	uint32_t cmd_size = define_size + remap_size;
	uint32_t i;

	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	define_cmd.gmrId = gmr_id;
	define_cmd.numPages = num_pages;

	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	memcpy(cmd, &define_cmd, sizeof(define_cmd));
	cmd += sizeof(define_cmd) / sizeof(*cmd);

	/*
	 * Need to split the command if there are too many
	 * pages that go into the GMR.
	 */
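
	/*
	 * Page numbers are written to the FIFO as unsigned longs, so tell
	 * the device to expect 64-bit entries on 64-bit kernels and 32-bit
	 * entries otherwise.
	 */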
	remap_cmd.gmrId = gmr_id;
	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;

	while (num_pages > 0) {
		unsigned long nr = min(num_pages,
				       (unsigned long)VMW_PPN_PER_REMAP);

		remap_cmd.offsetPages = remap_pos;
		remap_cmd.numPages = nr;

		*cmd++ = SVGA_CMD_REMAP_GMR2;
		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
		cmd += sizeof(remap_cmd) / sizeof(*cmd);
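
		/* Write one page number per page, sized per remap_cmd.flags. */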
		for (i = 0; i < nr; ++i) {
			if (VMW_PPN_SIZE <= 4)
				*cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
			else
				*((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
					PAGE_SHIFT;

			cmd += VMW_PPN_SIZE / sizeof(*cmd);
			vmw_piter_next(iter);
		}

		num_pages -= nr;
		remap_pos += nr;
	}
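
	/* The commands written must exactly fill the space we reserved. */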
	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}
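
/**
 * vmw_gmr2_unbind - Release a GMR bound through the GMR2 interface.
 *
 * The GMR is released by redefining it with zero pages.
 */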
static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
			    int gmr_id)
{
	SVGAFifoCmdDefineGMR2 define_cmd;
	uint32_t define_size = sizeof(define_cmd) + sizeof(uint32_t);
	uint32_t *cmd;

	cmd = vmw_fifo_reserve(dev_priv, define_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("GMR2 unbind failed.\n");
		return;
	}
	define_cmd.gmrId = gmr_id;
	define_cmd.numPages = 0;

	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	memcpy(cmd, &define_cmd, sizeof(define_cmd));

	vmw_fifo_commit(dev_priv, define_size);
}
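
/**
 * vmw_gmr_free_descriptors - Unmap and free a chain of descriptor pages.
 *
 * Unmaps each page, follows the link to the next descriptor page stored
 * in the page's reserved last slot, and frees the page. Pages whose bus
 * address is DMA_ADDR_INVALID are not unmapped, which covers the error
 * path of vmw_gmr_build_descriptors().
 */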
static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
				     struct list_head *desc_pages)
{
	struct page *page, *next;
	struct svga_guest_mem_descriptor *page_virtual;
	unsigned int desc_per_page = PAGE_SIZE /
		sizeof(struct svga_guest_mem_descriptor) - 1;

	if (list_empty(desc_pages))
		return;

	list_for_each_entry_safe(page, next, desc_pages, lru) {
		list_del_init(&page->lru);

		if (likely(desc_dma != DMA_ADDR_INVALID)) {
			dma_unmap_page(dev, desc_dma, PAGE_SIZE,
				       DMA_TO_DEVICE);
		}

		page_virtual = kmap_atomic(page);
		desc_dma = (dma_addr_t)
			le32_to_cpu(page_virtual[desc_per_page].ppn) <<
			PAGE_SHIFT;
		kunmap_atomic(page_virtual);

		__free_page(page);
	}
}

/**
 * vmw_gmr_build_descriptors - Build a chained list of guest memory
 * descriptors covering the pages backing a GMR.
 *
 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
 * the number of used descriptors.
 */

static int vmw_gmr_build_descriptors(struct device *dev,
				     struct list_head *desc_pages,
				     struct vmw_piter *iter,
				     unsigned long num_pages,
				     dma_addr_t *first_dma)
{
	struct page *page;
	struct svga_guest_mem_descriptor *page_virtual = NULL;
	struct svga_guest_mem_descriptor *desc_virtual = NULL;
	unsigned int desc_per_page;
	unsigned long prev_pfn;
	unsigned long pfn;
	int ret;
	dma_addr_t desc_dma;

	desc_per_page = PAGE_SIZE /
	    sizeof(struct svga_guest_mem_descriptor) - 1;
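
	/*
	 * Each descriptor describes a run of physically consecutive pages;
	 * consecutive pfns are folded into the current run. A descriptor
	 * page holds desc_per_page run descriptors plus a reserved last
	 * slot: a descriptor with num_pages == 0 terminates the page, and
	 * its ppn names the next descriptor page in the chain, or
	 * DMA_PAGE_INVALID for the final page.
	 */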
	while (likely(num_pages != 0)) {
		page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (unlikely(page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		list_add_tail(&page->lru, desc_pages);
		page_virtual = kmap_atomic(page);
		desc_virtual = page_virtual - 1;
		prev_pfn = ~(0UL);

		while (likely(num_pages != 0)) {
			pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;

			if (pfn != prev_pfn + 1) {
				if (desc_virtual - page_virtual ==
				    desc_per_page - 1)
					break;

				(++desc_virtual)->ppn = cpu_to_le32(pfn);
				desc_virtual->num_pages = cpu_to_le32(1);
			} else {
				uint32_t tmp =
				    le32_to_cpu(desc_virtual->num_pages);
				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
			}
			prev_pfn = pfn;
			--num_pages;
			vmw_piter_next(iter);
		}

		(++desc_virtual)->ppn = DMA_PAGE_INVALID;
		desc_virtual->num_pages = cpu_to_le32(0);
		kunmap_atomic(page_virtual);
	}
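
	/*
	 * Map the descriptor pages in reverse order so that each page can
	 * record the bus address of its successor before being mapped
	 * itself; the tail of the chain gets a zero link.
	 */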
	desc_dma = 0;
	list_for_each_entry_reverse(page, desc_pages, lru) {
		page_virtual = kmap_atomic(page);
		page_virtual[desc_per_page].ppn =
			cpu_to_le32(desc_dma >> PAGE_SHIFT);
		kunmap_atomic(page_virtual);
		desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
					DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(dev, desc_dma))) {
			ret = -ENOMEM;
			goto out_err;
		}
	}
	*first_dma = desc_dma;

	return 0;
out_err:
	vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
	return ret;
}
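
/**
 * vmw_gmr_fire_descriptors - Hand a descriptor chain to the device.
 *
 * Writes the GMR id and the page number of the first descriptor page to
 * the device registers, with barriers enforcing the write order.
 */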
static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
				     int gmr_id, dma_addr_t desc_dma)
{
	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
	mb();

	mutex_unlock(&dev_priv->hw_mutex);
}
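
/**
 * vmw_gmr_bind - Bind a GMR to the pages of a vmw_sg_table.
 *
 * Takes the GMR2 FIFO path when the device supports it, and otherwise
 * falls back to the legacy descriptor-based interface. Binding an empty
 * page list is a successful no-op.
 */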
int vmw_gmr_bind(struct vmw_private *dev_priv,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_pages,
		 int gmr_id)
{
	struct list_head desc_pages;
	dma_addr_t desc_dma = 0;
	struct device *dev = dev_priv->dev->dev;
	struct vmw_piter data_iter;
	int ret;

	vmw_piter_start(&data_iter, vsgt, 0);

	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
		return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);

	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
		return -EINVAL;

	if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
		return -EINVAL;

	INIT_LIST_HEAD(&desc_pages);

	ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
					num_pages, &desc_dma);
	if (unlikely(ret != 0))
		return ret;

	vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
	vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);

	return 0;
}
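
/**
 * vmw_gmr_unbind - Release a GMR binding.
 *
 * Uses the GMR2 interface when available; otherwise a zero descriptor
 * address is written to release the legacy binding.
 */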
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
		vmw_gmr2_unbind(dev_priv, gmr_id);
		return;
	}

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
	mb();
	mutex_unlock(&dev_priv->hw_mutex);
}