// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

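/*
 * Unmap a page-aligned range from the MMU context, one 4K page at a time,
 * using the pagetable ops of the global MMU instance. Stops early if the
 * backend fails to unmap a page.
 */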
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				  unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}

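/*
 * Map a physically contiguous, page-aligned range into the MMU context,
 * one 4K page at a time. If any page fails to map, the partial mapping is
 * rolled back before returning the error.
 */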
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}

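/*
 * Map all entries of a scatterlist into GPU address space starting at
 * @iova. On failure the entries mapped so far are unmapped again.
 */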
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_context_unmap(context, da, bytes);
		da += bytes;
	}
	return ret;
}

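/*
 * Unmap a previously mapped scatterlist from GPU address space starting
 * at @iova.
 */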
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_context_unmap(context, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}
}

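/*
 * Tear down a GEM mapping: unmap it from the GPU address space and release
 * its address range back to the drm_mm allocator.
 */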
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

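/*
 * Find a free GPU address range of @size bytes for @node. If the address
 * space is full, try to evict unused (unpinned) mappings via a drm_mm scan
 * and retry the allocation with eviction enabled.
 */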
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the context pointer to prevent mapping_get from
		 * finding this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(context, m);
			m->context = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}

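/* Insert @node at the exact GPU address @va; fails if the range is already in use. */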
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
				      struct drm_mm_node *node, size_t size, u64 va)
{
	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}

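/*
 * Map a GEM object into the GPU address space of @context. On MMUv1 a
 * single-entry (contiguous) buffer that is reachable through the linear
 * window is used directly, without going through the pagetables.
 */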
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	context->flush_seq++;
unlock:
	mutex_unlock(&context->lock);

	return ret;
}

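/*
 * Remove a GEM mapping from the GPU address space. The mapping must not be
 * in active use anymore.
 */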
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	context->flush_seq++;
	mutex_unlock(&context->lock);
}

static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

	context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}

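/*
 * Allocate a per-process MMU context matching the global MMU version and
 * map the cmdbuf suballocator into it.
 */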
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret) {
		global->ops->free(ctx);
		return NULL;
	}

	return ctx;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}

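/*
 * Map the cmdbuf suballocator region into the context, or just take another
 * reference if it is already mapped. On MMUv1 the region is reachable
 * through the linear window, so only a mapping entry is manufactured and
 * nothing is added to the pagetables.
 */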
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}

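/*
 * Drop a reference to the suballocator mapping and tear it down once the
 * last user is gone. No unmap is needed on MMUv1, as nothing was added to
 * the pagetables there.
 */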
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}

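/*
 * Set up the global MMU state shared by all GPU cores: detect the MMU
 * version, allocate the bad page (and the PTA on MMUv2) and pick the
 * matching pagetable ops. Subsequent cores just take a reference, but must
 * report the same MMU version.
 */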
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
						  &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}

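/* Drop a reference to the global MMU state and free it when the last user is gone. */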
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}