/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * GK20A does not have dedicated video memory, and to accurately represent
 * this fact, Nouveau will not create a RAM device for it. Therefore its
 * instmem implementation must be done directly on top of system memory,
 * while preserving coherency for read and write operations.
 *
 * Instmem can be allocated through two means:
 * 1) If an IOMMU unit has been probed, the IOMMU API is used to make memory
 *    pages appear contiguous to the GPU. This is the preferred way.
 * 2) If no IOMMU unit is probed, the DMA API is used to allocate physically
 *    contiguous memory.
 *
 * In both cases CPU reads and writes are performed through a write-combined
 * mapping. The GPU L2 cache must thus be flushed/invalidated when required.
 * To be conservative we do this every time we acquire or release an instobj,
 * but ideally L2 management should be handled at a higher level.
 *
 * To improve performance, CPU mappings are not removed upon instobj release.
 * Instead they are placed onto an LRU list and recycled once the total mapped
 * space exceeds a certain threshold (currently 1MB).
 */
#include "priv.h"

#include <core/memory.h>
#include <core/mm.h>
#include <core/tegra.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>

struct gk20a_instobj {
	struct nvkm_memory memory;
	struct nvkm_mem mem;
	struct gk20a_instmem *imem;

	/* CPU mapping */
	u32 *vaddr;
};
#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)

/*
 * Used for objects allocated using the DMA API
 */
struct gk20a_instobj_dma {
	struct gk20a_instobj base;

	dma_addr_t handle;
	struct nvkm_mm_node r;
};
#define gk20a_instobj_dma(p) \
	container_of(gk20a_instobj(p), struct gk20a_instobj_dma, base)

/*
 * Used for objects flattened using the IOMMU API
 */
struct gk20a_instobj_iommu {
	struct gk20a_instobj base;

	/* to link into gk20a_instmem::vaddr_lru */
	struct list_head vaddr_node;
	/* how many clients are using vaddr? */
	u32 use_cpt;

	/* DMA addresses of the pages, stored right after the pages array */
	dma_addr_t *dma_addrs;
	/* array of base.mem.size pages (followed by their dma_addr_ts) */
	struct page *pages[];
};
#define gk20a_instobj_iommu(p) \
	container_of(gk20a_instobj(p), struct gk20a_instobj_iommu, base)

struct gk20a_instmem {
	struct nvkm_instmem base;

	/* protects vaddr_* and gk20a_instobj::vaddr* */
	spinlock_t lock;

	/* CPU mappings LRU */
	unsigned int vaddr_use;
	unsigned int vaddr_max;
	struct list_head vaddr_lru;

	/* Only used if an IOMMU is present */
	struct mutex *mm_mutex;
	struct nvkm_mm *mm;
	struct iommu_domain *domain;
	unsigned long iommu_pgshift;
	u16 iommu_bit;

	/* Only used by the DMA API */
	struct dma_attrs attrs;
};
#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)

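/*
 * nvkm_memory accessors common to both allocation methods: instobjs always
 * live in host memory, at the GPU address recorded in mem.offset, with
 * their size tracked in 4K units.
 */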
static enum nvkm_memory_target
gk20a_instobj_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_HOST;
}

static u64
gk20a_instobj_addr(struct nvkm_memory *memory)
{
	return gk20a_instobj(memory)->mem.offset;
}

static u64
gk20a_instobj_size(struct nvkm_memory *memory)
{
	return (u64)gk20a_instobj(memory)->mem.size << 12;
}

/*
 * Recycle the vaddr of obj. Must be called with gk20a_instmem::lock held.
 */
static void
gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
{
	struct gk20a_instmem *imem = obj->base.imem;
	/* there should not be any users left... */
	WARN_ON(obj->use_cpt);
	list_del(&obj->vaddr_node);
	vunmap(obj->base.vaddr);
	obj->base.vaddr = NULL;
	imem->vaddr_use -= nvkm_memory_size(&obj->base.memory);
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
		   imem->vaddr_max);
}

/*
 * Must be called while holding gk20a_instmem::lock
 */
static void
gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
{
	while (imem->vaddr_use + size > imem->vaddr_max) {
		/* no candidates left to unmap, abort... */
		if (list_empty(&imem->vaddr_lru))
			break;

		gk20a_instobj_iommu_recycle_vaddr(
				list_first_entry(&imem->vaddr_lru,
				struct gk20a_instobj_iommu, vaddr_node));
	}
}

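/*
 * DMA objects are mapped once at construction time and keep their CPU
 * mapping until destruction, so acquiring one only requires flushing the
 * GPU L2 so that CPU reads observe the latest GPU writes.
 */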
static void __iomem *
gk20a_instobj_acquire_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	nvkm_ltc_flush(ltc);

	return node->vaddr;
}

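/*
 * IOMMU objects are mapped lazily: reuse the existing CPU mapping if there
 * is one (removing it from the LRU list if it was unused), otherwise evict
 * LRU mappings as needed and vmap the pages write-combined.
 */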
static void __iomem *
gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
	const u64 size = nvkm_memory_size(memory);
	unsigned long flags;

	nvkm_ltc_flush(ltc);

	spin_lock_irqsave(&imem->lock, flags);

	if (node->base.vaddr) {
		if (!node->use_cpt) {
			/* remove from LRU list since mapping in use again */
			list_del(&node->vaddr_node);
		}
		goto out;
	}

	/* try to free some address space if we reached the limit */
	gk20a_instmem_vaddr_gc(imem, size);

	/* map the pages */
	node->base.vaddr = vmap(node->pages, size >> PAGE_SHIFT, VM_MAP,
				pgprot_writecombine(PAGE_KERNEL));
	if (!node->base.vaddr) {
		nvkm_error(&imem->base.subdev, "cannot map instobj - "
			   "this is not going to end well...\n");
		goto out;
	}

	imem->vaddr_use += size;
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
		   imem->vaddr_use, imem->vaddr_max);

out:
	node->use_cpt++;
	spin_unlock_irqrestore(&imem->lock, flags);

	return node->base.vaddr;
}

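/*
 * Releasing a DMA object only requires invalidating the GPU L2 so that
 * subsequent GPU reads pick up what the CPU just wrote.
 */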
static void
gk20a_instobj_release_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	nvkm_ltc_invalidate(ltc);
}

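/*
 * Drop one CPU-mapping user; once the last user is gone the mapping is kept
 * around on the LRU list so it can be either reused or recycled.
 */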
static void
gk20a_instobj_release_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
	unsigned long flags;

	spin_lock_irqsave(&imem->lock, flags);

	/* we should have at least one user to release... */
	if (WARN_ON(node->use_cpt == 0))
		goto out;

	/* add unused objs to the LRU list to recycle their mapping */
	if (--node->use_cpt == 0)
		list_add_tail(&node->vaddr_node, &imem->vaddr_lru);

out:
	spin_unlock_irqrestore(&imem->lock, flags);

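	/* make sure all our CPU writes are visible before invalidating L2 */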
	wmb();
	nvkm_ltc_invalidate(ltc);
}

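/*
 * rd32/wr32 go through the CPU mapping established by acquire(); offsets
 * are byte-based and assumed to be 32-bit aligned.
 */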
static u32
gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	return node->vaddr[offset / 4];
}

static void
gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	node->vaddr[offset / 4] = data;
}

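/*
 * Map the object into a GPU virtual address space; both allocation methods
 * expose their backing storage through the region list of nvkm_mem.
 */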
static void
gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	nvkm_vm_map_at(vma, offset, &node->mem);
}

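/*
 * Free a DMA-allocated object; dma_free_attrs() also tears down the
 * permanent CPU mapping created at construction time.
 */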
static void *
gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;

	if (unlikely(!node->base.vaddr))
		goto out;

	dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
		       node->handle, &imem->attrs);

out:
	return node;
}

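/*
 * Tear down an IOMMU object: recycle any remaining CPU mapping, unmap every
 * page from the IOMMU and free it, then release its range of GPU addresses.
 */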
static void *
gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;
	struct nvkm_mm_node *r;
	unsigned long flags;
	int i;

	if (unlikely(list_empty(&node->base.mem.regions)))
		goto out;

	spin_lock_irqsave(&imem->lock, flags);

	/* recycle the vaddr unless the LRU already did it for us */
	if (node->base.vaddr)
		gk20a_instobj_iommu_recycle_vaddr(node);

	spin_unlock_irqrestore(&imem->lock, flags);

	r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
			     rl_entry);

	/* clear the IOMMU bit to recover the raw offset of the pages */
	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);

	/* Unmap pages from GPU address space and free them */
	for (i = 0; i < node->base.mem.size; i++) {
		iommu_unmap(imem->domain,
			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
		dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	/* Release area from GPU address space */
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

out:
	return node;
}

static const struct nvkm_memory_func
gk20a_instobj_func_dma = {
	.dtor = gk20a_instobj_dtor_dma,
	.target = gk20a_instobj_target,
	.addr = gk20a_instobj_addr,
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_dma,
	.release = gk20a_instobj_release_dma,
	.rd32 = gk20a_instobj_rd32,
	.wr32 = gk20a_instobj_wr32,
	.map = gk20a_instobj_map,
};

static const struct nvkm_memory_func
gk20a_instobj_func_iommu = {
	.dtor = gk20a_instobj_dtor_iommu,
	.target = gk20a_instobj_target,
	.addr = gk20a_instobj_addr,
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_iommu,
	.release = gk20a_instobj_release_iommu,
	.rd32 = gk20a_instobj_rd32,
	.wr32 = gk20a_instobj_wr32,
	.map = gk20a_instobj_map,
};

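/*
 * Allocate a physically contiguous, write-combined buffer through the DMA
 * API and describe it as a single region so it can be mapped using small
 * (4K) pages.
 */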
static int
gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
		       struct gk20a_instobj **_node)
{
	struct gk20a_instobj_dma *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;

	if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;

	nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);

	node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
					   &node->handle, GFP_KERNEL,
					   &imem->attrs);
	if (!node->base.vaddr) {
		nvkm_error(subdev, "cannot allocate DMA memory\n");
		return -ENOMEM;
	}

	/* alignment check */
	if (unlikely(node->handle & (align - 1)))
		nvkm_warn(subdev,
			  "memory not aligned as requested: %pad (0x%x)\n",
			  &node->handle, align);

	/* present the memory so it can be mapped using small pages */
	node->r.type = 12;
	node->r.offset = node->handle >> 12;
	node->r.length = (npages << PAGE_SHIFT) >> 12;

	node->base.mem.offset = node->handle;

	INIT_LIST_HEAD(&node->base.mem.regions);
	list_add_tail(&node->r.rl_entry, &node->base.mem.regions);

	return 0;
}

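/*
 * Allocate individual pages, map each of them for DMA, then make them appear
 * contiguous to the GPU by mapping them into a reserved range of the IOMMU
 * address space.
 */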
static int
gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
			 struct gk20a_instobj **_node)
{
	struct gk20a_instobj_iommu *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;
	struct nvkm_mm_node *r;
	int ret;
	int i;

	/*
	 * despite their variable size, instmem allocations are small enough
	 * (< 1 page) to be handled by kzalloc
	 */
	if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) +
			     sizeof(*node->dma_addrs)) * npages), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;
	node->dma_addrs = (void *)(node->pages + npages);

	nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);

	/* Allocate backing memory */
	for (i = 0; i < npages; i++) {
		struct page *p = alloc_page(GFP_KERNEL);
		dma_addr_t dma_adr;

		if (p == NULL) {
			ret = -ENOMEM;
			goto free_pages;
		}
		node->pages[i] = p;
		dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_adr)) {
			nvkm_error(subdev, "DMA mapping error!\n");
			ret = -ENOMEM;
			goto free_pages;
		}
		node->dma_addrs[i] = dma_adr;
	}

	mutex_lock(imem->mm_mutex);
	/* Reserve area from GPU address space */
	ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
			   align >> imem->iommu_pgshift, &r);
	mutex_unlock(imem->mm_mutex);
	if (ret) {
		nvkm_error(subdev, "IOMMU space is full!\n");
		goto free_pages;
	}

	/* Map into GPU address space */
	for (i = 0; i < npages; i++) {
		u32 offset = (r->offset + i) << imem->iommu_pgshift;

		ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
		if (ret < 0) {
			nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);

			while (i-- > 0) {
				offset -= PAGE_SIZE;
				iommu_unmap(imem->domain, offset, PAGE_SIZE);
			}
			goto release_area;
		}
	}

	/* the IOMMU bit flags an address to be resolved through the IOMMU */
	r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);

	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;

	INIT_LIST_HEAD(&node->base.mem.regions);
	list_add_tail(&r->rl_entry, &node->base.mem.regions);

	return 0;

release_area:
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

free_pages:
	for (i = 0; i < npages && node->pages[i] != NULL; i++) {
		dma_addr_t dma_addr = node->dma_addrs[i];
		if (dma_addr)
			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	return ret;
}

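/*
 * Common entry point for instobj allocation: round the request up to page
 * granularity, pick the IOMMU or DMA constructor depending on what was
 * probed, and present the result so it can be mapped using small (4K) pages.
 */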
static int
gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
		  struct nvkm_memory **pmemory)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct gk20a_instobj *node = NULL;
	int ret;

	nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
		   imem->domain ? "IOMMU" : "DMA", size, align);

	/* Round size and align to page bounds */
	size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
	align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);

	if (imem->domain)
		ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
					       align, &node);
	else
		ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
					     align, &node);
	*pmemory = node ? &node->memory : NULL;
	if (ret)
		return ret;

	node->imem = imem;

	/* present the memory so it can be mapped using small pages */
	node->mem.size = size >> 12;
	node->mem.memtype = 0;
	node->mem.page_shift = 12;

	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
		   size, align, node->mem.offset);

	return 0;
}

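/*
 * On teardown every instobj should already be gone, so warn if any CPU
 * mappings are still accounted for.
 */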
static void *
gk20a_instmem_dtor(struct nvkm_instmem *base)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);

	/* perform some sanity checks... */
	if (!list_empty(&imem->vaddr_lru))
		nvkm_warn(&base->subdev, "instobj LRU not empty!\n");

	if (imem->vaddr_use != 0)
		nvkm_warn(&base->subdev, "instobj vmap area not empty! "
			  "0x%x bytes still mapped\n", imem->vaddr_use);

	return imem;
}

static const struct nvkm_instmem_func
gk20a_instmem = {
	.dtor = gk20a_instmem_dtor,
	.memory_new = gk20a_instobj_new,
	.persistent = true,
	.zero = false,
};

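/*
 * Instantiate the instmem subdev: set the CPU-mapping budget and, if the
 * Tegra layer probed an IOMMU, borrow its domain and address-space
 * allocator; otherwise fall back to write-combined DMA API allocations.
 */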
int
gk20a_instmem_new(struct nvkm_device *device, int index,
		  struct nvkm_instmem **pimem)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct gk20a_instmem *imem;

	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
	spin_lock_init(&imem->lock);
	*pimem = &imem->base;

	/* do not allow more than 1MB of CPU-mapped instmem */
	imem->vaddr_use = 0;
	imem->vaddr_max = 0x100000;
	INIT_LIST_HEAD(&imem->vaddr_lru);

	if (tdev->iommu.domain) {
		imem->mm_mutex = &tdev->iommu.mutex;
		imem->mm = &tdev->iommu.mm;
		imem->domain = tdev->iommu.domain;
		imem->iommu_pgshift = tdev->iommu.pgshift;
		imem->iommu_bit = tdev->func->iommu_bit;

		nvkm_info(&imem->base.subdev, "using IOMMU\n");
	} else {
		init_dma_attrs(&imem->attrs);
		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);

		nvkm_info(&imem->base.subdev, "using DMA API\n");
	}

	return 0;
}