xref: /openbmc/linux/drivers/gpu/drm/nouveau/nouveau_dmem.c (revision 1c9f8dff62d85ce00b0e99f774a84bd783af7cac)
1 /*
2  * Copyright 2018 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include "nouveau_dmem.h"
23 #include "nouveau_drv.h"
24 #include "nouveau_chan.h"
25 #include "nouveau_dma.h"
26 #include "nouveau_mem.h"
27 #include "nouveau_bo.h"
28 #include "nouveau_svm.h"
29 
30 #include <nvif/class.h>
31 #include <nvif/object.h>
32 #include <nvif/push906f.h>
33 #include <nvif/if000c.h>
34 #include <nvif/if500b.h>
35 #include <nvif/if900b.h>
36 
37 #include <nvhw/class/cla0b5.h>
38 
39 #include <linux/sched/mm.h>
40 #include <linux/hmm.h>
41 #include <linux/memremap.h>
42 #include <linux/migrate.h>
43 
44 /*
45  * FIXME: this is ugly. Right now we are using TTM to allocate vram and we
46  * pin it in vram while it is in use. We likely want to overhaul memory
47  * management for nouveau to be more page-like (not necessarily with the
48  * system page size but a bigger page size) at the lowest level and have some
49  * shim layer on top that would provide the same functionality as TTM.
50  */
51 #define DMEM_CHUNK_SIZE (2UL << 20)
52 #define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)
53 
54 enum nouveau_aper {
55 	NOUVEAU_APER_VIRT,
56 	NOUVEAU_APER_VRAM,
57 	NOUVEAU_APER_HOST,
58 };
59 
60 typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
61 				      enum nouveau_aper dst_aper, u64 dst_addr,
62 				      enum nouveau_aper src_aper, u64 src_addr);
63 typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
64 				      enum nouveau_aper dst_aper, u64 dst_addr);
65 
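/*
 * Each chunk backs DMEM_CHUNK_SIZE of VRAM with a pinned buffer object and a
 * matching range of device-private struct pages described by the pagemap;
 * callocated counts the pages currently allocated out of the chunk.
 */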
66 struct nouveau_dmem_chunk {
67 	struct list_head list;
68 	struct nouveau_bo *bo;
69 	struct nouveau_drm *drm;
70 	unsigned long callocated;
71 	struct dev_pagemap pagemap;
72 };
73 
74 struct nouveau_dmem_migrate {
75 	nouveau_migrate_copy_t copy_func;
76 	nouveau_clear_page_t clear_func;
77 	struct nouveau_channel *chan;
78 };
79 
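/*
 * Per-device DMEM state: the migration channel and its copy/clear entry
 * points, the list of VRAM chunks (protected by the mutex) and a LIFO of
 * free device-private pages threaded through page->zone_device_data
 * (protected by the spinlock).
 */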
80 struct nouveau_dmem {
81 	struct nouveau_drm *drm;
82 	struct nouveau_dmem_migrate migrate;
83 	struct list_head chunks;
84 	struct mutex mutex;
85 	struct page *free_pages;
86 	spinlock_t lock;
87 };
88 
89 static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
90 {
91 	return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
92 }
93 
94 static struct nouveau_drm *page_to_drm(struct page *page)
95 {
96 	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
97 
98 	return chunk->drm;
99 }
100 
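/*
 * Return the VRAM address backing a device-private page: the page's offset
 * within the chunk's pagemap range plus the VRAM offset of the chunk's
 * buffer object.
 */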
101 unsigned long nouveau_dmem_page_addr(struct page *page)
102 {
103 	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
104 	unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
105 				chunk->pagemap.range.start;
106 
107 	return chunk->bo->offset + off;
108 }
109 
110 static void nouveau_dmem_page_free(struct page *page)
111 {
112 	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
113 	struct nouveau_dmem *dmem = chunk->drm->dmem;
114 
115 	spin_lock(&dmem->lock);
116 	page->zone_device_data = dmem->free_pages;
117 	dmem->free_pages = page;
118 
119 	WARN_ON(!chunk->callocated);
120 	chunk->callocated--;
121 	/*
122 	 * FIXME: when chunk->callocated reaches 0 we should add the chunk to
123 	 * a reclaim list so that it can be freed in case of memory pressure.
124 	 */
125 	spin_unlock(&dmem->lock);
126 }
127 
128 static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
129 {
130 	if (fence) {
131 		nouveau_fence_wait(*fence, true, false);
132 		nouveau_fence_unref(fence);
133 	} else {
134 		/*
135 		 * FIXME: wait for the channel to be IDLE before finalizing
136 		 * the hmem object.
137 		 */
138 	}
139 }
140 
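/*
 * Copy a single device-private page back to a system page: the destination
 * page is locked and DMA-mapped here, then the copy is pushed to the copy
 * engine.  The caller unmaps dma_addr once the channel fence has signalled.
 */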
141 static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
142 				struct page *dpage, dma_addr_t *dma_addr)
143 {
144 	struct device *dev = drm->dev->dev;
145 
146 	lock_page(dpage);
147 
148 	*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
149 	if (dma_mapping_error(dev, *dma_addr))
150 		return -EIO;
151 
152 	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
153 					 NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) {
154 		dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
155 		return -EIO;
156 	}
157 
158 	return 0;
159 }
160 
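/*
 * CPU fault handler for device-private pages (dev_pagemap_ops.migrate_to_ram):
 * migrate the faulting page back to system memory.  The data is copied with
 * the GPU copy engine, and the DMA mapping is only torn down once the channel
 * fence has signalled.
 */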
161 static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
162 {
163 	struct nouveau_drm *drm = page_to_drm(vmf->page);
164 	struct nouveau_dmem *dmem = drm->dmem;
165 	struct nouveau_fence *fence;
166 	struct nouveau_svmm *svmm;
167 	struct page *spage, *dpage;
168 	unsigned long src = 0, dst = 0;
169 	dma_addr_t dma_addr = 0;
170 	vm_fault_t ret = 0;
171 	struct migrate_vma args = {
172 		.vma		= vmf->vma,
173 		.start		= vmf->address,
174 		.end		= vmf->address + PAGE_SIZE,
175 		.src		= &src,
176 		.dst		= &dst,
177 		.pgmap_owner	= drm->dev,
178 		.fault_page	= vmf->page,
179 		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
180 	};
181 
182 	/*
183 	 * FIXME: what we really want is to find some heuristic to migrate more
184 	 * than just one page on CPU fault. When such a fault happens it is very
185 	 * likely that the surrounding pages will CPU fault too.
186 	 */
187 	if (migrate_vma_setup(&args) < 0)
188 		return VM_FAULT_SIGBUS;
189 	if (!args.cpages)
190 		return 0;
191 
192 	spage = migrate_pfn_to_page(src);
193 	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
194 		goto done;
195 
196 	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
197 	if (!dpage)
198 		goto done;
199 
200 	dst = migrate_pfn(page_to_pfn(dpage));
201 
202 	svmm = spage->zone_device_data;
203 	mutex_lock(&svmm->mutex);
204 	nouveau_svmm_invalidate(svmm, args.start, args.end);
205 	ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr);
206 	mutex_unlock(&svmm->mutex);
207 	if (ret) {
208 		ret = VM_FAULT_SIGBUS;
209 		goto done;
210 	}
211 
212 	if (!nouveau_fence_new(&fence))
213 		nouveau_fence_emit(fence, dmem->migrate.chan);
214 	migrate_vma_pages(&args);
215 	nouveau_dmem_fence_done(&fence);
216 	dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
217 done:
218 	migrate_vma_finalize(&args);
219 	return ret;
220 }
221 
222 static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
223 	.page_free		= nouveau_dmem_page_free,
224 	.migrate_to_ram		= nouveau_dmem_migrate_to_ram,
225 };
226 
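/*
 * Grow DMEM by one chunk: reserve a free physical address range for the
 * device-private pages, pin a DMEM_CHUNK_SIZE buffer object in VRAM, and
 * memremap the pagemap.  All but the last page are pushed onto the free
 * list; the last one is returned to the caller with callocated bumped.
 */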
227 static int
228 nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
229 {
230 	struct nouveau_dmem_chunk *chunk;
231 	struct resource *res;
232 	struct page *page;
233 	void *ptr;
234 	unsigned long i, pfn_first;
235 	int ret;
236 
237 	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
238 	if (chunk == NULL) {
239 		ret = -ENOMEM;
240 		goto out;
241 	}
242 
243 	/* Allocate unused physical address space for device private pages. */
244 	res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
245 				      "nouveau_dmem");
246 	if (IS_ERR(res)) {
247 		ret = PTR_ERR(res);
248 		goto out_free;
249 	}
250 
251 	chunk->drm = drm;
252 	chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
253 	chunk->pagemap.range.start = res->start;
254 	chunk->pagemap.range.end = res->end;
255 	chunk->pagemap.nr_range = 1;
256 	chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
257 	chunk->pagemap.owner = drm->dev;
258 
259 	ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
260 			     NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
261 			     &chunk->bo);
262 	if (ret)
263 		goto out_release;
264 
265 	ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
266 	if (ret)
267 		goto out_bo_free;
268 
269 	ptr = memremap_pages(&chunk->pagemap, numa_node_id());
270 	if (IS_ERR(ptr)) {
271 		ret = PTR_ERR(ptr);
272 		goto out_bo_unpin;
273 	}
274 
275 	mutex_lock(&drm->dmem->mutex);
276 	list_add(&chunk->list, &drm->dmem->chunks);
277 	mutex_unlock(&drm->dmem->mutex);
278 
279 	pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
280 	page = pfn_to_page(pfn_first);
281 	spin_lock(&drm->dmem->lock);
282 	for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
283 		page->zone_device_data = drm->dmem->free_pages;
284 		drm->dmem->free_pages = page;
285 	}
286 	*ppage = page;
287 	chunk->callocated++;
288 	spin_unlock(&drm->dmem->lock);
289 
290 	NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
291 		DMEM_CHUNK_SIZE >> 20);
292 
293 	return 0;
294 
295 out_bo_unpin:
296 	nouveau_bo_unpin(chunk->bo);
297 out_bo_free:
298 	nouveau_bo_ref(NULL, &chunk->bo);
299 out_release:
300 	release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
301 out_free:
302 	kfree(chunk);
303 out:
304 	return ret;
305 }
306 
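/*
 * Allocate one device-private page, taking it from the free list or from a
 * freshly allocated chunk.  The page is returned referenced and locked by
 * zone_device_page_init().
 */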
307 static struct page *
308 nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
309 {
310 	struct nouveau_dmem_chunk *chunk;
311 	struct page *page = NULL;
312 	int ret;
313 
314 	spin_lock(&drm->dmem->lock);
315 	if (drm->dmem->free_pages) {
316 		page = drm->dmem->free_pages;
317 		drm->dmem->free_pages = page->zone_device_data;
318 		chunk = nouveau_page_to_chunk(page);
319 		chunk->callocated++;
320 		spin_unlock(&drm->dmem->lock);
321 	} else {
322 		spin_unlock(&drm->dmem->lock);
323 		ret = nouveau_dmem_chunk_alloc(drm, &page);
324 		if (ret)
325 			return NULL;
326 	}
327 
328 	zone_device_page_init(page);
329 	return page;
330 }
331 
332 static void
333 nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
334 {
335 	unlock_page(page);
336 	put_page(page);
337 }
338 
339 void
340 nouveau_dmem_resume(struct nouveau_drm *drm)
341 {
342 	struct nouveau_dmem_chunk *chunk;
343 	int ret;
344 
345 	if (drm->dmem == NULL)
346 		return;
347 
348 	mutex_lock(&drm->dmem->mutex);
349 	list_for_each_entry(chunk, &drm->dmem->chunks, list) {
350 		ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
351 		/* FIXME handle pin failure */
352 		WARN_ON(ret);
353 	}
354 	mutex_unlock(&drm->dmem->mutex);
355 }
356 
357 void
358 nouveau_dmem_suspend(struct nouveau_drm *drm)
359 {
360 	struct nouveau_dmem_chunk *chunk;
361 
362 	if (drm->dmem == NULL)
363 		return;
364 
365 	mutex_lock(&drm->dmem->mutex);
366 	list_for_each_entry(chunk, &drm->dmem->chunks, list)
367 		nouveau_bo_unpin(chunk->bo);
368 	mutex_unlock(&drm->dmem->mutex);
369 }
370 
371 /*
372  * Evict all pages mapping a chunk.
373  */
374 static void
375 nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
376 {
377 	unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
378 	unsigned long *src_pfns, *dst_pfns;
379 	dma_addr_t *dma_addrs;
380 	struct nouveau_fence *fence;
381 
382 	src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
383 	dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
384 	dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
385 
386 	migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
387 			npages);
388 
389 	for (i = 0; i < npages; i++) {
390 		if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
391 			struct page *dpage;
392 
393 			/*
394 			 * __GFP_NOFAIL because the GPU is going away and there
395 			 * is nothing sensible we can do if we can't copy the
396 			 * data back.
397 			 */
398 			dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
399 			dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
400 			nouveau_dmem_copy_one(chunk->drm,
401 					migrate_pfn_to_page(src_pfns[i]), dpage,
402 					&dma_addrs[i]);
403 		}
404 	}
405 
406 	if (!nouveau_fence_new(&fence))
407 		nouveau_fence_emit(fence, chunk->drm->dmem->migrate.chan);
408 	migrate_device_pages(src_pfns, dst_pfns, npages);
409 	nouveau_dmem_fence_done(&fence);
410 	migrate_device_finalize(src_pfns, dst_pfns, npages);
411 	kfree(src_pfns);
412 	kfree(dst_pfns);
413 	for (i = 0; i < npages; i++)
414 		dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
415 	kfree(dma_addrs);
416 }
417 
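/*
 * Tear down DMEM: evict every chunk back to system memory, unpin and drop the
 * backing buffer objects, and unmap/release the device-private page ranges.
 */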
418 void
419 nouveau_dmem_fini(struct nouveau_drm *drm)
420 {
421 	struct nouveau_dmem_chunk *chunk, *tmp;
422 
423 	if (drm->dmem == NULL)
424 		return;
425 
426 	mutex_lock(&drm->dmem->mutex);
427 
428 	list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
429 		nouveau_dmem_evict_chunk(chunk);
430 		nouveau_bo_unpin(chunk->bo);
431 		nouveau_bo_ref(NULL, &chunk->bo);
432 		WARN_ON(chunk->callocated);
433 		list_del(&chunk->list);
434 		memunmap_pages(&chunk->pagemap);
435 		release_mem_region(chunk->pagemap.range.start,
436 				   range_len(&chunk->pagemap.range));
437 		kfree(chunk);
438 	}
439 
440 	mutex_unlock(&drm->dmem->mutex);
441 }
442 
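/*
 * Copy npages pages with the DMA copy engine.  Physical apertures (VRAM or
 * coherent sysmem) are programmed explicitly; NOUVEAU_APER_VIRT addresses go
 * through the channel's virtual address space.  The copy itself is a
 * pitch-linear, non-pipelined transfer of npages lines of PAGE_SIZE bytes.
 */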
443 static int
444 nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
445 		    enum nouveau_aper dst_aper, u64 dst_addr,
446 		    enum nouveau_aper src_aper, u64 src_addr)
447 {
448 	struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
449 	u32 launch_dma = 0;
450 	int ret;
451 
452 	ret = PUSH_WAIT(push, 13);
453 	if (ret)
454 		return ret;
455 
456 	if (src_aper != NOUVEAU_APER_VIRT) {
457 		switch (src_aper) {
458 		case NOUVEAU_APER_VRAM:
459 			PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
460 				  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB));
461 			break;
462 		case NOUVEAU_APER_HOST:
463 			PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
464 				  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM));
465 			break;
466 		default:
467 			return -EINVAL;
468 		}
469 
470 		launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, SRC_TYPE, PHYSICAL);
471 	}
472 
473 	if (dst_aper != NOUVEAU_APER_VIRT) {
474 		switch (dst_aper) {
475 		case NOUVEAU_APER_VRAM:
476 			PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
477 				  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
478 			break;
479 		case NOUVEAU_APER_HOST:
480 			PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
481 				  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
482 			break;
483 		default:
484 			return -EINVAL;
485 		}
486 
487 		launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
488 	}
489 
490 	PUSH_MTHD(push, NVA0B5, OFFSET_IN_UPPER,
491 		  NVVAL(NVA0B5, OFFSET_IN_UPPER, UPPER, upper_32_bits(src_addr)),
492 
493 				OFFSET_IN_LOWER, lower_32_bits(src_addr),
494 
495 				OFFSET_OUT_UPPER,
496 		  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),
497 
498 				OFFSET_OUT_LOWER, lower_32_bits(dst_addr),
499 				PITCH_IN, PAGE_SIZE,
500 				PITCH_OUT, PAGE_SIZE,
501 				LINE_LENGTH_IN, PAGE_SIZE,
502 				LINE_COUNT, npages);
503 
504 	PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
505 		  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
506 		  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
507 		  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
508 		  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
509 		  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
510 		  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
511 		  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, TRUE) |
512 		  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) |
513 		  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
514 	return 0;
515 }
516 
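/*
 * Clear length bytes of the destination using the copy engine's remap
 * constants: both constants are zero and each element written is two
 * four-byte components, which is why LINE_LENGTH_IN is length >> 3.
 */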
517 static int
518 nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
519 		     enum nouveau_aper dst_aper, u64 dst_addr)
520 {
521 	struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
522 	u32 launch_dma = 0;
523 	int ret;
524 
525 	ret = PUSH_WAIT(push, 12);
526 	if (ret)
527 		return ret;
528 
529 	switch (dst_aper) {
530 	case NOUVEAU_APER_VRAM:
531 		PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
532 			  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
533 		break;
534 	case NOUVEAU_APER_HOST:
535 		PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
536 			  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
537 		break;
538 	default:
539 		return -EINVAL;
540 	}
541 
542 	launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
543 
544 	PUSH_MTHD(push, NVA0B5, SET_REMAP_CONST_A, 0,
545 				SET_REMAP_CONST_B, 0,
546 
547 				SET_REMAP_COMPONENTS,
548 		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_X, CONST_A) |
549 		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_Y, CONST_B) |
550 		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, COMPONENT_SIZE, FOUR) |
551 		  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, NUM_DST_COMPONENTS, TWO));
552 
553 	PUSH_MTHD(push, NVA0B5, OFFSET_OUT_UPPER,
554 		  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),
555 
556 				OFFSET_OUT_LOWER, lower_32_bits(dst_addr));
557 
558 	PUSH_MTHD(push, NVA0B5, LINE_LENGTH_IN, length >> 3);
559 
560 	PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
561 		  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
562 		  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
563 		  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
564 		  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
565 		  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
566 		  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
567 		  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, FALSE) |
568 		  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, TRUE) |
569 		  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
570 	return 0;
571 }
572 
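/*
 * Hook up the copy/clear implementations for the copy-engine class bound to
 * the TTM channel.  Only the Pascal, Volta and Turing DMA copy classes are
 * supported; anything else leaves DMEM disabled.
 */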
573 static int
574 nouveau_dmem_migrate_init(struct nouveau_drm *drm)
575 {
576 	switch (drm->ttm.copy.oclass) {
577 	case PASCAL_DMA_COPY_A:
578 	case PASCAL_DMA_COPY_B:
579 	case  VOLTA_DMA_COPY_A:
580 	case TURING_DMA_COPY_A:
581 		drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
582 		drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
583 		drm->dmem->migrate.chan = drm->ttm.chan;
584 		return 0;
585 	default:
586 		break;
587 	}
588 	return -ENODEV;
589 }
590 
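/*
 * Set up DMEM for this device: allocate the nouveau_dmem state, initialize
 * the chunk list and locks, and pick the migration helpers.  Silently skipped
 * on pre-Pascal hardware or when no usable copy class is available.
 */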
591 void
592 nouveau_dmem_init(struct nouveau_drm *drm)
593 {
594 	int ret;
595 
596 	/* This only makes sense on PASCAL or newer */
597 	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
598 		return;
599 
600 	if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
601 		return;
602 
603 	drm->dmem->drm = drm;
604 	mutex_init(&drm->dmem->mutex);
605 	INIT_LIST_HEAD(&drm->dmem->chunks);
607 	spin_lock_init(&drm->dmem->lock);
608 
609 	/* Initialize migration dma helpers before registering memory */
610 	ret = nouveau_dmem_migrate_init(drm);
611 	if (ret) {
612 		kfree(drm->dmem);
613 		drm->dmem = NULL;
614 	}
615 }
616 
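/*
 * Migrate one page of system memory into VRAM: allocate a device-private
 * page, copy the source (or clear the destination when the source is an
 * unpopulated hole), and build the NVIF pfn entry that will be fed to the
 * GPU page tables.  Returns the migrate pfn for the destination page, or 0
 * on failure.
 */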
617 static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
618 		struct nouveau_svmm *svmm, unsigned long src,
619 		dma_addr_t *dma_addr, u64 *pfn)
620 {
621 	struct device *dev = drm->dev->dev;
622 	struct page *dpage, *spage;
623 	unsigned long paddr;
624 
625 	spage = migrate_pfn_to_page(src);
626 	if (!(src & MIGRATE_PFN_MIGRATE))
627 		goto out;
628 
629 	dpage = nouveau_dmem_page_alloc_locked(drm);
630 	if (!dpage)
631 		goto out;
632 
633 	paddr = nouveau_dmem_page_addr(dpage);
634 	if (spage) {
635 		*dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
636 					 DMA_BIDIRECTIONAL);
637 		if (dma_mapping_error(dev, *dma_addr))
638 			goto out_free_page;
639 		if (drm->dmem->migrate.copy_func(drm, 1,
640 			NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
641 			goto out_dma_unmap;
642 	} else {
643 		*dma_addr = DMA_MAPPING_ERROR;
644 		if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
645 			NOUVEAU_APER_VRAM, paddr))
646 			goto out_free_page;
647 	}
648 
649 	dpage->zone_device_data = svmm;
650 	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
651 		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
652 	if (src & MIGRATE_PFN_WRITE)
653 		*pfn |= NVIF_VMM_PFNMAP_V0_W;
654 	return migrate_pfn(page_to_pfn(dpage));
655 
656 out_dma_unmap:
657 	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
658 out_free_page:
659 	nouveau_dmem_page_free_locked(drm, dpage);
660 out:
661 	*pfn = NVIF_VMM_PFNMAP_V0_NONE;
662 	return 0;
663 }
664 
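/*
 * Migrate one window of pages set up by migrate_vma_setup(): fill in the
 * destination pfns, emit a fence on the migration channel, commit the
 * migration and update the GPU page tables via nouveau_pfns_map(), then
 * unmap the DMA addresses used for the copies.
 */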
665 static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
666 		struct nouveau_svmm *svmm, struct migrate_vma *args,
667 		dma_addr_t *dma_addrs, u64 *pfns)
668 {
669 	struct nouveau_fence *fence;
670 	unsigned long addr = args->start, nr_dma = 0, i;
671 
672 	for (i = 0; addr < args->end; i++) {
673 		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
674 				args->src[i], dma_addrs + nr_dma, pfns + i);
675 		if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
676 			nr_dma++;
677 		addr += PAGE_SIZE;
678 	}
679 
680 	if (!nouveau_fence_new(&fence))
681 		nouveau_fence_emit(fence, drm->dmem->migrate.chan);
682 	migrate_vma_pages(args);
683 	nouveau_dmem_fence_done(&fence);
684 	nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
685 
686 	while (nr_dma--) {
687 		dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
688 				DMA_BIDIRECTIONAL);
689 	}
690 	migrate_vma_finalize(args);
691 }
692 
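/*
 * Migrate the range [start, end) of a VMA into VRAM, working in windows of
 * at most SG_MAX_SINGLE_ALLOC pages so the src/dst/pfn arrays stay a bounded
 * size.
 */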
693 int
694 nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
695 			 struct nouveau_svmm *svmm,
696 			 struct vm_area_struct *vma,
697 			 unsigned long start,
698 			 unsigned long end)
699 {
700 	unsigned long npages = (end - start) >> PAGE_SHIFT;
701 	unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
702 	dma_addr_t *dma_addrs;
703 	struct migrate_vma args = {
704 		.vma		= vma,
705 		.start		= start,
706 		.pgmap_owner	= drm->dev,
707 		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
708 	};
709 	unsigned long i;
710 	u64 *pfns;
711 	int ret = -ENOMEM;
712 
713 	if (drm->dmem == NULL)
714 		return -ENODEV;
715 
716 	args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
717 	if (!args.src)
718 		goto out;
719 	args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
720 	if (!args.dst)
721 		goto out_free_src;
722 
723 	dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
724 	if (!dma_addrs)
725 		goto out_free_dst;
726 
727 	pfns = nouveau_pfns_alloc(max);
728 	if (!pfns)
729 		goto out_free_dma;
730 
731 	for (i = 0; i < npages; i += max) {
732 		if (args.start + (max << PAGE_SHIFT) > end)
733 			args.end = end;
734 		else
735 			args.end = args.start + (max << PAGE_SHIFT);
736 
737 		ret = migrate_vma_setup(&args);
738 		if (ret)
739 			goto out_free_pfns;
740 
741 		if (args.cpages)
742 			nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
743 						   pfns);
744 		args.start = args.end;
745 	}
746 
747 	ret = 0;
748 out_free_pfns:
749 	nouveau_pfns_free(pfns);
750 out_free_dma:
751 	kfree(dma_addrs);
752 out_free_dst:
753 	kfree(args.dst);
754 out_free_src:
755 	kfree(args.src);
756 out:
757 	return ret;
758 }
759