#include <linux/pagemap.h>
#include <linux/slab.h>

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"

struct nouveau_sgdma_be {
	/* this has to be the first field so populate/unpopulate in
	 * nouveau_bo.c works properly, otherwise we would have to move
	 * them here
	 */
	struct ttm_dma_tt ttm;
	struct drm_device *dev;
	struct nouveau_mem *node;
};

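/* common destructor for both backends: ttm_dma_tt_fini() releases the
 * ttm_dma_tt state (the page and dma_address arrays) before the wrapper
 * structure itself is freed
 */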
static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (ttm) {
		ttm_dma_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}

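/* pre-NV50: map the backing pages through node->vma[0] immediately at
 * bind time.  Imported buffers carry an sg_table and take the
 * map_sg_table path; normally allocated buffers are mapped from the
 * dma_address array filled in when the ttm was populated.  The GPU uses
 * 4 KiB pages, hence the shift by 12.
 */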
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;
	u64 size = (u64)mem->num_pages << 12; /* avoid 32-bit shift overflow */

	if (ttm->sg) {
		node->sg = ttm->sg;
		nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
	} else {
		node->pages = nvbe->ttm.dma_address;
		nouveau_vm_map_sg(&node->vma[0], 0, size, node);
	}

	nvbe->node = node;
	return 0;
}

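/* drop the mapping established by nv04_sgdma_bind() */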
static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	nouveau_vm_unmap(&nvbe->node->vma[0]);
	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.bind			= nv04_sgdma_bind,
	.unbind			= nv04_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

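/* NV50 and later have per-client virtual memory, and the page tables are
 * updated from the move_notify() hook in nouveau_bo.c instead, so bind
 * and unbind only need to record where the backing pages live
 */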
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	/* noop: bound in move_notify() */
	if (ttm->sg) {
		node->sg = ttm->sg;
	} else {
		node->pages = nvbe->ttm.dma_address;
	}
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
	/* noop: unbound in move_notify() */
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.bind			= nv50_sgdma_bind,
	.unbind			= nv50_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

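/* allocate and initialise the TTM backend for a GART-backed buffer,
 * selecting the immediate-map backend for pre-NV50 cards and the
 * deferred one otherwise; returns NULL on failure
 */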
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = drm->dev;
	if (nv_device(drm->device)->card_type < NV_50)
		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
	else
		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
		/* a failing ttm_dma_tt_init() calls ttm_tt_destroy(), and
		 * thus our nouveau_sgdma_destroy() hook, which already frees
		 * nvbe; freeing it again here would be a double free
		 */
		return NULL;
	return &nvbe->ttm.ttm;
}