1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
4  * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sub license,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the
14  * next paragraph) shall be included in all copies or substantial portions
15  * of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
21  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23  * USE OR OTHER DEALINGS IN THE SOFTWARE.
24  */
25 
26 #include <linux/limits.h>
27 #include <linux/swiotlb.h>
28 
29 #include "nouveau_drv.h"
30 #include "nouveau_gem.h"
31 #include "nouveau_mem.h"
32 #include "nouveau_ttm.h"
33 
34 #include <core/tegra.h>
35 
/* Common .free hook for all nouveau resource managers: releases the
 * nouveau_mem backing (and any VMA/VRAM it holds) for @reg.
 */
static void
nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
{
	nouveau_mem_del(reg);
}
41 
42 static int
43 nouveau_vram_manager_new(struct ttm_resource_manager *man,
44 			 struct ttm_buffer_object *bo,
45 			 const struct ttm_place *place,
46 			 struct ttm_resource *reg)
47 {
48 	struct nouveau_bo *nvbo = nouveau_bo(bo);
49 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
50 	int ret;
51 
52 	if (drm->client.device.info.ram_size == 0)
53 		return -ENOMEM;
54 
55 	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
56 	if (ret)
57 		return ret;
58 
59 	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
60 	if (ret) {
61 		nouveau_mem_del(reg);
62 		return ret;
63 	}
64 
65 	return 0;
66 }
67 
/* TTM resource-manager ops for the TTM_PL_VRAM domain (Tesla and newer). */
const struct ttm_resource_manager_func nouveau_vram_manager = {
	.alloc = nouveau_vram_manager_new,
	.free = nouveau_manager_del,
};
72 
73 static int
74 nouveau_gart_manager_new(struct ttm_resource_manager *man,
75 			 struct ttm_buffer_object *bo,
76 			 const struct ttm_place *place,
77 			 struct ttm_resource *reg)
78 {
79 	struct nouveau_bo *nvbo = nouveau_bo(bo);
80 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
81 	int ret;
82 
83 	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
84 	if (ret)
85 		return ret;
86 
87 	reg->start = 0;
88 	return 0;
89 }
90 
/* TTM resource-manager ops for the TTM_PL_TT domain (Tesla and newer). */
const struct ttm_resource_manager_func nouveau_gart_manager = {
	.alloc = nouveau_gart_manager_new,
	.free = nouveau_manager_del,
};
95 
96 static int
97 nv04_gart_manager_new(struct ttm_resource_manager *man,
98 		      struct ttm_buffer_object *bo,
99 		      const struct ttm_place *place,
100 		      struct ttm_resource *reg)
101 {
102 	struct nouveau_bo *nvbo = nouveau_bo(bo);
103 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
104 	struct nouveau_mem *mem;
105 	int ret;
106 
107 	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
108 	mem = nouveau_mem(reg);
109 	if (ret)
110 		return ret;
111 
112 	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
113 			   (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
114 	if (ret) {
115 		nouveau_mem_del(reg);
116 		return ret;
117 	}
118 
119 	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
120 	return 0;
121 }
122 
/* TTM resource-manager ops for the TTM_PL_TT domain on NV04-class chips. */
const struct ttm_resource_manager_func nv04_gart_manager = {
	.alloc = nv04_gart_manager_new,
	.free = nouveau_manager_del,
};
127 
128 static int
129 nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
130 {
131 	struct nvif_mmu *mmu = &drm->client.mmu;
132 	int typei;
133 
134 	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
135 					    kind | NVIF_MEM_COHERENT);
136 	if (typei < 0)
137 		return -ENOSYS;
138 
139 	drm->ttm.type_host[!!kind] = typei;
140 
141 	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
142 	if (typei < 0)
143 		return -ENOSYS;
144 
145 	drm->ttm.type_ncoh[!!kind] = typei;
146 	return 0;
147 }
148 
149 static int
150 nouveau_ttm_init_vram(struct nouveau_drm *drm)
151 {
152 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
153 		struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);
154 
155 		if (!man)
156 			return -ENOMEM;
157 
158 		man->func = &nouveau_vram_manager;
159 
160 		ttm_resource_manager_init(man,
161 					  drm->gem.vram_available >> PAGE_SHIFT);
162 		ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);
163 		ttm_resource_manager_set_used(man, true);
164 		return 0;
165 	} else {
166 		return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false,
167 					  drm->gem.vram_available >> PAGE_SHIFT);
168 	}
169 }
170 
171 static void
172 nouveau_ttm_fini_vram(struct nouveau_drm *drm)
173 {
174 	struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
175 
176 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
177 		ttm_resource_manager_set_used(man, false);
178 		ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
179 		ttm_resource_manager_cleanup(man);
180 		ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);
181 		kfree(man);
182 	} else
183 		ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);
184 }
185 
186 static int
187 nouveau_ttm_init_gtt(struct nouveau_drm *drm)
188 {
189 	struct ttm_resource_manager *man;
190 	unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT;
191 	const struct ttm_resource_manager_func *func = NULL;
192 
193 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
194 		func = &nouveau_gart_manager;
195 	else if (!drm->agp.bridge)
196 		func = &nv04_gart_manager;
197 	else
198 		return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true,
199 					  size_pages);
200 
201 	man = kzalloc(sizeof(*man), GFP_KERNEL);
202 	if (!man)
203 		return -ENOMEM;
204 
205 	man->func = func;
206 	man->use_tt = true;
207 	ttm_resource_manager_init(man, size_pages);
208 	ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);
209 	ttm_resource_manager_set_used(man, true);
210 	return 0;
211 }
212 
213 static void
214 nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
215 {
216 	struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT);
217 
218 	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
219 	    drm->agp.bridge)
220 		ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT);
221 	else {
222 		ttm_resource_manager_set_used(man, false);
223 		ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
224 		ttm_resource_manager_cleanup(man);
225 		ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL);
226 		kfree(man);
227 	}
228 }
229 
230 int
231 nouveau_ttm_init(struct nouveau_drm *drm)
232 {
233 	struct nvkm_device *device = nvxx_device(&drm->client.device);
234 	struct nvkm_pci *pci = device->pci;
235 	struct nvif_mmu *mmu = &drm->client.mmu;
236 	struct drm_device *dev = drm->dev;
237 	bool need_swiotlb = false;
238 	int typei, ret;
239 
240 	ret = nouveau_ttm_init_host(drm, 0);
241 	if (ret)
242 		return ret;
243 
244 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
245 	    drm->client.device.info.chipset != 0x50) {
246 		ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
247 		if (ret)
248 			return ret;
249 	}
250 
251 	if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
252 	    drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
253 		typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
254 					   NVIF_MEM_KIND |
255 					   NVIF_MEM_COMP |
256 					   NVIF_MEM_DISP);
257 		if (typei < 0)
258 			return -ENOSYS;
259 
260 		drm->ttm.type_vram = typei;
261 	} else {
262 		drm->ttm.type_vram = -1;
263 	}
264 
265 	if (pci && pci->agp.bridge) {
266 		drm->agp.bridge = pci->agp.bridge;
267 		drm->agp.base = pci->agp.base;
268 		drm->agp.size = pci->agp.size;
269 		drm->agp.cma = pci->agp.cma;
270 	}
271 
272 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
273 	need_swiotlb = is_swiotlb_active();
274 #endif
275 
276 	ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
277 				  dev->anon_inode->i_mapping,
278 				  dev->vma_offset_manager, need_swiotlb,
279 				  drm->client.mmu.dmabits <= 32);
280 	if (ret) {
281 		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
282 		return ret;
283 	}
284 
285 	/* VRAM init */
286 	drm->gem.vram_available = drm->client.device.info.ram_user;
287 
288 	arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
289 				   device->func->resource_size(device, 1));
290 
291 	ret = nouveau_ttm_init_vram(drm);
292 	if (ret) {
293 		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
294 		return ret;
295 	}
296 
297 	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
298 					 device->func->resource_size(device, 1));
299 
300 	/* GART init */
301 	if (!drm->agp.bridge) {
302 		drm->gem.gart_available = drm->client.vmm.vmm.limit;
303 	} else {
304 		drm->gem.gart_available = drm->agp.size;
305 	}
306 
307 	ret = nouveau_ttm_init_gtt(drm);
308 	if (ret) {
309 		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
310 		return ret;
311 	}
312 
313 	mutex_init(&drm->ttm.io_reserve_mutex);
314 	INIT_LIST_HEAD(&drm->ttm.io_reserve_lru);
315 
316 	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
317 	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
318 	return 0;
319 }
320 
/* Tear down everything set up by nouveau_ttm_init(), in reverse order:
 * resource managers first (they evict outstanding BOs), then the TTM
 * device, then the MTRR and BAR1 write-combine reservation.
 */
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);

	/* Managers must go before ttm_device_fini(). */
	nouveau_ttm_fini_vram(drm);
	nouveau_ttm_fini_gtt(drm);

	ttm_device_fini(&drm->ttm.bdev);

	/* Release the BAR1 write-combine mapping state. */
	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
	arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
				device->func->resource_size(device, 1));

}
337