/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

#include "drm_legacy.h"
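
/* Stash a pointer to the fb subdev; the debug callback uses it to walk
 * the VRAM mm.  The mm itself is owned and initialised by the subdev.
 */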
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	man->priv = pfb;
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;
	return 0;
}

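/* Drop any GPU virtual address mappings still attached to a node before
 * its backing memory is released.
 */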
static inline void
nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
	if (node->vma[0].node) {
		nvkm_vm_unmap(&node->vma[0]);
		nvkm_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nvkm_vm_unmap(&node->vma[1]);
		nvkm_vm_put(&node->vma[1]);
	}
}

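/* Return a VRAM allocation to the fb subdev, tearing down its VM
 * mappings first.
 */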
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	nvkm_mem_node_cleanup(mem->mm_node);
	pfb->ram->put(pfb, (struct nvkm_mem **)&mem->mm_node);
}

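/* Allocate VRAM for a buffer object.  Objects tagged with
 * NOUVEAU_GEM_TILE_NONCONTIG may be backed by non-contiguous chunks of at
 * least one large page; the remaining tile flags select the memory type.
 * -ENOSPC is translated to success with a NULL mm_node, which tells TTM
 * the space wasn't available so it can attempt eviction and retry.
 */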
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;
	u32 size_nc = 0;
	int ret;

	if (drm->device.info.ram_size == 0)
		return -ENOMEM;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
			   mem->page_alignment << PAGE_SHIFT, size_nc,
			   (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}

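/* Dump every node in the VRAM mm, followed by totals.  Offsets and
 * lengths are stored in units of 4KiB pages, hence the shifts by 12.
 */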
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nvkm_fb *pfb = man->priv;
	struct nvkm_mm *mm = &pfb->vram;
	struct nvkm_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&nv_subdev(pfb)->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&nv_subdev(pfb)->mutex);

	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s  block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};

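/* The GART manager keeps no state of its own, so init and fini are
 * no-ops; GPU virtual addresses are assigned when the object is mapped.
 */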
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nvkm_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

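/* Allocate the tracking node for a GART placement, recording the memory
 * type encoded in the object's tile flags according to GPU family.  The
 * actual GPU virtual address is assigned at bind time, so mem->start is
 * left at 0.
 */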
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	switch (drm->device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		break;
	case NV_DEVICE_INFO_V0_TESLA:
		if (drm->device.info.chipset != 0x50)
			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
		break;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
		break;
	default:
		NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
			drm->device.info.family);
		break;
	}

	mem->mm_node = node;
	mem->start   = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};

/*XXX*/
#include <subdev/mmu/nv04.h>
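
/* nv04-class chips use a single global VM for the GART aperture; take a
 * reference on it for the lifetime of the manager.
 */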
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
	struct nv04_mmu_priv *priv = (void *)mmu;
	struct nvkm_vm *vm = NULL;
	nvkm_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nvkm_vm *vm = man->priv;
	nvkm_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node = mem->mm_node;
	if (node->vma[0].node)
		nvkm_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

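/* Reserve a range of the nv04 GART address space for the object; the
 * offset of the resulting vma becomes the placement's start address.
 */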
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			  NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	nv04_gart_manager_init,
	nv04_gart_manager_fini,
	nv04_gart_manager_new,
	nv04_gart_manager_del,
	nv04_gart_manager_debug
};

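/* Route offsets below DRM_FILE_PAGE_OFFSET to the legacy DRM mmap path;
 * everything at or above it maps a TTM buffer object.
 */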
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_legacy_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

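/* Take references on the global TTM memory-accounting and BO state,
 * creating them on first use.  On failure, mem_global_ref.release is
 * left NULL so nouveau_ttm_global_release() knows there is nothing to
 * drop.
 */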
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}

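/* Bring up TTM for the device: pick a DMA mask from the MMU's
 * capabilities (clamped to 32 bits when AGP is active or the bus can't
 * do better), initialise the BO driver, create the VRAM and GART
 * memory-type managers, and map the VRAM BAR write-combined.
 */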
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	u32 bits;
	int ret;

	bits = nvxx_mmu(&drm->device)->dma_bits;
	if (nv_device_is_pci(nvxx_device(&drm->device))) {
		if (drm->agp.stat == ENABLED ||
		    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
			bits = 32;

		ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
		if (ret)
			return ret;

		ret = pci_set_consistent_dma_mask(dev->pdev,
						  DMA_BIT_MASK(bits));
		if (ret)
			pci_set_consistent_dma_mask(dev->pdev,
						    DMA_BIT_MASK(32));
	}

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				  drm->ttm.bo_global_ref.ref.object,
				  &nouveau_bo_driver,
				  dev->anon_inode->i_mapping,
				  DRM_FILE_PAGE_OFFSET,
				  bits <= 32);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->device.info.ram_user;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			      drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvxx_device(&drm->device), 1),
					 nv_device_resource_len(nvxx_device(&drm->device), 1));

	/* GART init */
	if (drm->agp.stat != ENABLED) {
		drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			      drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

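/* Tear down in reverse order: drain and destroy the memory-type
 * managers, release the BO device and global state, then drop the
 * write-combine mapping of the VRAM BAR.
 */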
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	mutex_lock(&drm->dev->struct_mutex);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
	mutex_unlock(&drm->dev->struct_mutex);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
}