/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

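/*
 * TTM memory type manager for VRAM.  Rather than using TTM's range
 * manager, allocations are forwarded to the nouveau_fb RAM allocator;
 * init/fini merely stash and clear the fb pointer in man->priv.
 */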
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	man->priv = pfb;
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;
	return 0;
}

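/*
 * Unmap and release any virtual address ranges still attached to a
 * memory node before its backing storage is freed.
 */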
static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
	if (node->vma[0].node) {
		nouveau_vm_unmap(&node->vma[0]);
		nouveau_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nouveau_vm_unmap(&node->vma[1]);
		nouveau_vm_put(&node->vma[1]);
	}
}

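/*
 * Free a VRAM allocation: drop its mappings, then hand the node back
 * to the RAM allocator.
 */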
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	nouveau_mem_node_cleanup(mem->mm_node);
	pfb->ram->put(pfb, (struct nouveau_mem **)&mem->mm_node);
}

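/*
 * Allocate VRAM for a buffer object.  Buffers marked NONCONTIG may be
 * assembled from pieces as small as one (large) page.  -ENOSPC is
 * reported to TTM as success with a NULL mm_node so it can fall back
 * to another placement instead of failing outright.
 */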
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 uint32_t flags,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;
	u32 size_nc = 0;
	int ret;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
			    mem->page_alignment << PAGE_SHIFT, size_nc,
			    (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}

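/*
 * Dump the VRAM mm: one line per node (type, start, end), followed by
 * totals.  Offsets and lengths are kept in 4KiB units, hence the shift
 * by 12 when printing byte addresses.
 */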
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nouveau_fb *pfb = man->priv;
	struct nouveau_mm *mm = &pfb->vram;
	struct nouveau_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&nv_subdev(pfb)->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&nv_subdev(pfb)->mutex);

	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s  block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	.init = nouveau_vram_manager_init,
	.takedown = nouveau_vram_manager_fini,
	.get_node = nouveau_vram_manager_new,
	.put_node = nouveau_vram_manager_del,
	.debug = nouveau_vram_manager_debug,
};

static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nouveau_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

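/*
 * Allocate a GART (system memory) node.  Only the page shift and the
 * storage type derived from the object's tile flags are recorded here;
 * no GART offset is reserved (mem->start = 0), the actual VM mapping
 * is set up later when the buffer is bound.
 */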
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 uint32_t flags,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	switch (drm->device.info.family) {
	case NV_DEVICE_INFO_V0_TESLA:
		if (drm->device.info.chipset != 0x50)
			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
		break;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
		break;
	default:
		break;
	}

	mem->mm_node = node;
	mem->start   = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	.init = nouveau_gart_manager_init,
	.takedown = nouveau_gart_manager_fini,
	.get_node = nouveau_gart_manager_new,
	.put_node = nouveau_gart_manager_del,
	.debug = nouveau_gart_manager_debug,
};

/*XXX*/
#include <core/subdev/vm/nv04.h>
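/*
 * GART manager for NV04-class chips: takes a reference on the VM owned
 * by the nv04 vmmgr and hands out address ranges from it.  The include
 * above reaches into core internals, hence the XXX.
 */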
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nouveau_vmmgr *vmm = nvkm_vmmgr(&drm->device);
	struct nv04_vmmgr_priv *priv = (void *)vmm;
	struct nouveau_vm *vm = NULL;
	nouveau_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nouveau_vm *vm = man->priv;
	nouveau_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node = mem->mm_node;
	if (node->vma[0].node)
		nouveau_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

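/*
 * Reserve a range of GART address space from the nv04 VM; the pages
 * themselves are mapped later when the buffer is bound.
 */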
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      struct ttm_placement *placement,
		      uint32_t flags,
		      struct ttm_mem_reg *mem)
{
	struct nouveau_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			     NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	.init = nv04_gart_manager_init,
	.takedown = nv04_gart_manager_fini,
	.get_node = nv04_gart_manager_new,
	.put_node = nv04_gart_manager_del,
	.debug = nv04_gart_manager_debug,
};

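/*
 * mmap() entry point: offsets below DRM_FILE_PAGE_OFFSET belong to the
 * legacy DRM maps, everything above is a TTM buffer object.
 */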
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

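/*
 * Take references on the device-independent TTM state: the memory
 * accounting object first, then the buffer object global state that
 * depends on it.  Released in reverse order on failure or teardown.
 */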
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}

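/*
 * Set up TTM for the device: choose a DMA mask from the MMU's
 * addressing limit (capped to 32 bits for AGP or when the PCI device
 * cannot address more), initialise the BO device, create the VRAM and
 * GART memory managers, and map BAR1 write-combined.
 */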
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	u32 bits;
	int ret;

	bits = nvkm_vmmgr(&drm->device)->dma_bits;
	if (nv_device_is_pci(nvkm_device(&drm->device))) {
		if (drm->agp.stat == ENABLED ||
		    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
			bits = 32;

		ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
		if (ret)
			return ret;

		ret = pci_set_consistent_dma_mask(dev->pdev,
						  DMA_BIT_MASK(bits));
		if (ret)
			pci_set_consistent_dma_mask(dev->pdev,
						    DMA_BIT_MASK(32));
	}

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				  drm->ttm.bo_global_ref.ref.object,
				  &nouveau_bo_driver,
				  dev->anon_inode->i_mapping,
				  DRM_FILE_PAGE_OFFSET,
				  bits <= 32);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->device.info.ram_user;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			     drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvkm_device(&drm->device), 1),
					 nv_device_resource_len(nvkm_device(&drm->device), 1));

	/* GART init */
	if (drm->agp.stat != ENABLED) {
		drm->gem.gart_available = nvkm_vmmgr(&drm->device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			     drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

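/*
 * Tear down in reverse order: drain the VRAM and GART managers, release
 * the BO device and global references, then drop the BAR1 write-combine
 * mapping.
 */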
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	mutex_lock(&drm->dev->struct_mutex);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
	mutex_unlock(&drm->dev->struct_mutex);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
}