/*
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Skeggs <bskeggs@redhat.com>
 *    Roy Spliet <r.spliet@student.tudelft.nl>
 */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"

#include "nouveau_drv.h"
#include "nouveau_pm.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
#include "nouveau_fifo.h"
#include "nouveau_fence.h"

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_mem_update_tile_region(struct drm_device *dev,
			    struct nouveau_tile_reg *tile, uint32_t addr,
			    uint32_t size, uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	int i = tile - dev_priv->tile.reg, j;
	unsigned long save;

	nouveau_fence_unref(&tile->fence);

	if (tile->pitch)
		pfb->free_tile_region(dev, i);

	if (pitch)
		pfb->init_tile_region(dev, i, addr, size, pitch, flags);

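	/* Quiesce PFIFO while the tile region is reprogrammed: disable the
	 * caches, stop cache pulls and wait for the card to go idle so no
	 * in-flight methods race with the tiling change. */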
	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
	nv_wr32(dev, NV03_PFIFO_CACHES, 0);
	nv04_fifo_cache_pull(dev, false);

	nouveau_wait_for_idle(dev);

	pfb->set_tile_region(dev, i);
	for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
		if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
			dev_priv->eng[j]->set_tile_region(dev, i);
	}

	nv04_fifo_cache_pull(dev, true);
	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}

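/* Claim tile region i if it is currently unused; returns NULL when the
 * region is busy or a previous user's fence has not yet signalled. */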
static struct nouveau_tile_reg *
nv10_mem_get_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	spin_lock(&dev_priv->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&dev_priv->tile.lock);
	return tile;
}

void
nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
			 struct nouveau_fence *fence)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (tile) {
		spin_lock(&dev_priv->tile.lock);
		if (fence) {
			/* Mark it as pending. */
			tile->fence = fence;
			nouveau_fence_ref(fence);
		}

		tile->used = false;
		spin_unlock(&dev_priv->tile.lock);
	}
}

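/* Find a free tile region for the requested tiling configuration. While
 * scanning, stale regions (still programmed with a pitch but no longer
 * in use) are torn down so hardware state matches the tracking state. */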
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
		    uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_tile_reg *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->num_tiles; i++) {
		tile = nv10_mem_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && tile->pitch) {
			/* Kill an unused tile region. */
			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_mem_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_mem_update_tile_region(dev, found, addr, size,
					    pitch, flags);
	return found;
}

/*
 * Cleanup everything
 */
void
nouveau_mem_vram_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	ttm_bo_device_release(&dev_priv->ttm.bdev);

	nouveau_ttm_global_release(dev_priv);

	if (dev_priv->fb_mtrr >= 0) {
		drm_mtrr_del(dev_priv->fb_mtrr,
			     pci_resource_start(dev->pdev, 1),
			     pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
		dev_priv->fb_mtrr = -1;
	}
}

void
nouveau_mem_gart_fini(struct drm_device *dev)
{
	nouveau_sgdma_takedown(dev);

	if (drm_core_has_AGP(dev) && dev->agp) {
		struct drm_agp_mem *entry, *tempe;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
			if (entry->bound)
				drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			kfree(entry);
		}
		INIT_LIST_HEAD(&dev->agp->memory);

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}
}

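/* Only linear (untiled) layouts are acceptable here; requests carrying
 * tiling layout flags are rejected. */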
bool
nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
{
	return !(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK);
}

#if __OS_HAS_AGP
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * FW seems to be broken on nv18, it makes the card lock up
	 * randomly.
	 */
	if (dev_priv->chipset == 0x18)
		mode &= ~PCI_AGP_COMMAND_FW;

	/*
	 * AGP mode set in the command line.
	 */
	if (nouveau_agpmode > 0) {
		bool agpv3 = mode & 0x8;
		int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;

		mode = (mode & ~0x7) | (rate & 0x7);
	}

	return mode;
}
#endif

int
nouveau_mem_reset_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
	uint32_t saved_pci_nv_1, pmc_enable;
	int ret;

	/* First of all, disable fast writes, otherwise if it's
	 * already enabled in the AGP bridge and we disable the card's
	 * AGP controller we might be locking ourselves out of it. */
	if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
		struct drm_agp_info info;
		struct drm_agp_mode mode;

		ret = drm_agp_info(dev, &info);
		if (ret)
			return ret;

		mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
		ret = drm_agp_enable(dev, mode);
		if (ret)
			return ret;
	}

	saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);

	/* clear busmaster bit */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
	/* disable AGP */
	nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);

	/* power cycle pgraph, if enabled */
	pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
		nv_wr32(dev, NV03_PMC_ENABLE,
				pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
		nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
				NV_PMC_ENABLE_PGRAPH);
	}

	/* and restore (gives effect of resetting AGP) */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
#endif

	return 0;
}

int
nouveau_mem_init_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	if (!dev->agp->acquired) {
		ret = drm_agp_acquire(dev);
		if (ret) {
			NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
			return ret;
		}
	}

	nouveau_mem_reset_agp(dev);

	ret = drm_agp_info(dev, &info);
	if (ret) {
		NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
		return ret;
	}

	/* see agp.h for the AGPSTAT_* modes available */
	mode.mode = get_agp_mode(dev, info.mode);
	ret = drm_agp_enable(dev, mode);
	if (ret) {
		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.type	= NOUVEAU_GART_AGP;
	dev_priv->gart_info.aper_base	= info.aperture_base;
	dev_priv->gart_info.aper_size	= info.aperture_size;
#endif
	return 0;
}

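/* Map between NV_MEM_TYPE_* values and the human-readable names used
 * both for logging and for matching the vram_type= module parameter. */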
static const struct vram_types {
	int value;
	const char *name;
} vram_type_map[] = {
	{ NV_MEM_TYPE_STOLEN , "stolen system memory" },
	{ NV_MEM_TYPE_SGRAM  , "SGRAM" },
	{ NV_MEM_TYPE_SDRAM  , "SDRAM" },
	{ NV_MEM_TYPE_DDR1   , "DDR1" },
	{ NV_MEM_TYPE_DDR2   , "DDR2" },
	{ NV_MEM_TYPE_DDR3   , "DDR3" },
	{ NV_MEM_TYPE_GDDR2  , "GDDR2" },
	{ NV_MEM_TYPE_GDDR3  , "GDDR3" },
	{ NV_MEM_TYPE_GDDR4  , "GDDR4" },
	{ NV_MEM_TYPE_GDDR5  , "GDDR5" },
	{ NV_MEM_TYPE_UNKNOWN, "unknown type" }
};

int
nouveau_mem_vram_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	const struct vram_types *vram_type;
	int ret, dma_bits;

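	/* Pick the widest DMA mask the device supports: NV50+ parts can
	 * address 40 bits; everything else stays at the 32-bit default. */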
	dma_bits = 32;
	if (dev_priv->card_type >= NV_50) {
		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
			dma_bits = 40;
	} else
	if (0 && pci_is_pcie(dev->pdev) &&
	    dev_priv->chipset  > 0x40 &&
	    dev_priv->chipset != 0x45) {
		/* Note: the "0 &&" above deliberately disables this 39-bit
		 * path for PCIe NV4x parts; they keep the 32-bit default. */
		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
			dma_bits = 39;
	}

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret)
		return ret;
	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret) {
		/* Reset to default value. */
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
	}

	ret = nouveau_ttm_global_init(dev_priv);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
				 dev_priv->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				 dma_bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
		return ret;
	}

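	/* Resolve the VRAM type entry: honour a vram_type= override from
	 * the command line if given, otherwise look up the detected type. */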
	vram_type = vram_type_map;
	while (vram_type->value != NV_MEM_TYPE_UNKNOWN) {
		if (nouveau_vram_type) {
			if (!strcasecmp(nouveau_vram_type, vram_type->name)) {
				dev_priv->vram_type = vram_type->value;
				break;
			}
		} else {
			if (vram_type->value == dev_priv->vram_type)
				break;
		}
		vram_type++;
	}

	NV_INFO(dev, "Detected %dMiB VRAM (%s)\n",
		(int)(dev_priv->vram_size >> 20), vram_type->name);
	if (dev_priv->vram_sys_base) {
		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
			dev_priv->vram_sys_base);
	}

	dev_priv->fb_available_size = dev_priv->vram_size;
	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
		dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_aper_free = dev_priv->fb_available_size;

	/* mappable vram */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     dev_priv->fb_available_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
				     0, 0, NULL, &dev_priv->vga_ram);
		if (ret == 0)
			ret = nouveau_bo_pin(dev_priv->vga_ram,
					     TTM_PL_FLAG_VRAM);

		if (ret) {
			NV_WARN(dev, "failed to reserve VGA memory\n");
			nouveau_bo_ref(NULL, &dev_priv->vga_ram);
		}
	}

	dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
					 pci_resource_len(dev->pdev, 1),
					 DRM_MTRR_WC);
	return 0;
}

int
nouveau_mem_gart_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret;

	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

#if !defined(__powerpc__) && !defined(__ia64__)
	if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
		ret = nouveau_mem_init_agp(dev);
		if (ret)
			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
	}
#endif

	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		ret = nouveau_sgdma_init(dev);
		if (ret) {
			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
			return ret;
		}
	}

	NV_INFO(dev, "%d MiB GART (aperture)\n",
		(int)(dev_priv->gart_info.aper_size >> 20));
	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
		return ret;
	}

	return 0;
}

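/*
 * Memory timing calculation: convert a VBIOS timing-table entry into the
 * PFB timing register values for each card generation.
 */
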
static int
nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

	/* XXX: I don't trust the -1's and +1's... they must come
	 *      from somewhere! */
	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
		    1 << 16 |
		    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
		    (e->tCL + 2 - (t->tCWL - 1));

	t->reg[2] = 0x20200000 |
		    ((t->tCWL - 1) << 24 |
		     e->tRRD << 16 |
		     e->tRCDWR << 8 |
		     e->tRCDRD);

	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2]);
	return 0;
}

static int
nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct bit_entry P;
	uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;

	if (bit_table(dev, 'P', &P))
		return -EINVAL;

	switch (min(len, (u8) 22)) {
	case 22:
		unk21 = e->tUNK_21;
		/* fall through */
	case 21:
		unk20 = e->tUNK_20;
		/* fall through */
	case 20:
		if (e->tCWL > 0)
			t->tCWL = e->tCWL;
		/* fall through */
	case 19:
		unk18 = e->tUNK_18;
		break;
	}

	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
				max(unk18, (u8) 1) << 16 |
				(e->tWTR + 2 + (t->tCWL - 1)) << 8;

	t->reg[2] = ((t->tCWL - 1) << 24 |
		    e->tRRD << 16 |
		    e->tRCDWR << 8 |
		    e->tRCDRD);

	t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;

	t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);

	t->reg[8] = boot->reg[8] & 0xffffff00;

	if (P.version == 1) {
		t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));

		t->reg[3] = (0x14 + e->tCL) << 24 |
			    0x16 << 16 |
			    (e->tCL - 1) << 8 |
			    (e->tCL - 1);

		t->reg[4] |= boot->reg[4] & 0xffff0000;

		t->reg[6] = (0x33 - t->tCWL) << 16 |
			    t->tCWL << 8 |
			    (0x2e + e->tCL - t->tCWL);

		t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;

		/* XXX: P.version == 1 only has DDR2 and GDDR3? */
		if (dev_priv->vram_type == NV_MEM_TYPE_DDR2) {
			t->reg[5] |= (e->tCL + 3) << 8;
			t->reg[6] |= (t->tCWL - 2) << 8;
			t->reg[8] |= (e->tCL - 4);
		} else {
			t->reg[5] |= (e->tCL + 2) << 8;
			t->reg[6] |= t->tCWL << 8;
			t->reg[8] |= (e->tCL - 2);
		}
	} else {
		t->reg[1] |= (5 + e->tCL - (t->tCWL));

		/* XXX: 0xb? 0x30? */
		t->reg[3] = (0x30 + e->tCL) << 24 |
			    (boot->reg[3] & 0x00ff0000) |
			    (0xb + e->tCL) << 8 |
			    (e->tCL - 1);

		t->reg[4] |= (unk20 << 24 | unk21 << 16);

		/* XXX: +6? */
		t->reg[5] |= (t->tCWL + 6) << 8;

		t->reg[6] = (0x5a + e->tCL) << 16 |
			    (6 - e->tCL + t->tCWL) << 8 |
			    (0x50 + e->tCL - t->tCWL);

		tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
		t->reg[7] = (tmp7_3 << 24) |
			    ((tmp7_3 - 6 + e->tCL) << 16) |
			    0x202;
	}

	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
	NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
		 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
	NV_DEBUG(dev, "         240: %08x\n", t->reg[8]);
	return 0;
}

static int
nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	if (e->tCWL > 0)
		t->tCWL = e->tCWL;

	t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
		     e->tRFC << 8 | e->tRC);

	t->reg[1] = (boot->reg[1] & 0xff000000) |
		    (e->tRCDWR & 0x0f) << 20 |
		    (e->tRCDRD & 0x0f) << 14 |
		    (t->tCWL << 7) |
		    (e->tCL & 0x0f);

	t->reg[2] = (boot->reg[2] & 0xff0000ff) |
		    e->tWR << 16 | e->tWTR << 8;

	t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
		    (e->tUNK_21 & 0xf) << 5 |
		    (e->tUNK_13 & 0x1f);

	t->reg[4] = (boot->reg[4] & 0xfff00fff) |
		    (e->tRRD & 0x1f) << 15;

	NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
	NV_DEBUG(dev, "         2a0: %08x\n", t->reg[4]);
	return 0;
}

/*
 * MR generation methods
 */

static int
nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
		    struct nouveau_pm_tbl_entry *e, u8 len,
		    struct nouveau_pm_memtiming *boot,
		    struct nouveau_pm_memtiming *t)
{
	t->drive_strength = 0;
	if (len < 15) {
		t->odt = boot->odt;
	} else {
		t->odt = e->RAM_FT1 & 0x07;
	}

	if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (t->odt > 3) {
		NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x",
			t->id, t->odt);
		t->odt = 0;
	}

	t->mr[0] = (boot->mr[0] & 0x100f) |
		   (e->tCL) << 4 |
		   (e->tWR - 1) << 9;
	t->mr[1] = (boot->mr[1] & 0x101fbb) |
		   (t->odt & 0x1) << 2 |
		   (t->odt & 0x2) << 5;

	NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]);
	return 0;
}

uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
	0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};

static int
nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
		    struct nouveau_pm_tbl_entry *e, u8 len,
		    struct nouveau_pm_memtiming *boot,
		    struct nouveau_pm_memtiming *t)
{
	u8 cl = e->tCL - 4;

	t->drive_strength = 0;
	if (len < 15) {
		t->odt = boot->odt;
	} else {
		t->odt = e->RAM_FT1 & 0x07;
	}

	if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (e->tCWL < 5) {
		NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
		return -ERANGE;
	}

	t->mr[0] = (boot->mr[0] & 0x180b) |
		   /* CAS */
		   (cl & 0x7) << 4 |
		   (cl & 0x8) >> 1 |
		   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
	t->mr[1] = (boot->mr[1] & 0x101dbb) |
		   (t->odt & 0x1) << 2 |
		   (t->odt & 0x2) << 5 |
		   (t->odt & 0x4) << 7;
	t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;

	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
	return 0;
}

uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
	0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
	0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};

static int
nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	if (len < 15) {
		t->drive_strength = boot->drive_strength;
		t->odt = boot->odt;
	} else {
		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
		t->odt = e->RAM_FT1 & 0x07;
	}

	if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (t->odt > 3) {
		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
			t->id, t->odt);
		t->odt = 0;
	}

	t->mr[0] = (boot->mr[0] & 0xe0b) |
		   /* CAS */
		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
	t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
		   (t->odt << 2) |
		   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
	t->mr[2] = boot->mr[2];

	NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id,
		      t->mr[0], t->mr[1], t->mr[2]);
	return 0;
}

static int
nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	if (len < 15) {
		t->drive_strength = boot->drive_strength;
		t->odt = boot->odt;
	} else {
		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
		t->odt = e->RAM_FT1 & 0x03;
	}

	if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (t->odt > 3) {
		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
			t->id, t->odt);
		t->odt = 0;
	}

	t->mr[0] = (boot->mr[0] & 0x007) |
		   ((e->tCL - 5) << 3) |
		   ((e->tWR - 4) << 8);
	t->mr[1] = (boot->mr[1] & 0x1007f0) |
		   t->drive_strength |
		   (t->odt << 2);

	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
	return 0;
}

int
nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
			struct nouveau_pm_memtiming *t)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct nouveau_pm_memtiming *boot = &pm->boot.timing;
	struct nouveau_pm_tbl_entry *e;
	u8 ver, len, *ptr, *ramcfg;
	int ret;

	ptr = nouveau_perf_timing(dev, freq, &ver, &len);
	if (!ptr || ptr[0] == 0x00) {
		*t = *boot;
		return 0;
	}
	e = (struct nouveau_pm_tbl_entry *)ptr;

	t->tCWL = boot->tCWL;

	switch (dev_priv->card_type) {
	case NV_40:
		ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
		break;
	case NV_50:
		ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
		break;
	case NV_C0:
	case NV_D0:
		ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
		break;
	default:
		ret = -ENODEV;
		break;
	}

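	/* Multiplying by !ret forces the default (error) case when the
	 * timing calculation above has already failed. */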
	switch (dev_priv->vram_type * !ret) {
	case NV_MEM_TYPE_GDDR3:
		ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
		break;
	case NV_MEM_TYPE_GDDR5:
		ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
		break;
	case NV_MEM_TYPE_DDR2:
		ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
		break;
	case NV_MEM_TYPE_DDR3:
		ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
	if (ramcfg) {
		int dll_off;

		if (ver == 0x00)
			dll_off = !!(ramcfg[3] & 0x04);
		else
			dll_off = !!(ramcfg[2] & 0x40);

		switch (dev_priv->vram_type) {
		case NV_MEM_TYPE_GDDR3:
			t->mr[1] &= ~0x00000040;
			t->mr[1] |=  0x00000040 * dll_off;
			break;
		default:
			t->mr[1] &= ~0x00000001;
			t->mr[1] |=  0x00000001 * dll_off;
			break;
		}
	}

	return ret;
}

void
nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 timing_base, timing_regs, mr_base;
	int i;

	if (dev_priv->card_type >= NV_C0) {
		timing_base = 0x10f290;
		mr_base = 0x10f300;
	} else {
		timing_base = 0x100220;
		mr_base = 0x1002c0;
	}

	t->id = -1;

	switch (dev_priv->card_type) {
	case NV_50:
		timing_regs = 9;
		break;
	case NV_C0:
	case NV_D0:
		timing_regs = 5;
		break;
	case NV_30:
	case NV_40:
		timing_regs = 3;
		break;
	default:
		return;
	}

	for (i = 0; i < timing_regs; i++)
		t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i));

	t->tCWL = 0;
	if (dev_priv->card_type < NV_C0) {
		t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1;
	} else if (dev_priv->card_type <= NV_D0) {
		t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7);
	}

	t->mr[0] = nv_rd32(dev, mr_base);
	t->mr[1] = nv_rd32(dev, mr_base + 0x04);
	t->mr[2] = nv_rd32(dev, mr_base + 0x20);
	t->mr[3] = nv_rd32(dev, mr_base + 0x24);

	t->odt = 0;
	t->drive_strength = 0;

	switch (dev_priv->vram_type) {
	case NV_MEM_TYPE_DDR3:
		t->odt |= (t->mr[1] & 0x200) >> 7;
		/* fall through - DDR3 also uses the DDR2 ODT bits */
	case NV_MEM_TYPE_DDR2:
		t->odt |= (t->mr[1] & 0x04) >> 2 |
			  (t->mr[1] & 0x40) >> 5;
		break;
	case NV_MEM_TYPE_GDDR3:
	case NV_MEM_TYPE_GDDR5:
		t->drive_strength = t->mr[1] & 0x03;
		t->odt = (t->mr[1] & 0x0c) >> 2;
		break;
	default:
		break;
	}
}

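/* Execute a DRAM reclock: put the memory into self-refresh, switch the
 * input clock, then reprogram the mode registers and PFB timings. The
 * exec callbacks abstract the per-generation register interface. */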
int
nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
		 struct nouveau_pm_level *perflvl)
{
	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
	struct nouveau_pm_memtiming *info = &perflvl->timing;
	u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
	u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
	u32 mr1_dlloff;

	switch (dev_priv->vram_type) {
	case NV_MEM_TYPE_DDR2:
		tDLLK = 2000;
		mr1_dlloff = 0x00000001;
		break;
	case NV_MEM_TYPE_DDR3:
		tDLLK = 12000;
		tCKSRE = 2000;
		tXS = 1000;
		mr1_dlloff = 0x00000001;
		break;
	case NV_MEM_TYPE_GDDR3:
		tDLLK = 40000;
		mr1_dlloff = 0x00000040;
		break;
	default:
		NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n");
		return -ENODEV;
	}

	/* fetch current MRs */
	switch (dev_priv->vram_type) {
	case NV_MEM_TYPE_GDDR3:
	case NV_MEM_TYPE_DDR3:
		mr[2] = exec->mrg(exec, 2);
		/* fall through - every type reads MR0/MR1 */
	default:
		mr[1] = exec->mrg(exec, 1);
		mr[0] = exec->mrg(exec, 0);
		break;
	}

	/* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */
	if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
		exec->precharge(exec);
		exec->mrs (exec, 1, mr[1] | mr1_dlloff);
		exec->wait(exec, tMRD);
	}

	/* enter self-refresh mode */
	exec->precharge(exec);
	exec->refresh(exec);
	exec->refresh(exec);
	exec->refresh_auto(exec, false);
	exec->refresh_self(exec, true);
	exec->wait(exec, tCKSRE);

	/* modify input clock frequency */
	exec->clock_set(exec);

	/* exit self-refresh mode */
	exec->wait(exec, tCKSRX);
	exec->precharge(exec);
	exec->refresh_self(exec, false);
	exec->refresh_auto(exec, true);
	exec->wait(exec, tXS);

	/* update MRs */
	if (mr[2] != info->mr[2]) {
		exec->mrs (exec, 2, info->mr[2]);
		exec->wait(exec, tMRD);
	}

	if (mr[1] != info->mr[1]) {
		/* need to keep DLL off until later, at least on GDDR3 */
		exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
		exec->wait(exec, tMRD);
	}

	if (mr[0] != info->mr[0]) {
		exec->mrs (exec, 0, info->mr[0]);
		exec->wait(exec, tMRD);
	}

	/* update PFB timing registers */
	exec->timing_set(exec);

	/* DLL (enable + ) reset */
	if (!(info->mr[1] & mr1_dlloff)) {
		if (mr[1] & mr1_dlloff) {
			exec->mrs (exec, 1, info->mr[1]);
			exec->wait(exec, tMRD);
		}
		exec->mrs (exec, 0, info->mr[0] | 0x00000100);
		exec->wait(exec, tMRD);
		exec->mrs (exec, 0, info->mr[0] | 0x00000000);
		exec->wait(exec, tMRD);
		exec->wait(exec, tDLLK);
		if (dev_priv->vram_type == NV_MEM_TYPE_GDDR3)
			exec->precharge(exec);
	}

	return 0;
}

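/* Query the memory type from the VBIOS: look up the strap-selected
 * ramcfg entry in the BIT 'M' table's memory map. */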
int
nouveau_mem_vbios_type(struct drm_device *dev)
{
	struct bit_entry M;
	u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
	if (!bit_table(dev, 'M', &M) && M.version == 2 && M.length >= 5) {
		u8 *table = ROMPTR(dev, M.data[3]);
		if (table && table[0] == 0x10 && ramcfg < table[3]) {
			u8 *entry = table + table[1] + (ramcfg * table[2]);
			switch (entry[0] & 0x0f) {
			case 0: return NV_MEM_TYPE_DDR2;
			case 1: return NV_MEM_TYPE_DDR3;
			case 2: return NV_MEM_TYPE_GDDR3;
			case 3: return NV_MEM_TYPE_GDDR5;
			default:
				break;
			}
		}
	}
	return NV_MEM_TYPE_UNKNOWN;
}

static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	/* nothing to do */
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	/* nothing to do */
	return 0;
}

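/* Tear down any virtual address mappings still attached to the node
 * before the underlying memory is released. */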
static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
	if (node->vma[0].node) {
		nouveau_vm_unmap(&node->vma[0]);
		nouveau_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nouveau_vm_unmap(&node->vma[1]);
		nouveau_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct drm_device *dev = dev_priv->dev;

	nouveau_mem_node_cleanup(mem->mm_node);
	vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}

static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;
	u32 size_nc = 0;
	int ret;

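	/* Buffers that may be placed non-contiguously can be split into
	 * chunks as small as one page of the bo's page size. */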
	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
			mem->page_alignment << PAGE_SHIFT, size_nc,
			(nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}

void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nouveau_mm *mm = man->priv;
	struct nouveau_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&mm->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&mm->mutex);

	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s  block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};

static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nouveau_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_mem *node;

	if (unlikely((mem->num_pages << PAGE_SHIFT) >=
		     dev_priv->gart_info.aper_size))
		return -ENOMEM;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->page_shift = 12;

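	/* No address is assigned here; GART placement happens when the
	 * buffer is bound, so the node starts at offset zero. */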
	mem->mm_node = node;
	mem->start   = 0;
	return 0;
}

void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};