#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

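/*
 * Per-backend SGDMA state.  "pages" holds the DMA (bus) addresses of the
 * buffer's backing pages, "ttm_alloced" marks entries whose mappings were
 * supplied by TTM rather than created here (and so must not be unmapped
 * by us), and "offset" is the byte offset into the GART aperture while
 * "bound" is set.
 */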
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;
	bool *ttm_alloced;
	unsigned nr_pages;

	u64 offset;
	bool bound;
};

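/*
 * Populate the backend with a buffer's backing pages: map each page for
 * bidirectional PCI DMA and record the bus addresses for the per-chipset
 * bind() hooks to write into the GART page table.
 */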
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page,
		       dma_addr_t *dma_addrs)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(dev, "num_pages = %lu\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->ttm_alloced = kcalloc(num_pages, sizeof(bool), GFP_KERNEL);
	if (!nvbe->ttm_alloced) {
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		return -ENOMEM;
	}

	nvbe->nr_pages = 0;
	while (num_pages--) {
		/* this code path isn't called and is incorrect anyways */
		if (0) { /* dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE */
			nvbe->pages[nvbe->nr_pages] =
					dma_addrs[nvbe->nr_pages];
			nvbe->ttm_alloced[nvbe->nr_pages] = true;
		} else {
			nvbe->pages[nvbe->nr_pages] =
				pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
					     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(dev->pdev,
						  nvbe->pages[nvbe->nr_pages])) {
				be->func->clear(be);
				return -EFAULT;
			}
			nvbe->ttm_alloced[nvbe->nr_pages] = false;
		}

		nvbe->nr_pages++;
	}

	return 0;
}

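/*
 * Tear down what populate() built: unbind if still bound, unmap every
 * page we mapped ourselves, and free the tracking arrays.
 */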
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			if (!nvbe->ttm_alloced[nvbe->nr_pages])
				pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		kfree(nvbe->ttm_alloced);
		nvbe->pages = NULL;
		nvbe->ttm_alloced = NULL;
		nvbe->nr_pages = 0;
	}
}

static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (nvbe) {
		NV_DEBUG(nvbe->dev, "\n");

		if (nvbe->pages)
			be->func->clear(be);
		kfree(nvbe);
	}
}

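/*
 * NV04-style GART: the page table lives inside a DMA-in-memory context
 * object.  Each PTE is a 32-bit word mapping one 4KiB page; the "+ 2"
 * skips the two-word ctxdma header, the low bits (| 3) mark the entry
 * valid (present/RW in the usual nv04 ctxdma layout), and a CPU page
 * larger than 4KiB is written as several consecutive PTEs.
 */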
static int
nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

	nvbe->offset = mem->start << PAGE_SHIFT;
	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];
		uint32_t offset_l = lower_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
			offset_l += NV_CTXDMA_PAGE_SIZE;
		}
	}

	nvbe->bound = true;
	return 0;
}

static int
nv04_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < nvbe->nr_pages; i++) {
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
	}

	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv04_sgdma_bind,
	.unbind			= nv04_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

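/*
 * Flush the VM after page-table updates.  Register 0x100810 appears to
 * trigger a TLB flush: poke it, wait for the hardware to ack with bit 8,
 * then clear it again.
 */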
static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100810, 0x00000022);
	if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
		NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100810));
	nv_wr32(dev, 0x100810, 0x00000000);
}

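/*
 * NV41-style GART: a flat page table of 32-bit PTEs.  Each entry holds
 * the page's bus address shifted right by 7, with bit 0 as the valid
 * bit; mem->start is in pages, so "<< 2" turns it into a byte offset
 * into the table of 4-byte PTEs.
 */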
static int
nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->pages;
	u32 pte = mem->start << 2;
	u32 cnt = nvbe->nr_pages;

	nvbe->offset = mem->start << PAGE_SHIFT;

	while (cnt--) {
		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	nvbe->bound = true;
	return 0;
}

static int
nv41_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = nvbe->nr_pages;

	while (cnt--) {
		nv_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv41_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv41_sgdma_bind,
	.unbind			= nv41_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

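/*
 * NV44 GART flush: 0x100814 looks to take the size of the range being
 * flushed and 0x100808 the base plus a trigger bit (0x20), with bit 0
 * signalling completion.  As with the nv41 flush, the register semantics
 * here come from reverse engineering rather than documentation.
 */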
static void
nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
	nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
	if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
		NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100808));
	nv_wr32(dev, 0x100808, 0x00000000);
}

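/*
 * NV44 PTEs are 27 bits wide (bus address >> 12) and packed four to a
 * 16-byte group: PTE0 sits in word0[26:0], PTE1 in word0[31:27] plus
 * word1[21:0], PTE2 in word1[31:22] plus word2[16:0], and PTE3 in
 * word2[31:17] plus word3[11:0].  Updating part of a group therefore
 * means reading all four words back, splicing in the new entries, and
 * writing the group out again.  These PTEs have no present bit, so
 * "unmapped" entries (list == NULL) are pointed at the dummy page
 * instead.  Word3 bit 30 (0x40000000) is always set on writeback; its
 * purpose isn't clear from this code alone.
 */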
static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
	struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
	dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
	u32 pte, tmp[4];

	pte   = base >> 2;
	base &= ~0x0000000f;

	tmp[0] = nv_ro32(pgt, base + 0x0);
	tmp[1] = nv_ro32(pgt, base + 0x4);
	tmp[2] = nv_ro32(pgt, base + 0x8);
	tmp[3] = nv_ro32(pgt, base + 0xc);
	while (cnt--) {
		u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
		switch (pte++ & 0x3) {
		case 0:
			tmp[0] &= ~0x07ffffff;
			tmp[0] |= addr;
			break;
		case 1:
			tmp[0] &= ~0xf8000000;
			tmp[0] |= addr << 27;
			tmp[1] &= ~0x003fffff;
			tmp[1] |= addr >> 5;
			break;
		case 2:
			tmp[1] &= ~0xffc00000;
			tmp[1] |= addr << 22;
			tmp[2] &= ~0x0001ffff;
			tmp[2] |= addr >> 10;
			break;
		case 3:
			tmp[2] &= ~0xfffe0000;
			tmp[2] |= addr << 17;
			tmp[3] &= ~0x00000fff;
			tmp[3] |= addr >> 15;
			break;
		}
	}

	tmp[3] |= 0x40000000;

	nv_wo32(pgt, base + 0x0, tmp[0]);
	nv_wo32(pgt, base + 0x4, tmp[1]);
	nv_wo32(pgt, base + 0x8, tmp[2]);
	nv_wo32(pgt, base + 0xc, tmp[3]);
}

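/*
 * Bind for NV44: fill up to the next 16-byte group boundary with the
 * read-modify-write helper, stream out full groups directly, then let
 * the helper handle any tail that remains.
 */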
static int
nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->pages;
	u32 pte = mem->start << 2, tmp[4];
	u32 cnt = nvbe->nr_pages;
	int i;

	nvbe->offset = mem->start << PAGE_SHIFT;

	if (pte & 0x0000000c) {
		u32  max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, list, pte, part);
		pte  += (part << 2);
		list += part;
		cnt  -= part;
	}

	while (cnt >= 4) {
		for (i = 0; i < 4; i++)
			tmp[i] = *list++ >> 12;
		nv_wo32(pgt, pte + 0x0, tmp[0] >>  0 | tmp[1] << 27);
		nv_wo32(pgt, pte + 0x4, tmp[1] >>  5 | tmp[2] << 22);
		nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
		nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
		pte  += 0x10;
		cnt  -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, list, pte, cnt);

	nv44_sgdma_flush(nvbe);
	nvbe->bound = true;
	return 0;
}

static int
nv44_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = nvbe->nr_pages;

	if (pte & 0x0000000c) {
		u32  max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, NULL, pte, part);
		pte  += (part << 2);
		cnt  -= part;
	}

	while (cnt >= 4) {
		nv_wo32(pgt, pte + 0x0, 0x00000000);
		nv_wo32(pgt, pte + 0x4, 0x00000000);
		nv_wo32(pgt, pte + 0x8, 0x00000000);
		nv_wo32(pgt, pte + 0xc, 0x00000000);
		pte  += 0x10;
		cnt  -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, NULL, pte, cnt);

	nv44_sgdma_flush(nvbe);
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv44_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv44_sgdma_bind,
	.unbind			= nv44_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

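/*
 * On NV50 and up the real (un)binding is done from move_notify() by the
 * VM code; these hooks just hand the DMA address list to the memory node
 * (node->pages) and stash the node pointer in nvbe->pages so unbind can
 * reverse the swap.
 */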
static int
nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct nouveau_mem *node = mem->mm_node;
	/* noop: bound in move_notify() */
	node->pages = nvbe->pages;
	nvbe->pages = (dma_addr_t *)node;
	nvbe->bound = true;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
	/* noop: unbound in move_notify() */
	nvbe->pages = node->pages;
	node->pages = NULL;
	nvbe->bound = false;
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nv50_sgdma_bind,
	.unbind			= nv50_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

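/*
 * Allocate a TTM backend instance wired to whichever function table
 * nouveau_sgdma_init() selected for this chipset.
 */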
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;

	nvbe->backend.func = dev_priv->gart_info.func;
	return &nvbe->backend;
}

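/*
 * Per-chipset GART setup: pick an aperture size, allocate and map the
 * dummy page (NV44's PTE format has no present bit, so unbound entries
 * must point somewhere harmless), then select a backend and, where one
 * is needed, create the page-table object.  Note that the NV41/NV44
 * paths are currently disabled by the "0 &&" in the condition below,
 * so pre-NV50 PCIe boards fall back to the NV04-style ctxdma.
 */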
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	u32 aper_size, align;
	int ret;

	if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
		aper_size = 512 * 1024 * 1024;
	else
		aper_size = 64 * 1024 * 1024;

	/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
	 * Christmas.  The cards before it have them, the cards after
	 * it have them, why is NV44 so unloved?
	 */
	dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
	if (!dev_priv->gart_info.dummy.page)
		return -ENOMEM;

	dev_priv->gart_info.dummy.addr =
		pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
			     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
		NV_ERROR(dev, "error mapping dummy page\n");
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
		return -ENOMEM;
	}

	if (dev_priv->card_type >= NV_50) {
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
		dev_priv->gart_info.func = &nv50_sgdma_backend;
	} else
	if (0 && pci_is_pcie(dev->pdev) &&
	    dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
		if (nv44_graph_class(dev)) {
			dev_priv->gart_info.func = &nv44_sgdma_backend;
			align = 512 * 1024;
		} else {
			dev_priv->gart_info.func = &nv41_sgdma_backend;
			align = 16;
		}

		ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
	} else {
		ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (0 << 14) /* RW */ |
				   (2 << 16) /* PCI */);
		nv_wo32(gpuobj, 4, aper_size - 1);

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
		dev_priv->gart_info.func = &nv04_sgdma_backend;
	}

	return 0;
}

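/*
 * Undo nouveau_sgdma_init(): drop the ctxdma reference and release the
 * dummy page mapping.
 */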
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);

	if (dev_priv->gart_info.dummy.page) {
		pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
	}
}

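/*
 * Reverse-translate a GART offset into the bus address stored in the
 * corresponding PTE.  This assumes the NV04-style ctxdma layout (two
 * header words, 32-bit PTEs), hence the BUG_ON for NV50 and later.
 */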
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

	BUG_ON(dev_priv->card_type >= NV_50);

	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
		(offset & NV_CTXDMA_PAGE_MASK);
}