/*
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
 *	    Alan Cox <alan@linux.intel.com>
 */

#include <drm/drmP.h>
#include <linux/shmem_fs.h>
#include "psb_drv.h"
#include "blitter.h"


/*
 *	GTT resource allocator - manage page mappings in GTT space
 */

/**
 *	psb_gtt_mask_pte	-	generate GTT pte entry
 *	@pfn: page number to encode
 *	@type: type of memory in the GTT
 *
 *	Set the GTT entry for the appropriate memory type.
 */
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	/* Ensure we explode rather than put an invalid low mapping of
	   a high mapping page into the gtt */
	BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}
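
/*
 * Worked example (illustrative only, assuming PAGE_SHIFT == 12): pfn 0x1234
 * with type PSB_MMU_CACHED_MEMORY encodes to
 *	(0x1234 << 12) | PSB_PTE_VALID | PSB_PTE_CACHED
 * i.e. the page address 0x01234000 with the valid and cached bits set.
 */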

/**
 *	psb_gtt_entry		-	find the GTT entries for a gtt_range
 *	@dev: our DRM device
 *	@r: our GTT range
 *
 *	Given a gtt_range object return the GTT offset of the page table
 *	entries for this gtt_range
 */
static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long offset;

	offset = r->resource.start - dev_priv->gtt_mem->start;

	return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
}

/**
 *	psb_gtt_insert	-	put an object into the GTT
 *	@dev: our DRM device
 *	@r: our GTT range
 *	@resume: non-zero if we are restoring an existing mapping on resume
 *
 *	Take our preallocated GTT range and insert the GEM object into
 *	the GTT. This is protected via the gtt mutex which the caller
 *	must hold.
 */
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
			  int resume)
{
	u32 __iomem *gtt_slot;
	u32 pte;
	struct page **pages;
	int i;

	if (r->pages == NULL) {
		WARN_ON(1);
		return -EINVAL;
	}

	WARN_ON(r->stolen);	/* refcount these maybe ? */

	gtt_slot = psb_gtt_entry(dev, r);
	pages = r->pages;

	if (!resume) {
		/* Make sure changes are visible to the GPU */
		set_pages_array_wc(pages, r->npage);
	}

	/* Write our page entries into the GTT itself */
	for (i = r->roll; i < r->npage; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
	for (i = 0; i < r->roll; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
	/* Make sure all the entries are set before we return */
	ioread32(gtt_slot - 1);

	return 0;
}

/**
 *	psb_gtt_remove	-	remove an object from the GTT
 *	@dev: our DRM device
 *	@r: our GTT range
 *
 *	Remove a preallocated GTT range from the GTT. Overwrite all the
 *	page table entries with the dummy page. This is protected via the gtt
 *	mutex which the caller must hold.
 */
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 __iomem *gtt_slot;
	u32 pte;
	int i;

	WARN_ON(r->stolen);

	gtt_slot = psb_gtt_entry(dev, r);
	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
			       PSB_MMU_CACHED_MEMORY);

	for (i = 0; i < r->npage; i++)
		iowrite32(pte, gtt_slot++);
	ioread32(gtt_slot - 1);
	set_pages_array_wb(r->pages, r->npage);
}

/**
 *	psb_gtt_roll	-	set scrolling position
 *	@dev: our DRM device
 *	@r: the gtt mapping we are using
 *	@roll: roll offset
 *
 *	Roll an existing pinned mapping by moving the pages through the GTT.
 *	This allows us to implement hardware scrolling on the consoles without
 *	a 2D engine
 */
void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
{
	u32 __iomem *gtt_slot;
	u32 pte;
	int i;

	if (roll >= r->npage) {
		WARN_ON(1);
		return;
	}

	r->roll = roll;

	/* Not currently in the GTT - no worry we will write the mapping at
	   the right position when it gets pinned */
	if (!r->stolen && !r->in_gart)
		return;

	gtt_slot = psb_gtt_entry(dev, r);

	for (i = r->roll; i < r->npage; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
	for (i = 0; i < r->roll; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
	ioread32(gtt_slot - 1);
}

/**
 *	psb_gtt_attach_pages	-	attach and pin GEM pages
 *	@gt: the gtt range
 *
 *	Pin and build an in kernel list of the pages that back our GEM object.
 *	While we hold this the pages cannot be swapped out. This is protected
 *	via the gtt mutex which the caller must hold.
 */
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
	struct page **pages;

	WARN_ON(gt->pages);

	pages = drm_gem_get_pages(&gt->gem);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	gt->npage = gt->gem.size / PAGE_SIZE;
	gt->pages = pages;

	return 0;
}

/**
 *	psb_gtt_detach_pages	-	detach and unpin GEM pages
 *	@gt: the gtt range
 *
 *	Undo the effect of psb_gtt_attach_pages. At this point the pages
 *	must have been removed from the GTT as they could now be paged out
 *	and move bus address. This is protected via the gtt mutex which the
 *	caller must hold.
 */
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
	drm_gem_put_pages(&gt->gem, gt->pages, true, false);
	gt->pages = NULL;
}

/**
 *	psb_gtt_pin		-	pin pages into the GTT
 *	@gt: range to pin
 *
 *	Pin a set of pages into the GTT. The pins are refcounted so that
 *	multiple pins need multiple unpins to undo.
 *
 *	Non GEM backed objects treat this as a no-op as they are always GTT
 *	backed objects.
 */
int psb_gtt_pin(struct gtt_range *gt)
{
	int ret = 0;
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 gpu_base = dev_priv->gtt.gatt_start;

	mutex_lock(&dev_priv->gtt_mutex);

	if (gt->in_gart == 0 && gt->stolen == 0) {
		ret = psb_gtt_attach_pages(gt);
		if (ret < 0)
			goto out;
		ret = psb_gtt_insert(dev, gt, 0);
		if (ret < 0) {
			psb_gtt_detach_pages(gt);
			goto out;
		}
		psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
				     gt->pages, (gpu_base + gt->offset),
				     gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
	}
	gt->in_gart++;
out:
	mutex_unlock(&dev_priv->gtt_mutex);
	return ret;
}

/**
 *	psb_gtt_unpin		-	Drop a GTT pin requirement
 *	@gt: range to unpin
 *
 *	Undoes the effect of psb_gtt_pin. On the last drop the GEM object
 *	will be removed from the GTT which will also drop the page references
 *	and allow the VM to reclaim or page out the backing pages.
 *
 *	Non GEM backed objects treat this as a no-op as they are always GTT
 *	backed objects.
 */
void psb_gtt_unpin(struct gtt_range *gt)
{
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 gpu_base = dev_priv->gtt.gatt_start;
	int ret;

	/* While holding the gtt_mutex no new blits can be initiated */
	mutex_lock(&dev_priv->gtt_mutex);

	/* Wait for any possible usage of the memory to be finished */
	ret = gma_blt_wait_idle(dev_priv);
	if (ret) {
		DRM_ERROR("Failed to idle the blitter, unpin failed!\n");
		goto out;
	}

	WARN_ON(!gt->in_gart);

	gt->in_gart--;
	if (gt->in_gart == 0 && gt->stolen == 0) {
		psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
				     (gpu_base + gt->offset), gt->npage, 0, 0);
		psb_gtt_remove(dev, gt);
		psb_gtt_detach_pages(gt);
	}

out:
	mutex_unlock(&dev_priv->gtt_mutex);
}

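/*
 * Usage sketch (illustrative, not taken from a caller in this file): pins
 * are refcounted, so every successful psb_gtt_pin() must be balanced by a
 * psb_gtt_unpin() once the hardware no longer needs the pages resident:
 *
 *	if (psb_gtt_pin(gt) == 0) {
 *		... point the hardware at gt->offset ...
 *		psb_gtt_unpin(gt);
 *	}
 */
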
/*
 *	GTT resource allocator - allocate and manage GTT address space
 */

/**
 *	psb_gtt_alloc_range	-	allocate GTT address space
 *	@dev: Our DRM device
 *	@len: length (bytes) of address space required
 *	@name: resource name
 *	@backed: resource should be backed by stolen pages
 *	@align: requested alignment
 *
 *	Ask the kernel core to find us a suitable range of addresses
 *	to use for a GTT mapping.
 *
 *	Returns a gtt_range structure describing the object, or NULL on
 *	error. On successful return the resource is both allocated and marked
 *	as in use.
 */
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
				      const char *name, int backed, u32 align)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gtt_range *gt;
	struct resource *r = dev_priv->gtt_mem;
	int ret;
	unsigned long start, end;

	if (backed) {
		/* The start of the GTT is the stolen pages */
		start = r->start;
		end = r->start + dev_priv->gtt.stolen_size - 1;
	} else {
		/* The rest we will use for GEM backed objects */
		start = r->start + dev_priv->gtt.stolen_size;
		end = r->end;
	}

	gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
	if (gt == NULL)
		return NULL;
	gt->resource.name = name;
	gt->stolen = backed;
	gt->in_gart = backed;
	gt->roll = 0;
	/* Ensure this is set for non GEM objects */
	gt->gem.dev = dev;
	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
				len, start, end, align, NULL, NULL);
	if (ret == 0) {
		gt->offset = gt->resource.start - r->start;
		return gt;
	}
	kfree(gt);
	return NULL;
}

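/*
 * Illustrative use (hypothetical caller, not from this file): a stolen
 * backed framebuffer and a GEM backed object would be carved out as
 *
 *	fb  = psb_gtt_alloc_range(dev, fb_size, "fb", 1, PAGE_SIZE);
 *	obj = psb_gtt_alloc_range(dev, obj_size, "gem", 0, PAGE_SIZE);
 *
 * and released again with psb_gtt_free_range() when no longer needed.
 */
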
/**
 *	psb_gtt_free_range	-	release GTT address space
 *	@dev: our DRM device
 *	@gt: a mapping created with psb_gtt_alloc_range
 *
 *	Release a resource that was allocated with psb_gtt_alloc_range. If the
 *	object has been pinned by mmap users we clean this up here currently.
 */
void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
{
	/* Undo the mmap pin if we are destroying the object */
	if (gt->mmapping) {
		psb_gtt_unpin(gt);
		gt->mmapping = 0;
	}
	WARN_ON(gt->in_gart && !gt->stolen);
	release_resource(&gt->resource);
	kfree(gt);
}

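/*
 *	psb_gtt_alloc		-	initialise GTT bookkeeping
 *	@dev: our DRM device
 *
 *	Set up the semaphore protecting the GTT state. Only called from
 *	psb_gtt_init() on the initial (non-resume) setup pass.
 */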
static void psb_gtt_alloc(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	init_rwsem(&dev_priv->gtt.sem);
}

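/*
 *	psb_gtt_takedown	-	unwind psb_gtt_init
 *	@dev: our DRM device
 *
 *	Unmap the GTT and stolen memory mappings and restore the saved
 *	GMCH control and page table control register values.
 */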
void psb_gtt_takedown(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->gtt_map) {
		iounmap(dev_priv->gtt_map);
		dev_priv->gtt_map = NULL;
	}
	if (dev_priv->gtt_initialized) {
		pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
				      dev_priv->gmch_ctrl);
		PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
		(void) PSB_RVDC32(PSB_PGETBL_CTL);
	}
	if (dev_priv->vram_addr)
		iounmap(dev_priv->vram_addr);
}

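/*
 *	psb_gtt_init		-	enable and map the GTT
 *	@dev: our DRM device
 *	@resume: non-zero if we are reinitialising after a resume
 *
 *	Enable the GTT in the GMCH, work out where the GTT, GATT and stolen
 *	memory live, map the GTT and stolen area, then point the GTT entries
 *	at the stolen pages and fill the remainder with the scratch page.
 */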
int psb_gtt_init(struct drm_device *dev, int resume)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned gtt_pages;
	unsigned long stolen_size, vram_stolen_size;
	unsigned i, num_pages;
	unsigned pfn_base;
	struct psb_gtt *pg;

	int ret = 0;
	uint32_t pte;

	if (!resume) {
		mutex_init(&dev_priv->gtt_mutex);
		mutex_init(&dev_priv->mmap_mutex);
		psb_gtt_alloc(dev);
	}

	pg = &dev_priv->gtt;

	/* Enable the GTT */
	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
	(void) PSB_RVDC32(PSB_PGETBL_CTL);

	/* The root resource we allocate address space from */
	dev_priv->gtt_initialized = 1;

	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

	/*
	 *	The video mmu has a hw bug when accessing 0xD0000000.
	 *	Make gatt start at 0xE0000000. This doesn't actually
	 *	matter for us but may do if the video acceleration ever
	 *	gets opened up.
	 */
	pg->mmu_gatt_start = 0xE0000000;

	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
								>> PAGE_SHIFT;
	/* CDV doesn't report this; in that case the system has 64 GTT pages */
	if (pg->gtt_start == 0 || gtt_pages == 0) {
		dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
		gtt_pages = 64;
		pg->gtt_start = dev_priv->pge_ctl;
	}

	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
								>> PAGE_SHIFT;
	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];

	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
		static struct resource fudge;	/* Preferably peppermint */
		/* This can occur on CDV systems. Fudge it in this case.
		   We really don't care what imaginary space is being allocated
		   at this point */
		dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
		pg->gatt_start = 0x40000000;
		pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
		/* This is a little confusing but in fact the GTT is providing
		   a view from the GPU into memory and not vice versa. As such
		   this is really allocating space that is not the same as the
		   CPU address space on CDV */
		fudge.start = 0x40000000;
		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
		fudge.name = "fudge";
		fudge.flags = IORESOURCE_MEM;
		dev_priv->gtt_mem = &fudge;
	}

	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
								- PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
			dev_priv->stolen_base, vram_stolen_size / 1024);

	if (resume && ((gtt_pages != pg->gtt_pages) ||
		       (stolen_size != pg->stolen_size))) {
		dev_err(dev->dev, "GTT resume error.\n");
		ret = -EINVAL;
		goto out_err;
	}

	pg->gtt_pages = gtt_pages;
	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	/*
	 *	Map the GTT and the stolen memory area
	 */
	if (!resume)
		dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
						gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		dev_err(dev->dev, "Failure to map gtt.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	if (!resume)
		dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
						 stolen_size);

	if (!dev_priv->vram_addr) {
		dev_err(dev->dev, "Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Insert vram stolen pages into the GTT
	 */

	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
	num_pages = vram_stolen_size >> PAGE_SHIFT;
	dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, dev_priv->gtt_map + i);
	}

	/*
	 * Init rest of GTT to the scratch page to avoid accidents or scribbles
	 */

	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
	for (; i < gtt_pages; ++i)
		iowrite32(pte, dev_priv->gtt_map + i);

	(void) ioread32(dev_priv->gtt_map + i - 1);
	return 0;

out_err:
	psb_gtt_takedown(dev);
	return ret;
}

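/*
 *	psb_gtt_restore		-	restore GTT mappings on resume
 *	@dev: our DRM device
 *
 *	Reinitialise the GTT hardware via psb_gtt_init() and then walk the
 *	allocated GTT ranges, rewriting the page table entries of every
 *	range that still has pages attached.
 */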
int psb_gtt_restore(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct resource *r = dev_priv->gtt_mem->child;
	struct gtt_range *range;
	unsigned int restored = 0, total = 0, size = 0;

	/* On resume, the gtt_mutex is already initialized */
	mutex_lock(&dev_priv->gtt_mutex);
	psb_gtt_init(dev, 1);

	while (r != NULL) {
		range = container_of(r, struct gtt_range, resource);
		if (range->pages) {
			psb_gtt_insert(dev, range, 1);
			size += range->resource.end - range->resource.start;
			restored++;
		}
		r = r->sibling;
		total++;
	}
	mutex_unlock(&dev_priv->gtt_mutex);
	DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)", restored,
			 total, (size / 1024));

	return 0;
}