xref: /openbmc/linux/drivers/gpu/drm/gma500/gtt.c (revision bc000245)
/*
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
 *	    Alan Cox <alan@linux.intel.com>
 */

#include <drm/drmP.h>
#include <linux/shmem_fs.h>
#include "psb_drv.h"


/*
 *	GTT resource allocator - manage page mappings in GTT space
 */

/**
 *	psb_gtt_mask_pte	-	generate GTT pte entry
 *	@pfn: page frame number to encode
 *	@type: type of memory in the GTT
 *
 *	Build a GTT page table entry for the given page frame number and
 *	memory type.
 */
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	/* Ensure we explode rather than put an invalid low mapping of
	   a high mapping page into the gtt */
	BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}
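
/*
 * Worked example (illustrative only, assuming the usual 4KiB pages,
 * i.e. PAGE_SHIFT == 12): a cached read/write mapping of pfn 0x1234
 * gives
 *
 *	psb_gtt_mask_pte(0x1234, PSB_MMU_CACHED_MEMORY)
 *		== (0x1234 << 12) | PSB_PTE_VALID | PSB_PTE_CACHED
 *
 * so each 32-bit entry carries the physical page address in its upper
 * bits and the validity/caching flags in its low bits.
 */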

/**
 *	psb_gtt_entry		-	find the GTT entries for a gtt_range
 *	@dev: our DRM device
 *	@r: our GTT range
 *
 *	Given a gtt_range object, return a pointer to the first GTT page
 *	table entry covering the range, within the CPU mapping of the GTT.
 */
static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long offset;

	offset = r->resource.start - dev_priv->gtt_mem->start;

	return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
}

/**
 *	psb_gtt_insert	-	put an object into the GTT
 *	@dev: our DRM device
 *	@r: our GTT range
 *	@resume: nonzero if we are rewriting entries on resume, in which
 *		 case the page caching attributes are already set up
 *
 *	Take our preallocated GTT range and insert the GEM object into
 *	the GTT. This is protected via the gtt mutex which the caller
 *	must hold.
 */
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
			  int resume)
{
	u32 __iomem *gtt_slot;
	u32 pte;
	struct page **pages;
	int i;

	if (r->pages == NULL) {
		WARN_ON(1);
		return -EINVAL;
	}

	WARN_ON(r->stolen);	/* refcount these maybe ? */

	gtt_slot = psb_gtt_entry(dev, r);
	pages = r->pages;

	if (!resume) {
		/* Make sure changes are visible to the GPU */
		set_pages_array_wc(pages, r->npage);
	}

	/* Write our page entries into the GTT itself, rotated by the
	   current roll offset: pages roll..npage-1 first, then pages
	   0..roll-1 wrapping around behind them */
	for (i = r->roll; i < r->npage; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
		iowrite32(pte, gtt_slot++);
	}
	for (i = 0; i < r->roll; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
		iowrite32(pte, gtt_slot++);
	}
	/* Make sure all the entries are set before we return */
	ioread32(gtt_slot - 1);

	return 0;
}

/**
 *	psb_gtt_remove	-	remove an object from the GTT
 *	@dev: our DRM device
 *	@r: our GTT range
 *
 *	Remove a preallocated GTT range from the GTT. Overwrite all the
 *	page table entries with the dummy page. This is protected via the gtt
 *	mutex which the caller must hold.
 */
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 __iomem *gtt_slot;
	u32 pte;
	int i;

	WARN_ON(r->stolen);

	gtt_slot = psb_gtt_entry(dev, r);
	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);

	for (i = 0; i < r->npage; i++)
		iowrite32(pte, gtt_slot++);
	ioread32(gtt_slot - 1);
	set_pages_array_wb(r->pages, r->npage);
}

/**
 *	psb_gtt_roll	-	set scrolling position
 *	@dev: our DRM device
 *	@r: the gtt mapping we are using
 *	@roll: roll offset
 *
 *	Roll an existing pinned mapping by moving the pages through the GTT.
 *	This allows us to implement hardware scrolling on the consoles without
 *	a 2D engine.
 */
void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
{
	u32 __iomem *gtt_slot;
	u32 pte;
	int i;

	if (roll >= r->npage) {
		WARN_ON(1);
		return;
	}

	r->roll = roll;

	/* Not currently in the GTT - no worries, we will write the mapping
	   in at the right position when it gets pinned */
	if (!r->stolen && !r->in_gart)
		return;

	gtt_slot = psb_gtt_entry(dev, r);

	for (i = r->roll; i < r->npage; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
		iowrite32(pte, gtt_slot++);
	}
	for (i = 0; i < r->roll; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
		iowrite32(pte, gtt_slot++);
	}
	ioread32(gtt_slot - 1);
}
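
/*
 * Illustration: for a pinned range with npage == 4, psb_gtt_roll(dev, r, 1)
 * rewrites the GTT slots in the order pages[1], pages[2], pages[3], pages[0],
 * so the GPU sees the buffer rotated by one page - a one-page scroll with
 * no data copied.
 */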

/**
 *	psb_gtt_attach_pages	-	attach and pin GEM pages
 *	@gt: the gtt range
 *
 *	Pin and build an in kernel list of the pages that back our GEM object.
 *	While we hold this the pages cannot be swapped out. This is protected
 *	via the gtt mutex which the caller must hold.
 */
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
	struct page **pages;

	WARN_ON(gt->pages);

	pages = drm_gem_get_pages(&gt->gem, 0);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	gt->npage = gt->gem.size / PAGE_SIZE;
	gt->pages = pages;

	return 0;
}

/**
 *	psb_gtt_detach_pages	-	unpin and detach GEM pages
 *	@gt: the gtt range
 *
 *	Undo the effect of psb_gtt_attach_pages. At this point the pages
 *	must have been removed from the GTT as they could now be paged out
 *	and change bus address. This is protected via the gtt mutex which the
 *	caller must hold.
 */
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
	drm_gem_put_pages(&gt->gem, gt->pages, true, false);
	gt->pages = NULL;
}

/**
 *	psb_gtt_pin		-	pin pages into the GTT
 *	@gt: range to pin
 *
 *	Pin a set of pages into the GTT. The pins are refcounted so that
 *	multiple pins need multiple unpins to undo.
 *
 *	Non-GEM backed objects treat this as a no-op as they are always
 *	GTT backed objects.
 */
int psb_gtt_pin(struct gtt_range *gt)
{
	int ret = 0;
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->gtt_mutex);

	if (gt->in_gart == 0 && gt->stolen == 0) {
		ret = psb_gtt_attach_pages(gt);
		if (ret < 0)
			goto out;
		ret = psb_gtt_insert(dev, gt, 0);
		if (ret < 0) {
			psb_gtt_detach_pages(gt);
			goto out;
		}
	}
	gt->in_gart++;
out:
	mutex_unlock(&dev_priv->gtt_mutex);
	return ret;
}

/**
 *	psb_gtt_unpin		-	Drop a GTT pin requirement
 *	@gt: range to unpin
 *
 *	Undoes the effect of psb_gtt_pin. On the last drop the GEM object
 *	will be removed from the GTT, which will also drop the page references
 *	and allow the VM to clean up or page out the backing store.
 *
 *	Non-GEM backed objects treat this as a no-op as they are always
 *	GTT backed objects.
 */
void psb_gtt_unpin(struct gtt_range *gt)
{
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->gtt_mutex);

	WARN_ON(!gt->in_gart);

	gt->in_gart--;
	if (gt->in_gart == 0 && gt->stolen == 0) {
		psb_gtt_remove(dev, gt);
		psb_gtt_detach_pages(gt);
	}
	mutex_unlock(&dev_priv->gtt_mutex);
}
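
/*
 * Typical caller pattern for the pin/unpin pair (a hypothetical sketch,
 * not taken from this file): pins are refcounted, so every successful
 * pin needs a matching unpin before the pages can leave the GTT.
 *
 *	ret = psb_gtt_pin(gt);
 *	if (ret == 0) {
 *		... program the hardware using gt->offset ...
 *		psb_gtt_unpin(gt);
 *	}
 */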

/*
 *	GTT resource allocator - allocate and manage GTT address space
 */

/**
 *	psb_gtt_alloc_range	-	allocate GTT address space
 *	@dev: Our DRM device
 *	@len: length (bytes) of address space required
 *	@name: resource name
 *	@backed: resource should be backed by stolen pages
 *
 *	Ask the kernel core to find us a suitable range of addresses
 *	to use for a GTT mapping.
 *
 *	Returns a gtt_range structure describing the object, or NULL on
 *	error. On successful return the resource is both allocated and marked
 *	as in use.
 */
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
						const char *name, int backed)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gtt_range *gt;
	struct resource *r = dev_priv->gtt_mem;
	int ret;
	unsigned long start, end;

	if (backed) {
		/* The start of the GTT is the stolen pages */
		start = r->start;
		end = r->start + dev_priv->gtt.stolen_size - 1;
	} else {
		/* The rest we will use for GEM backed objects */
		start = r->start + dev_priv->gtt.stolen_size;
		end = r->end;
	}

	gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
	if (gt == NULL)
		return NULL;
	gt->resource.name = name;
	gt->stolen = backed;
	gt->in_gart = backed;
	gt->roll = 0;
	/* Ensure this is set for non GEM objects */
	gt->gem.dev = dev;
	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
				len, start, end, PAGE_SIZE, NULL, NULL);
	if (ret == 0) {
		gt->offset = gt->resource.start - r->start;
		return gt;
	}
	kfree(gt);
	return NULL;
}

/**
 *	psb_gtt_free_range	-	release GTT address space
 *	@dev: our DRM device
 *	@gt: a mapping created with psb_gtt_alloc_range
 *
 *	Release a resource that was allocated with psb_gtt_alloc_range. If the
 *	object has been pinned by mmap users we currently clean that up here.
 */
void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
{
	/* Undo the mmap pin if we are destroying the object */
	if (gt->mmapping) {
		psb_gtt_unpin(gt);
		gt->mmapping = 0;
	}
	WARN_ON(gt->in_gart && !gt->stolen);
	release_resource(&gt->resource);
	kfree(gt);
}
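
/*
 * Lifecycle sketch of a GEM backed range (illustrative; the names and
 * sizes are hypothetical):
 *
 *	gt = psb_gtt_alloc_range(dev, size, "fb", 0);	reserve GTT space
 *	ret = psb_gtt_pin(gt);				attach pages and map
 *	...						use gt->offset
 *	psb_gtt_unpin(gt);				unmap on last unpin
 *	psb_gtt_free_range(dev, gt);			release the space
 */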

static void psb_gtt_alloc(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	init_rwsem(&dev_priv->gtt.sem);
}

void psb_gtt_takedown(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->gtt_map) {
		iounmap(dev_priv->gtt_map);
		dev_priv->gtt_map = NULL;
	}
	if (dev_priv->gtt_initialized) {
		pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
				      dev_priv->gmch_ctrl);
		PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
		(void) PSB_RVDC32(PSB_PGETBL_CTL);
	}
	/* Unmap the stolen memory area */
	if (dev_priv->vram_addr) {
		iounmap(dev_priv->vram_addr);
		dev_priv->vram_addr = NULL;
	}
}

int psb_gtt_init(struct drm_device *dev, int resume)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned gtt_pages;
	unsigned long stolen_size, vram_stolen_size;
	unsigned i, num_pages;
	unsigned pfn_base;
	struct psb_gtt *pg;

	int ret = 0;
	uint32_t pte;

	if (!resume) {
		mutex_init(&dev_priv->gtt_mutex);
		psb_gtt_alloc(dev);
	}

	pg = &dev_priv->gtt;

	/* Enable the GTT */
	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
	(void) PSB_RVDC32(PSB_PGETBL_CTL);

	/* The root resource we allocate address space from */
	dev_priv->gtt_initialized = 1;

	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

	/*
	 *	The video mmu has a hw bug when accessing 0xD0000000.
	 *	Make the gatt start at 0xE0000000. This doesn't actually
	 *	matter for us but may do if the video acceleration ever
	 *	gets opened up.
	 */
	pg->mmu_gatt_start = 0xE0000000;

	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
								>> PAGE_SHIFT;
	/* CDV doesn't report this; in that case the system has 64 gtt
	   pages */
	if (pg->gtt_start == 0 || gtt_pages == 0) {
		dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
		gtt_pages = 64;
		pg->gtt_start = dev_priv->pge_ctl;
	}

	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
								>> PAGE_SHIFT;
	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];

	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
		static struct resource fudge;	/* Preferably peppermint */
		/* This can occur on CDV systems. Fudge it in this case. We
		   really don't care what imaginary space is being allocated
		   at this point */
		dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
		pg->gatt_start = 0x40000000;
		pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
		/* This is a little confusing but in fact the GTT is providing
		   a view from the GPU into memory and not vice versa. As such
		   this is really allocating space that is not the same as the
		   CPU address space on CDV */
		fudge.start = 0x40000000;
		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
		fudge.name = "fudge";
		fudge.flags = IORESOURCE_MEM;
		dev_priv->gtt_mem = &fudge;
	}

	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
								- PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
			dev_priv->stolen_base, vram_stolen_size / 1024);

	if (resume && ((gtt_pages != pg->gtt_pages) ||
		       (stolen_size != pg->stolen_size))) {
		dev_err(dev->dev, "GTT resume error.\n");
		ret = -EINVAL;
		goto out_err;
	}

	pg->gtt_pages = gtt_pages;
	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	/*
	 *	Map the GTT and the stolen memory area
	 */
	if (!resume)
		dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
						gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		dev_err(dev->dev, "Failure to map gtt.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	if (!resume)
		dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
						 stolen_size);
	if (!dev_priv->vram_addr) {
		dev_err(dev->dev, "Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Insert vram stolen pages into the GTT
	 */

	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
	num_pages = vram_stolen_size >> PAGE_SHIFT;
	dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, 0);
		iowrite32(pte, dev_priv->gtt_map + i);
	}

	/*
	 * Init rest of GTT to the scratch page to avoid accidents or scribbles
	 */

	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, 0);
	for (; i < gtt_pages; ++i)
		iowrite32(pte, dev_priv->gtt_map + i);

	(void) ioread32(dev_priv->gtt_map + i - 1);
	return 0;

out_err:
	psb_gtt_takedown(dev);
	return ret;
}
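
/*
 * Resulting GTT layout after psb_gtt_init() (a sketch, for illustration):
 *
 *	entries 0 .. num_pages-1	stolen memory pages, mapped 1:1
 *	entries num_pages .. gtt_pages-1	the scratch page, until GEM
 *					object pages are pinned over them
 */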

int psb_gtt_restore(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct resource *r = dev_priv->gtt_mem->child;
	struct gtt_range *range;
	unsigned int restored = 0, total = 0, size = 0;

	/* On resume, the gtt_mutex is already initialized */
	mutex_lock(&dev_priv->gtt_mutex);
	psb_gtt_init(dev, 1);

	while (r != NULL) {
		range = container_of(r, struct gtt_range, resource);
		if (range->pages) {
			psb_gtt_insert(dev, range, 1);
			size += resource_size(&range->resource);
			restored++;
		}
		r = r->sibling;
		total++;
	}
	mutex_unlock(&dev_priv->gtt_mutex);
	DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)\n", restored,
			 total, (size / 1024));

	return 0;
}
563