// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
 *	    Alan Cox <alan@linux.intel.com>
 */

#include <linux/shmem_fs.h>

#include <asm/set_memory.h>

#include "blitter.h"
#include "psb_drv.h"

/*
 *	GTT resource allocator - manage page mappings in GTT space
 */

/**
 *	psb_gtt_mask_pte	-	generate GTT pte entry
 *	@pfn: page number to encode
 *	@type: type of memory in the GTT
 *
 *	Set the GTT entry for the appropriate memory type.
 */
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	/* Ensure we explode rather than put an invalid low mapping of
	   a high mapping page into the gtt */
	BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

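	/* The PTE is the page's physical address in the upper bits with
	   the valid/cache/protection flags folded into the low bits that
	   the page alignment frees up */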
	return (pfn << PAGE_SHIFT) | mask;
}

/**
 *	psb_gtt_entry		-	find the GTT entries for a gtt_range
 *	@dev: our DRM device
 *	@r: our GTT range
 *
 *	Given a gtt_range object, return a pointer into the mapped GTT at
 *	the page table entries for this gtt_range.
 */
static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long offset;

	offset = r->resource.start - dev_priv->gtt_mem->start;

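	/* One 32bit PTE describes each page of aperture space, so the byte
	   offset divided by the page size indexes straight into the mapped
	   table */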
	return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
}

/**
 *	psb_gtt_insert	-	put an object into the GTT
 *	@dev: our DRM device
 *	@r: our GTT range
 *	@resume: true if we are rewriting entries on resume from suspend
 *
 *	Take our preallocated GTT range and insert the GEM object into
 *	the GTT. This is protected via the gtt mutex which the caller
 *	must hold.
 */
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
			  int resume)
{
	u32 __iomem *gtt_slot;
	u32 pte;
	struct page **pages;
	int i;

	if (r->pages == NULL) {
		WARN_ON(1);
		return -EINVAL;
	}

	WARN_ON(r->stolen);	/* refcount these maybe ? */

	gtt_slot = psb_gtt_entry(dev, r);
	pages = r->pages;

	if (!resume) {
		/* Make sure changes are visible to the GPU */
		set_pages_array_wc(pages, r->npage);
	}

	/* Write our page entries into the GTT itself */
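	/* The entries are written rotated by r->roll pages: the walk starts
	   at page 'roll' and wraps back to page 0, which is how the rolled
	   console view is presented without copying any data */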
	for (i = r->roll; i < r->npage; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
	for (i = 0; i < r->roll; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
	/* Make sure all the entries are set before we return */
	ioread32(gtt_slot - 1);

	return 0;
}

/**
 *	psb_gtt_remove	-	remove an object from the GTT
 *	@dev: our DRM device
 *	@r: our GTT range
 *
 *	Remove a preallocated GTT range from the GTT. Overwrite all the
 *	page table entries with the dummy page. This is protected via the gtt
 *	mutex which the caller must hold.
 */
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 __iomem *gtt_slot;
	u32 pte;
	int i;

	WARN_ON(r->stolen);

	gtt_slot = psb_gtt_entry(dev, r);
	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
			       PSB_MMU_CACHED_MEMORY);

	for (i = 0; i < r->npage; i++)
		iowrite32(pte, gtt_slot++);
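	/* Read back the last entry to flush the posted PTE writes */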
	ioread32(gtt_slot - 1);
	set_pages_array_wb(r->pages, r->npage);
}

/**
 *	psb_gtt_roll	-	set scrolling position
 *	@dev: our DRM device
 *	@r: the gtt mapping we are using
 *	@roll: roll offset
 *
 *	Roll an existing pinned mapping by moving the pages through the GTT.
 *	This allows us to implement hardware scrolling on the consoles without
 *	a 2D engine.
 */
void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
{
	u32 __iomem *gtt_slot;
	u32 pte;
	int i;

	if (roll >= r->npage) {
		WARN_ON(1);
		return;
	}

	r->roll = roll;

	/* Not currently in the GTT - no need to worry, we will write the
	   mapping at the right position when it gets pinned */
	if (!r->stolen && !r->in_gart)
		return;

	gtt_slot = psb_gtt_entry(dev, r);

	for (i = r->roll; i < r->npage; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
	for (i = 0; i < r->roll; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
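	/* Read back the last entry to flush the posted PTE writes */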
	ioread32(gtt_slot - 1);
}

/**
 *	psb_gtt_attach_pages	-	attach and pin GEM pages
 *	@gt: the gtt range
 *
 *	Pin and build an in-kernel list of the pages that back our GEM object.
 *	While we hold this the pages cannot be swapped out. This is protected
 *	via the gtt mutex which the caller must hold.
 */
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
	struct page **pages;

	WARN_ON(gt->pages);

	pages = drm_gem_get_pages(&gt->gem);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	gt->npage = gt->gem.size / PAGE_SIZE;
	gt->pages = pages;

	return 0;
}

/**
 *	psb_gtt_detach_pages	-	release GEM pages
 *	@gt: the gtt range
 *
 *	Undo the effect of psb_gtt_attach_pages. At this point the pages
 *	must have been removed from the GTT as they could now be paged out
 *	and change bus address. This is protected via the gtt mutex which
 *	the caller must hold.
 */
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
	drm_gem_put_pages(&gt->gem, gt->pages, true, false);
	gt->pages = NULL;
}

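/*
 * A rough usage sketch for the pin interface below (error handling
 * elided): pin the range, program the hardware with gt->offset, then
 * drop the pin again:
 *
 *	if (psb_gtt_pin(gt) == 0) {
 *		... use gt->offset as the GTT address ...
 *		psb_gtt_unpin(gt);
 *	}
 */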
/**
 *	psb_gtt_pin		-	pin pages into the GTT
 *	@gt: range to pin
 *
 *	Pin a set of pages into the GTT. The pins are refcounted so that
 *	multiple pins need multiple unpins to undo.
 *
 *	Non GEM backed objects (stolen memory) treat this as a no-op as
 *	they are always resident in the GTT.
 */
int psb_gtt_pin(struct gtt_range *gt)
{
	int ret = 0;
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 gpu_base = dev_priv->gtt.gatt_start;

	mutex_lock(&dev_priv->gtt_mutex);

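	/* First pin of a GEM backed object: attach the backing pages and
	   map them into both the GTT and the GPU MMU */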
	if (gt->in_gart == 0 && gt->stolen == 0) {
		ret = psb_gtt_attach_pages(gt);
		if (ret < 0)
			goto out;
		ret = psb_gtt_insert(dev, gt, 0);
		if (ret < 0) {
			psb_gtt_detach_pages(gt);
			goto out;
		}
		psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
				     gt->pages, (gpu_base + gt->offset),
				     gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
	}
	gt->in_gart++;
out:
	mutex_unlock(&dev_priv->gtt_mutex);
	return ret;
}

/**
 *	psb_gtt_unpin		-	drop a GTT pin requirement
 *	@gt: range to unpin
 *
 *	Undoes the effect of psb_gtt_pin. On the last drop the GEM object
 *	will be removed from the GTT, which also drops the page references
 *	and allows the VM to reclaim or page out the memory.
 *
 *	Non GEM backed objects (stolen memory) treat this as a no-op as
 *	they are always resident in the GTT.
 */
void psb_gtt_unpin(struct gtt_range *gt)
{
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 gpu_base = dev_priv->gtt.gatt_start;
	int ret;

	/* While holding the gtt_mutex no new blits can be initiated */
	mutex_lock(&dev_priv->gtt_mutex);

	/* Wait for any possible usage of the memory to be finished */
	ret = gma_blt_wait_idle(dev_priv);
	if (ret) {
		DRM_ERROR("Failed to idle the blitter, unpin failed!\n");
		goto out;
	}

	WARN_ON(!gt->in_gart);

	gt->in_gart--;
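	/* Last pin dropped: tear down the MMU and GTT mappings and release
	   our references on the backing pages */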
	if (gt->in_gart == 0 && gt->stolen == 0) {
		psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
				     (gpu_base + gt->offset), gt->npage, 0, 0);
		psb_gtt_remove(dev, gt);
		psb_gtt_detach_pages(gt);
	}

out:
	mutex_unlock(&dev_priv->gtt_mutex);
}

/*
 *	GTT resource allocator - allocate and manage GTT address space
 */

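/*
 * A minimal allocation sketch for psb_gtt_alloc_range() below (the name
 * and alignment are purely illustrative):
 *
 *	gt = psb_gtt_alloc_range(dev, len, "sketch", 0, PAGE_SIZE);
 *	if (gt)
 *		... use the range, later psb_gtt_free_range(dev, gt) ...
 */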
/**
 *	psb_gtt_alloc_range	-	allocate GTT address space
 *	@dev: Our DRM device
 *	@len: length (bytes) of address space required
 *	@name: resource name
 *	@backed: resource should be backed by stolen pages
 *	@align: requested alignment
 *
 *	Ask the kernel core to find us a suitable range of addresses
 *	to use for a GTT mapping.
 *
 *	Returns a gtt_range structure describing the object, or NULL on
 *	error. On successful return the resource is both allocated and marked
 *	as in use.
 */
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
				      const char *name, int backed, u32 align)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gtt_range *gt;
	struct resource *r = dev_priv->gtt_mem;
	int ret;
	unsigned long start, end;

	if (backed) {
		/* The start of the GTT is the stolen pages */
		start = r->start;
		end = r->start + dev_priv->gtt.stolen_size - 1;
	} else {
		/* The rest we will use for GEM backed objects */
		start = r->start + dev_priv->gtt.stolen_size;
		end = r->end;
	}

	gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
	if (gt == NULL)
		return NULL;
	gt->resource.name = name;
	gt->stolen = backed;
	gt->in_gart = backed;
	gt->roll = 0;
	/* Ensure this is set for non GEM objects */
	gt->gem.dev = dev;
	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
				len, start, end, align, NULL, NULL);
	if (ret == 0) {
		gt->offset = gt->resource.start - r->start;
		return gt;
	}
	kfree(gt);
	return NULL;
}

/**
 *	psb_gtt_free_range	-	release GTT address space
 *	@dev: our DRM device
 *	@gt: a mapping created with psb_gtt_alloc_range
 *
 *	Release a resource that was allocated with psb_gtt_alloc_range. If
 *	the object is still pinned by an mmap user we currently clean that
 *	up here.
 */
void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
{
	/* Undo the mmap pin if we are destroying the object */
	if (gt->mmapping) {
		psb_gtt_unpin(gt);
		gt->mmapping = 0;
	}
	WARN_ON(gt->in_gart && !gt->stolen);
	release_resource(&gt->resource);
	kfree(gt);
}

static void psb_gtt_alloc(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	init_rwsem(&dev_priv->gtt.sem);
}

void psb_gtt_takedown(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->gtt_map) {
		iounmap(dev_priv->gtt_map);
		dev_priv->gtt_map = NULL;
	}
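	/* Put the GMCH control and page table state back as we found it */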
	if (dev_priv->gtt_initialized) {
		pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
				      dev_priv->gmch_ctrl);
		PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
		(void) PSB_RVDC32(PSB_PGETBL_CTL);
	}
	if (dev_priv->vram_addr)
		iounmap(dev_priv->vram_addr);
}

int psb_gtt_init(struct drm_device *dev, int resume)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned gtt_pages;
	unsigned long stolen_size, vram_stolen_size;
	unsigned i, num_pages;
	unsigned pfn_base;
	struct psb_gtt *pg;

	int ret = 0;
	uint32_t pte;

	if (!resume) {
		mutex_init(&dev_priv->gtt_mutex);
		mutex_init(&dev_priv->mmap_mutex);
		psb_gtt_alloc(dev);
	}

	pg = &dev_priv->gtt;

	/* Enable the GTT */
	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
	(void) PSB_RVDC32(PSB_PGETBL_CTL);

	/* The root resource we allocate address space from */
	dev_priv->gtt_initialized = 1;

	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

	/*
	 *	The video mmu has a hw bug when accessing 0xD0000000.
	 *	Make gatt start at 0xE0000000. This doesn't actually
	 *	matter for us but may do if the video acceleration ever
	 *	gets opened up.
	 */
	pg->mmu_gatt_start = 0xE0000000;

	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
								>> PAGE_SHIFT;
	/* CDV doesn't report this; in that case the system has 64 GTT pages */
	if (pg->gtt_start == 0 || gtt_pages == 0) {
		dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
		gtt_pages = 64;
		pg->gtt_start = dev_priv->pge_ctl;
	}

	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
								>> PAGE_SHIFT;
	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];

	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
		static struct resource fudge;	/* Preferably peppermint */
		/* This can occur on CDV systems. Fudge it in this case.
		   We really don't care what imaginary space is being allocated
		   at this point */
		dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
		pg->gatt_start = 0x40000000;
		pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
		/* This is a little confusing but in fact the GTT is providing
		   a view from the GPU into memory and not vice versa. As such
		   this is really allocating space that is not the same as the
		   CPU address space on CDV */
		fudge.start = 0x40000000;
		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
		fudge.name = "fudge";
		fudge.flags = IORESOURCE_MEM;
		dev_priv->gtt_mem = &fudge;
	}

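	/* The usable stolen area runs from the BIOS provided base (BSM) up
	   to the GTT table itself, keeping one further page back */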
	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
								- PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
			dev_priv->stolen_base, vram_stolen_size / 1024);

	/* On resume a change in either the GTT size or the stolen size
	   means the saved state no longer matches the hardware */
	if (resume && ((gtt_pages != pg->gtt_pages) ||
	    (stolen_size != pg->stolen_size))) {
		dev_err(dev->dev, "GTT resume error.\n");
		ret = -EINVAL;
		goto out_err;
	}

	pg->gtt_pages = gtt_pages;
	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	/*
	 *	Map the GTT and the stolen memory area
	 */
	if (!resume)
		dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
						gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		dev_err(dev->dev, "Failure to map gtt.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	if (!resume)
		dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
						 stolen_size);

	if (!dev_priv->vram_addr) {
		dev_err(dev->dev, "Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Insert vram stolen pages into the GTT
	 */

	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
	num_pages = vram_stolen_size >> PAGE_SHIFT;
	dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, dev_priv->gtt_map + i);
	}

	/*
	 * Init rest of GTT to the scratch page to avoid accidents or scribbles
	 */

	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
	for (; i < gtt_pages; ++i)
		iowrite32(pte, dev_priv->gtt_map + i);

	(void) ioread32(dev_priv->gtt_map + i - 1);
	return 0;

out_err:
	psb_gtt_takedown(dev);
	return ret;
}

int psb_gtt_restore(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct resource *r = dev_priv->gtt_mem->child;
	struct gtt_range *range;
	unsigned int restored = 0, total = 0, size = 0;

	/* On resume, the gtt_mutex is already initialized */
	mutex_lock(&dev_priv->gtt_mutex);
	psb_gtt_init(dev, 1);

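	/* Walk every child resource of the GTT space and rewrite the PTEs
	   of any range that still has backing pages; the stolen area itself
	   was already re-inserted by psb_gtt_init() above */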
	while (r != NULL) {
		range = container_of(r, struct gtt_range, resource);
		if (range->pages) {
			psb_gtt_insert(dev, range, 1);
			size += resource_size(&range->resource);
			restored++;
		}
		r = r->sibling;
		total++;
	}
	mutex_unlock(&dev_priv->gtt_mutex);
	DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)\n", restored,
			 total, (size / 1024));

	return 0;
}