// SPDX-License-Identifier: GPL-2.0-only
/*
 *  psb GEM interface
 *
 * Copyright (c) 2011, Intel Corporation.
 *
 * Authors: Alan Cox
 *
 * TODO:
 *	-	we need to work out if the MMU is relevant (eg for
 *		accelerated operations on a GEM object)
 */

#include <linux/pagemap.h>

#include <asm/set_memory.h>

#include <drm/drm.h>
#include <drm/drm_vma_manager.h>

#include "gem.h"
#include "psb_drv.h"

int psb_gem_pin(struct psb_gem_object *pobj)
{
	struct drm_gem_object *obj = &pobj->base;
	struct drm_device *dev = obj->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 gpu_base = dev_priv->gtt.gatt_start;
	struct page **pages;
	unsigned int npages;
	int ret;

	mutex_lock(&dev_priv->gtt_mutex);

	if (pobj->in_gart || pobj->stolen)
		goto out; /* already mapped */

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto err_mutex_unlock;
	}

	npages = obj->size / PAGE_SIZE;

	set_pages_array_wc(pages, npages);

	psb_gtt_insert_pages(dev_priv, &pobj->resource, pages);
	psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), pages,
			     (gpu_base + pobj->offset), npages, 0, 0,
			     PSB_MMU_CACHED_MEMORY);

	pobj->npage = npages;
	pobj->pages = pages;

out:
	++pobj->in_gart;
	mutex_unlock(&dev_priv->gtt_mutex);

	return 0;

err_mutex_unlock:
	mutex_unlock(&dev_priv->gtt_mutex);
	return ret;
}

void psb_gem_unpin(struct psb_gem_object *pobj)
{
	struct drm_gem_object *obj = &pobj->base;
	struct drm_device *dev = obj->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 gpu_base = dev_priv->gtt.gatt_start;

	mutex_lock(&dev_priv->gtt_mutex);

	WARN_ON(!pobj->in_gart);

	--pobj->in_gart;

	if (pobj->in_gart || pobj->stolen)
		goto out;

	psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
			     (gpu_base + pobj->offset), pobj->npage, 0, 0);
	psb_gtt_remove_pages(dev_priv, &pobj->resource);

	/* Reset caching flags */
	set_pages_array_wb(pobj->pages, pobj->npage);

	drm_gem_put_pages(obj, pobj->pages, true, false);
	pobj->pages = NULL;
	pobj->npage = 0;

out:
	mutex_unlock(&dev_priv->gtt_mutex);
}

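/*
 * Illustration only (compiled out, not part of this driver): a minimal
 * sketch of how a caller might bracket GTT use with psb_gem_pin() and
 * psb_gem_unpin(). The psb_example_show_buffer() name and the hardware
 * programming step are hypothetical; only the pin/unpin calls and
 * pobj->offset come from the code above.
 */
#if 0
static int psb_example_show_buffer(struct psb_gem_object *pobj)
{
	int ret;

	/* Map the backing pages into the GTT (and MMU) unless already mapped. */
	ret = psb_gem_pin(pobj);
	if (ret)
		return ret;

	/* ... point the hardware at the buffer using pobj->offset ... */

	/* Drop the mapping reference; pages are unmapped on the last unpin. */
	psb_gem_unpin(pobj);
	return 0;
}
#endif
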
static vm_fault_t psb_gem_fault(struct vm_fault *vmf);

static void psb_gem_free_object(struct drm_gem_object *obj)
{
	struct psb_gem_object *pobj = to_psb_gem_object(obj);

	drm_gem_object_release(obj);

	/* Undo the mmap pin if we are destroying the object */
	if (pobj->mmapping)
		psb_gem_unpin(pobj);

	WARN_ON(pobj->in_gart && !pobj->stolen);

	release_resource(&pobj->resource);
	kfree(pobj);
}

static const struct vm_operations_struct psb_gem_vm_ops = {
	.fault = psb_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs psb_gem_object_funcs = {
	.free = psb_gem_free_object,
	.vm_ops = &psb_gem_vm_ops,
};

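/*
 * Illustration only (compiled out): the callbacks above are invoked by the
 * DRM core rather than called directly. A holder of a GEM reference simply
 * drops it; once the last reference is gone the core calls .free, which here
 * ends up in psb_gem_free_object(). The psb_example_drop_ref() helper name
 * is made up.
 */
#if 0
static void psb_example_drop_ref(struct psb_gem_object *pobj)
{
	/* May trigger psb_gem_object_funcs.free -> psb_gem_free_object(). */
	drm_gem_object_put(&pobj->base);
}
#endif
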
struct psb_gem_object *
psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen, u32 align)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_gem_object *pobj;
	struct drm_gem_object *obj;
	int ret;

	size = roundup(size, PAGE_SIZE);

	pobj = kzalloc(sizeof(*pobj), GFP_KERNEL);
	if (!pobj)
		return ERR_PTR(-ENOMEM);
	obj = &pobj->base;

	/* GTT resource */

	ret = psb_gtt_allocate_resource(dev_priv, &pobj->resource, name, size, align, stolen,
					&pobj->offset);
	if (ret)
		goto err_kfree;

	if (stolen) {
		pobj->stolen = true;
		pobj->in_gart = 1;
	}

	/* GEM object */

	obj->funcs = &psb_gem_object_funcs;

	if (stolen) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_release_resource;

		/* Limit the object to 32-bit mappings */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
	}

	return pobj;

err_release_resource:
	release_resource(&pobj->resource);
err_kfree:
	kfree(pobj);
	return ERR_PTR(ret);
}

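/*
 * Illustration only (compiled out): a sketch of how psb_gem_create() might be
 * called. A stolen-memory object skips the shmem backing store and is treated
 * as permanently present in the GTT, while a non-stolen object gets shmem
 * pages on its first pin. The 32-page size and the "example" GTT resource
 * name are made up for the sketch.
 */
#if 0
static struct psb_gem_object *psb_example_alloc(struct drm_device *dev)
{
	/* Page-aligned, shmem-backed object, 32 pages long. */
	return psb_gem_create(dev, 32 * PAGE_SIZE, "example", false, PAGE_SIZE);
}
#endif
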
/**
 *	psb_gem_dumb_create	-	create a dumb buffer
 *	@file: our client file
 *	@dev: our device
 *	@args: the requested arguments copied from userspace
 *
 *	Allocate a buffer suitable for use as a frame buffer of the
 *	form described by user space. Give userspace a handle by which
 *	to reference it.
 */
int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
			struct drm_mode_create_dumb *args)
{
	size_t pitch, size;
	struct psb_gem_object *pobj;
	struct drm_gem_object *obj;
	u32 handle;
	int ret;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	pitch = ALIGN(pitch, 64);

	size = pitch * args->height;
	size = roundup(size, PAGE_SIZE);
	if (!size)
		return -EINVAL;

	pobj = psb_gem_create(dev, size, "gem", false, PAGE_SIZE);
	if (IS_ERR(pobj))
		return PTR_ERR(pobj);
	obj = &pobj->base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret)
		goto err_drm_gem_object_put;

	drm_gem_object_put(obj);

	args->pitch = pitch;
	args->size = size;
	args->handle = handle;

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}

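/*
 * Worked example of the pitch/size arithmetic above (numbers made up): for a
 * 1024x768 request at 32 bpp,
 *	pitch = 1024 * DIV_ROUND_UP(32, 8) = 4096 bytes (already 64-byte aligned)
 *	size  = 4096 * 768 = 3145728 bytes (already a multiple of PAGE_SIZE)
 * so ALIGN() and roundup() leave both values unchanged and a 3 MiB object is
 * created.
 */
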
/**
 *	psb_gem_fault		-	pagefault handler for GEM objects
 *	@vmf: fault detail
 *
 *	Invoked when a fault occurs on an mmap of a GEM-managed area. GEM
 *	does most of the work for us, including the actual map/unmap calls,
 *	but we need to do the actual page work ourselves.
 *
 *	This code eventually needs to handle faulting objects in and out
 *	of the GTT and repacking the GTT when we run out of space. We can
 *	put that off for now; it is not needed for our simple uses.
 *
 *	The VMA was set up by GEM. In doing so it also ensured that
 *	vma->vm_private_data points to the GEM object that is backing this
 *	mapping.
 */
static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj;
	struct psb_gem_object *pobj;
	int err;
	vm_fault_t ret;
	unsigned long pfn;
	pgoff_t page_offset;
	struct drm_device *dev;
	struct drm_psb_private *dev_priv;

	obj = vma->vm_private_data;	/* GEM object */
	dev = obj->dev;
	dev_priv = to_drm_psb_private(dev);

	pobj = to_psb_gem_object(obj);

	/* Make sure we don't update in parallel on a fault, nor have anything
	   moved or removed from beneath our feet */
	mutex_lock(&dev_priv->mmap_mutex);

	/* For now the mmap pins the object and it stays pinned. As things
	   stand that will do us no harm */
	if (pobj->mmapping == 0) {
		err = psb_gem_pin(pobj);
		if (err < 0) {
			dev_err(dev->dev, "gma500: pin failed: %d\n", err);
			ret = vmf_error(err);
			goto fail;
		}
		pobj->mmapping = 1;
	}

	/* Page relative to the VMA start - we must calculate this ourselves
	   because vmf->pgoff is the fake GEM offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/* CPU view of the page, don't go via the GART for CPU writes */
	if (pobj->stolen)
		pfn = (dev_priv->stolen_base + pobj->offset) >> PAGE_SHIFT;
	else
		pfn = page_to_pfn(pobj->pages[page_offset]);
	ret = vmf_insert_pfn(vma, vmf->address, pfn);
fail:
	mutex_unlock(&dev_priv->mmap_mutex);

	return ret;
}
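
/*
 * Worked example of the fault-offset arithmetic above (addresses made up,
 * 4 KiB pages assumed): with vma->vm_start = 0x7f1200000000 and
 * vmf->address = 0x7f1200003000, page_offset = 0x3000 >> PAGE_SHIFT = 3, so
 * the fourth page of the object backs the fault. For a stolen object the pfn
 * is instead derived from dev_priv->stolen_base + pobj->offset, since
 * psb_gem_pin() never fills pobj->pages for stolen memory.
 */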