// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007-2011, Intel Corporation.
 * All Rights Reserved.
 *
 **************************************************************************/

#include <linux/pfn_t.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>

#include "gem.h"
#include "psb_drv.h"

/*
 * VM area struct
 */

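/*
 * Page-fault handler for the fbdev mmap. The framebuffer lives in stolen
 * memory, so on the first fault every page of the VMA is inserted as an
 * uncached PFN mapping, starting at info->fix.smem_start.
 */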
static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct fb_info *info = vma->vm_private_data;
	unsigned long address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
	unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
	vm_fault_t err = VM_FAULT_SIGBUS;
	unsigned long page_num = vma_pages(vma);
	unsigned long i;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	for (i = 0; i < page_num; ++i) {
		err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, PFN_DEV));
		if (unlikely(err & VM_FAULT_ERROR))
			break;
		address += PAGE_SIZE;
		++pfn;
	}

	return err;
}

static const struct vm_operations_struct psb_fbdev_vm_ops = {
	.fault	= psb_fbdev_vm_fault,
};

/*
 * struct fb_ops
 */

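/*
 * Scale a 16-bit fbdev colour component (0..0xffff) down to the field
 * width of the framebuffer's pixel format, rounding to nearest.
 */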
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)

static int psb_fbdev_fb_setcolreg(unsigned int regno,
				  unsigned int red, unsigned int green,
				  unsigned int blue, unsigned int transp,
				  struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	uint32_t v;

	if (!fb)
		return -ENOMEM;

	if (regno > 255)
		return 1;

	red = CMAP_TOHW(red, info->var.red.length);
	blue = CMAP_TOHW(blue, info->var.blue.length);
	green = CMAP_TOHW(green, info->var.green.length);
	transp = CMAP_TOHW(transp, info->var.transp.length);

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset) |
	    (transp << info->var.transp.offset);

	if (regno < 16) {
		switch (fb->format->cpp[0] * 8) {
		case 16:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		case 24:
		case 32:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		}
	}

	return 0;
}

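/*
 * Only mappings that start at offset zero are accepted. No pages are
 * inserted here; psb_fbdev_vm_fault() populates the VMA on first access.
 */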
static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	if (vma->vm_pgoff != 0)
		return -EINVAL;
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;

	/*
	 * If this is a GEM object then info->screen_base is the virtual
	 * kernel remapping of the object. FIXME: Review if this is
	 * suitable for our mmap work
	 */
	vma->vm_ops = &psb_fbdev_vm_ops;
	vma->vm_private_data = info;
	vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);

	return 0;
}

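/*
 * Final teardown when the fbdev device is released: finalize the fb
 * helper, unregister and free the DRM framebuffer, drop the reference on
 * the backing GEM object, then release the DRM client state.
 */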
static void psb_fbdev_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	struct drm_gem_object *obj = fb->obj[0];

	drm_fb_helper_fini(fb_helper);

	drm_framebuffer_unregister_private(fb);
	fb->obj[0] = NULL;
	drm_framebuffer_cleanup(fb);
	kfree(fb);

	drm_gem_object_put(obj);

	drm_client_release(&fb_helper->client);

	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}

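/*
 * The framebuffer is in stolen graphics memory that is accessed as I/O
 * memory through screen_base, hence the cfb_ variants of the fb helpers.
 */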
static const struct fb_ops psb_fbdev_fb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psb_fbdev_fb_setcolreg,
	.fb_read = drm_fb_helper_cfb_read,
	.fb_write = drm_fb_helper_cfb_write,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psb_fbdev_fb_mmap,
	.fb_destroy = psb_fbdev_fb_destroy,
};

/*
 * struct drm_fb_helper_funcs
 */

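/*
 * Allocate a framebuffer for the fbdev console: a GEM object backed by
 * stolen pages, wrapped in a DRM framebuffer and exposed through fb_info.
 * Falls back to 16 bpp if a 32-bpp buffer does not fit into stolen memory.
 */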
static int psb_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
			      struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd = { };
	int size;
	int ret;
	struct psb_gem_object *backing;
	struct drm_gem_object *obj;
	u32 bpp, depth;

	/* No 24-bit packed mode */
	if (sizes->surface_bpp == 24) {
		sizes->surface_bpp = 32;
		sizes->surface_depth = 24;
	}
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;

	/*
	 * If a 32-bpp framebuffer for this mode does not fit into stolen
	 * memory, fall back to 16 bpp so the console still gets the full
	 * resolution. The X mode-setting server will allocate its own
	 * 32-bit GEM framebuffer.
	 */
	size = ALIGN(sizes->surface_width * DIV_ROUND_UP(bpp, 8), 64) *
		     sizes->surface_height;
	size = ALIGN(size, PAGE_SIZE);

	if (size > dev_priv->vram_stolen_size) {
		sizes->surface_bpp = 16;
		sizes->surface_depth = 16;
	}
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(bpp, 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);

	/* Allocate the framebuffer in the GTT with stolen page backing */
	backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE);
	if (IS_ERR(backing))
		return PTR_ERR(backing);
	obj = &backing->base;

	fb = psb_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		goto err_drm_gem_object_put;
	}

	fb_helper->fb = fb;

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_framebuffer_unregister_private;
	}

	info->fbops = &psb_fbdev_fb_ops;
	info->flags = FBINFO_DEFAULT;
	/* Access stolen memory directly */
	info->screen_base = dev_priv->vram_addr + backing->offset;
	info->screen_size = size;

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	info->fix.smem_start = dev_priv->stolen_base + backing->offset;
	info->fix.smem_len = size;
	info->fix.ywrapstep = 0;
	info->fix.ypanstep = 0;
	info->fix.mmio_start = pci_resource_start(pdev, 0);
	info->fix.mmio_len = pci_resource_len(pdev, 0);

	memset(info->screen_base, 0, info->screen_size);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);

	return 0;

err_drm_framebuffer_unregister_private:
	drm_framebuffer_unregister_private(fb);
	fb->obj[0] = NULL;
	drm_framebuffer_cleanup(fb);
	kfree(fb);
err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}

static const struct drm_fb_helper_funcs psb_fbdev_fb_helper_funcs = {
	.fb_probe = psb_fbdev_fb_probe,
};

/*
 * struct drm_client_funcs and setup code
 */

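/*
 * If the fbdev device was registered, unregistering it ends up in
 * psb_fbdev_fb_destroy(), which frees everything. Otherwise free the
 * prepared but unused fb_helper here.
 */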
static void psb_fbdev_client_unregister(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);

	if (fb_helper->info) {
		drm_fb_helper_unregister_info(fb_helper);
	} else {
		drm_fb_helper_unprepare(fb_helper);
		drm_client_release(&fb_helper->client);
		kfree(fb_helper);
	}
}

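/* Restore the fbdev configuration when the last DRM client closes. */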
static int psb_fbdev_client_restore(struct drm_client_dev *client)
{
	drm_fb_helper_lastclose(client->dev);

	return 0;
}

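/*
 * Set up the initial fbdev configuration on the first hotplug event and
 * forward subsequent events to the existing fb helper.
 */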
static int psb_fbdev_client_hotplug(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
	struct drm_device *dev = client->dev;
	int ret;

	if (dev->fb_helper)
		return drm_fb_helper_hotplug_event(dev->fb_helper);

	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto err_drm_err;

	if (!drm_drv_uses_atomic_modeset(dev))
		drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(fb_helper);
	if (ret)
		goto err_drm_fb_helper_fini;

	return 0;

err_drm_fb_helper_fini:
	drm_fb_helper_fini(fb_helper);
err_drm_err:
	drm_err(dev, "Failed to setup gma500 fbdev emulation (ret=%d)\n", ret);
	return ret;
}

static const struct drm_client_funcs psb_fbdev_client_funcs = {
	.owner		= THIS_MODULE,
	.unregister	= psb_fbdev_client_unregister,
	.restore	= psb_fbdev_client_restore,
	.hotplug	= psb_fbdev_client_hotplug,
};

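/*
 * Register the fbdev client for this device. Errors are not propagated to
 * the caller; fbdev emulation is best effort and failures are only logged.
 */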
void psb_fbdev_setup(struct drm_psb_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->dev;
	struct drm_fb_helper *fb_helper;
	int ret;

	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
	if (!fb_helper)
		return;
	drm_fb_helper_prepare(dev, fb_helper, 32, &psb_fbdev_fb_helper_funcs);

	ret = drm_client_init(dev, &fb_helper->client, "fbdev-gma500", &psb_fbdev_client_funcs);
	if (ret) {
		drm_err(dev, "Failed to register client: %d\n", ret);
		goto err_drm_fb_helper_unprepare;
	}

	ret = psb_fbdev_client_hotplug(&fb_helper->client);
	if (ret)
		drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);

	drm_client_register(&fb_helper->client);

	return;

err_drm_fb_helper_unprepare:
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}