xref: /openbmc/linux/drivers/gpu/drm/drm_fbdev_generic.c (revision 90741096769bd75152a5fe397343e5893c7d905a)
// SPDX-License-Identifier: MIT

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>

#include <drm/drm_fbdev_generic.h>

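/*
 * A shadow fbdev buffer is used whenever the driver asks for one via
 * prefer_shadow or prefer_shadow_fbdev, or when the framebuffer has a
 * dirty callback; deferred I/O then flushes damaged regions from the
 * system-memory shadow into the real buffer.
 */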
static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_framebuffer *fb = fb_helper->fb;

	return dev->mode_config.prefer_shadow_fbdev ||
	       dev->mode_config.prefer_shadow ||
	       fb->funcs->dirty;
}

/* @user: 1=userspace, 0=fbcon */
static int drm_fbdev_fb_open(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	/* No need to take a ref for fbcon because it unbinds on unregister */
	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
		return -ENODEV;

	return 0;
}

static int drm_fbdev_fb_release(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	if (user)
		module_put(fb_helper->dev->driver->fops->owner);

	return 0;
}

static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
{
	struct fb_info *fbi = fb_helper->info;
	void *shadow = NULL;

	if (!fb_helper->dev)
		return;

	if (fbi) {
		if (fbi->fbdefio)
			fb_deferred_io_cleanup(fbi);
		if (drm_fbdev_use_shadow_fb(fb_helper))
			shadow = fbi->screen_buffer;
	}

	drm_fb_helper_fini(fb_helper);

	if (shadow)
		vfree(shadow);
	else if (fb_helper->buffer)
		drm_client_buffer_vunmap(fb_helper->buffer);

	drm_client_framebuffer_delete(fb_helper->buffer);
}

static void drm_fbdev_release(struct drm_fb_helper *fb_helper)
{
	drm_fbdev_cleanup(fb_helper);
	drm_client_release(&fb_helper->client);
	kfree(fb_helper);
}

/*
 * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
 * unregister_framebuffer() or fb_release().
 */
static void drm_fbdev_fb_destroy(struct fb_info *info)
{
	drm_fbdev_release(info->par);
}

static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *fb_helper = info->par;

	if (drm_fbdev_use_shadow_fb(fb_helper))
		return fb_deferred_io_mmap(info, vma);
	else if (fb_helper->dev->driver->gem_prime_mmap)
		return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
	else
		return -ENODEV;
}

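/*
 * Returns true if the framebuffer memory must be accessed through I/O
 * accessors: only when there is no system-memory shadow buffer and the
 * client buffer mapping points into I/O memory.
 */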
static bool drm_fbdev_use_iomem(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_client_buffer *buffer = fb_helper->buffer;

	return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem;
}

static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf,
				 size_t count, loff_t *ppos)
{
	ssize_t ret;

	if (drm_fbdev_use_iomem(info))
		ret = drm_fb_helper_cfb_read(info, buf, count, ppos);
	else
		ret = drm_fb_helper_sys_read(info, buf, count, ppos);

	return ret;
}

static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	ssize_t ret;

	if (drm_fbdev_use_iomem(info))
		ret = drm_fb_helper_cfb_write(info, buf, count, ppos);
	else
		ret = drm_fb_helper_sys_write(info, buf, count, ppos);

	return ret;
}

static void drm_fbdev_fb_fillrect(struct fb_info *info,
				  const struct fb_fillrect *rect)
{
	if (drm_fbdev_use_iomem(info))
		drm_fb_helper_cfb_fillrect(info, rect);
	else
		drm_fb_helper_sys_fillrect(info, rect);
}

static void drm_fbdev_fb_copyarea(struct fb_info *info,
				  const struct fb_copyarea *area)
{
	if (drm_fbdev_use_iomem(info))
		drm_fb_helper_cfb_copyarea(info, area);
	else
		drm_fb_helper_sys_copyarea(info, area);
}

static void drm_fbdev_fb_imageblit(struct fb_info *info,
				   const struct fb_image *image)
{
	if (drm_fbdev_use_iomem(info))
		drm_fb_helper_cfb_imageblit(info, image);
	else
		drm_fb_helper_sys_imageblit(info, image);
}

static const struct fb_ops drm_fbdev_fb_ops = {
	.owner		= THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_open	= drm_fbdev_fb_open,
	.fb_release	= drm_fbdev_fb_release,
	.fb_destroy	= drm_fbdev_fb_destroy,
	.fb_mmap	= drm_fbdev_fb_mmap,
	.fb_read	= drm_fbdev_fb_read,
	.fb_write	= drm_fbdev_fb_write,
	.fb_fillrect	= drm_fbdev_fb_fillrect,
	.fb_copyarea	= drm_fbdev_fb_copyarea,
	.fb_imageblit	= drm_fbdev_fb_imageblit,
};

/*
 * This function uses the client API to create a framebuffer backed by a dumb buffer.
 */
static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
			      struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_dev *client = &fb_helper->client;
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer;
	struct drm_framebuffer *fb;
	struct fb_info *fbi;
	u32 format;
	struct iosys_map map;
	int ret;

	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
		    sizes->surface_width, sizes->surface_height,
		    sizes->surface_bpp);

	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
					       sizes->surface_height, format);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	fb_helper->buffer = buffer;
	fb_helper->fb = buffer->fb;
	fb = buffer->fb;

	fbi = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(fbi))
		return PTR_ERR(fbi);

	fbi->fbops = &drm_fbdev_fb_ops;
	fbi->screen_size = sizes->surface_height * fb->pitches[0];
	fbi->fix.smem_len = fbi->screen_size;
	fbi->flags = FBINFO_DEFAULT;

	drm_fb_helper_fill_info(fbi, fb_helper, sizes);

	if (drm_fbdev_use_shadow_fb(fb_helper)) {
		fbi->screen_buffer = vzalloc(fbi->screen_size);
		if (!fbi->screen_buffer)
			return -ENOMEM;
		fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;

		/* Set a default deferred I/O handler */
		fb_helper->fbdefio.delay = HZ / 20;
		fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;

		fbi->fbdefio = &fb_helper->fbdefio;
		ret = fb_deferred_io_init(fbi);
		if (ret)
			return ret;
	} else {
		/* buffer is mapped for HW framebuffer */
		ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
		if (ret)
			return ret;
		if (map.is_iomem) {
			fbi->screen_base = map.vaddr_iomem;
		} else {
			fbi->screen_buffer = map.vaddr;
			fbi->flags |= FBINFO_VIRTFB;
		}

		/*
		 * Shamelessly leak the physical address to user-space. As
		 * page_to_phys() is undefined for I/O memory, warn in this
		 * case.
		 */
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
		if (fb_helper->hint_leak_smem_start && fbi->fix.smem_start == 0 &&
		    !drm_WARN_ON_ONCE(dev, map.is_iomem))
			fbi->fix.smem_start =
				page_to_phys(virt_to_page(fbi->screen_buffer));
#endif
	}

	return 0;
}

static void drm_fbdev_damage_blit_real(struct drm_fb_helper *fb_helper,
				       struct drm_clip_rect *clip,
				       struct iosys_map *dst)
{
	struct drm_framebuffer *fb = fb_helper->fb;
	size_t offset = clip->y1 * fb->pitches[0];
	size_t len = clip->x2 - clip->x1;
	unsigned int y;
	void *src;

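	/*
	 * For sub-byte formats (1, 2 or 4 bpp) round the damaged span out to
	 * whole bytes; otherwise scale offset and length by the bytes per
	 * pixel of the format.
	 */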
	switch (drm_format_info_bpp(fb->format, 0)) {
	case 1:
		offset += clip->x1 / 8;
		len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
		break;
	case 2:
		offset += clip->x1 / 4;
		len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
		break;
	case 4:
		offset += clip->x1 / 2;
		len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
		break;
	default:
		offset += clip->x1 * fb->format->cpp[0];
		len *= fb->format->cpp[0];
		break;
	}

	src = fb_helper->info->screen_buffer + offset;
	iosys_map_incr(dst, offset); /* go to first pixel within clip rect */

	for (y = clip->y1; y < clip->y2; y++) {
		iosys_map_memcpy_to(dst, 0, src, len);
		iosys_map_incr(dst, fb->pitches[0]);
		src += fb->pitches[0];
	}
}

static int drm_fbdev_damage_blit(struct drm_fb_helper *fb_helper,
				 struct drm_clip_rect *clip)
{
	struct drm_client_buffer *buffer = fb_helper->buffer;
	struct iosys_map map, dst;
	int ret;

	/*
	 * We have to pin the client buffer to its current location while
	 * flushing the shadow buffer. In the general case, concurrent
	 * modesetting operations could try to move the buffer and would
	 * fail. The modeset has to be serialized by acquiring the reservation
	 * object of the underlying BO here.
	 *
	 * For fbdev emulation, we only have to protect against fbdev modeset
	 * operations. Nothing else will involve the client buffer's BO. So it
	 * is sufficient to acquire struct drm_fb_helper.lock here.
	 */
	mutex_lock(&fb_helper->lock);

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret)
		goto out;

	dst = map;
	drm_fbdev_damage_blit_real(fb_helper, clip, &dst);

	drm_client_buffer_vunmap(buffer);

out:
	mutex_unlock(&fb_helper->lock);

	return ret;
}

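/*
 * Flush a damaged region of the shadow buffer: blit it into the client
 * buffer (if one exists) and, if the framebuffer provides a dirty callback,
 * notify the driver about the updated rectangle.
 */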
static int drm_fbdev_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
{
	struct drm_device *dev = helper->dev;
	int ret;

	if (!drm_fbdev_use_shadow_fb(helper))
		return 0;

	/* Call damage handlers only if necessary */
	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
		return 0;

	if (helper->buffer) {
		ret = drm_fbdev_damage_blit(helper, clip);
		if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
			return ret;
	}

	if (helper->fb->funcs->dirty) {
		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
			return ret;
	}

	return 0;
}

static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
	.fb_probe = drm_fbdev_fb_probe,
	.fb_dirty = drm_fbdev_fb_dirty,
};

static void drm_fbdev_client_unregister(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);

	if (fb_helper->info)
		/* drm_fbdev_fb_destroy() takes care of cleanup */
		drm_fb_helper_unregister_info(fb_helper);
	else
		drm_fbdev_release(fb_helper);
}

static int drm_fbdev_client_restore(struct drm_client_dev *client)
{
	drm_fb_helper_lastclose(client->dev);

	return 0;
}

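/*
 * Sets up the fbdev emulation on the first hotplug event that finds a
 * connector. Once a setup attempt has failed (fb_helper->dev was cleared in
 * the error path below), it is not retried; subsequent events are forwarded
 * as hotplug events to the existing fb_helper.
 */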
static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
	struct drm_device *dev = client->dev;
	int ret;

	/* Setup is not retried if it has failed */
	if (!fb_helper->dev && fb_helper->funcs)
		return 0;

	if (dev->fb_helper)
		return drm_fb_helper_hotplug_event(dev->fb_helper);

	if (!dev->mode_config.num_connector) {
		drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n");
		return 0;
	}

	drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);

	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto err;

	if (!drm_drv_uses_atomic_modeset(dev))
		drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp);
	if (ret)
		goto err_cleanup;

	return 0;

err_cleanup:
	drm_fbdev_cleanup(fb_helper);
err:
	fb_helper->dev = NULL;
	fb_helper->info = NULL;

	drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);

	return ret;
}

static const struct drm_client_funcs drm_fbdev_client_funcs = {
	.owner		= THIS_MODULE,
	.unregister	= drm_fbdev_client_unregister,
	.restore	= drm_fbdev_client_restore,
	.hotplug	= drm_fbdev_client_hotplug,
};

/**
 * drm_fbdev_generic_setup() - Setup generic fbdev emulation
 * @dev: DRM device
 * @preferred_bpp: Preferred bits per pixel for the device.
 *                 @dev->mode_config.preferred_depth is used if this is zero.
 *
 * This function sets up generic fbdev emulation for drivers that support
 * dumb buffers with a virtual address and that can be mmap'ed.
 * drm_fbdev_generic_setup() shall be called after the DRM driver registered
 * the new DRM device with drm_dev_register().
 *
 * Restore, hotplug events and teardown are all taken care of. Drivers that do
 * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
 * Simple drivers might use drm_mode_config_helper_suspend().
 *
 * Drivers that set the dirty callback on their framebuffer will get a shadow
 * fbdev buffer that is blitted onto the real buffer. This is done in order to
 * make deferred I/O work with all kinds of buffers. A shadow buffer can be
 * requested explicitly by setting struct drm_mode_config.prefer_shadow or
 * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
 * required to use generic fbdev emulation with SHMEM helpers.
 *
 * This function is safe to call even when there are no connectors present.
 * Setup will be retried on the next hotplug event.
 *
 * The fbdev is destroyed by drm_dev_unregister().
 */
void drm_fbdev_generic_setup(struct drm_device *dev,
			     unsigned int preferred_bpp)
{
	struct drm_fb_helper *fb_helper;
	int ret;

	drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
	drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");

	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
	if (!fb_helper)
		return;

	ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
	if (ret) {
		kfree(fb_helper);
		drm_err(dev, "Failed to register client: %d\n", ret);
		return;
	}

	/*
	 * FIXME: This mixes up depth with bpp, which results in a glorious
	 * mess, resulting in some drivers picking wrong fbdev defaults and
	 * others wrong preferred_depth defaults.
	 */
	if (!preferred_bpp)
		preferred_bpp = dev->mode_config.preferred_depth;
	if (!preferred_bpp)
		preferred_bpp = 32;
	fb_helper->preferred_bpp = preferred_bpp;

	ret = drm_fbdev_client_hotplug(&fb_helper->client);
	if (ret)
		drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);

	drm_client_register(&fb_helper->client);
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
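
/*
 * Example use of drm_fbdev_generic_setup() (illustrative sketch only; the
 * driver, my_driver_probe() and its platform device are hypothetical and not
 * part of this file). Per the kerneldoc above, the call goes right after
 * drm_dev_register() in the driver's probe path; passing 0 as @preferred_bpp
 * picks the default (preferred_depth, or 32 as a last resort):
 *
 *	static int my_driver_probe(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm;
 *		int ret;
 *
 *		drm = ...;	(driver-specific device allocation)
 *		ret = drm_dev_register(drm, 0);
 *		if (ret)
 *			return ret;
 *
 *		drm_fbdev_generic_setup(drm, 0);
 *
 *		return 0;
 *	}
 */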