// SPDX-License-Identifier: MIT

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>

#include <drm/drm_fbdev_generic.h>

static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_framebuffer *fb = fb_helper->fb;

	return dev->mode_config.prefer_shadow_fbdev ||
	       dev->mode_config.prefer_shadow ||
	       fb->funcs->dirty;
}

/* @user: 1=userspace, 0=fbcon */
static int drm_fbdev_fb_open(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	/* No need to take a ref for fbcon because it unbinds on unregister */
	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
		return -ENODEV;

	return 0;
}

static int drm_fbdev_fb_release(struct fb_info *info, int user)
{
	struct drm_fb_helper *fb_helper = info->par;

	if (user)
		module_put(fb_helper->dev->driver->fops->owner);

	return 0;
}

static void drm_fbdev_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	void *shadow = NULL;

	if (!fb_helper->dev)
		return;

	if (info->fbdefio)
		fb_deferred_io_cleanup(info);
	if (drm_fbdev_use_shadow_fb(fb_helper))
		shadow = info->screen_buffer;

	drm_fb_helper_fini(fb_helper);

	if (shadow)
		vfree(shadow);
	else if (fb_helper->buffer)
		drm_client_buffer_vunmap(fb_helper->buffer);

	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);

	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
}

static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *fb_helper = info->par;

	if (drm_fbdev_use_shadow_fb(fb_helper))
		return fb_deferred_io_mmap(info, vma);
	else if (fb_helper->dev->driver->gem_prime_mmap)
		return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
	else
		return -ENODEV;
}

static bool drm_fbdev_use_iomem(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_client_buffer *buffer = fb_helper->buffer;

	return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem;
}

static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf,
				 size_t count, loff_t *ppos)
{
	ssize_t ret;

	if (drm_fbdev_use_iomem(info))
		ret = drm_fb_helper_cfb_read(info, buf, count, ppos);
	else
		ret = drm_fb_helper_sys_read(info, buf, count, ppos);

	return ret;
}

static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	ssize_t ret;

	if (drm_fbdev_use_iomem(info))
		ret = drm_fb_helper_cfb_write(info, buf, count, ppos);
	else
		ret = drm_fb_helper_sys_write(info, buf, count, ppos);

	return ret;
}

static void drm_fbdev_fb_fillrect(struct fb_info *info,
				  const struct fb_fillrect *rect)
{
	if (drm_fbdev_use_iomem(info))
		drm_fb_helper_cfb_fillrect(info, rect);
	else
		drm_fb_helper_sys_fillrect(info, rect);
}

static void drm_fbdev_fb_copyarea(struct fb_info *info,
				  const struct fb_copyarea *area)
{
	if (drm_fbdev_use_iomem(info))
		drm_fb_helper_cfb_copyarea(info, area);
	else
		drm_fb_helper_sys_copyarea(info, area);
}

static void drm_fbdev_fb_imageblit(struct fb_info *info,
				   const struct fb_image *image)
{
	if (drm_fbdev_use_iomem(info))
		drm_fb_helper_cfb_imageblit(info, image);
	else
		drm_fb_helper_sys_imageblit(info, image);
}

static const struct fb_ops drm_fbdev_fb_ops = {
	.owner		= THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_open	= drm_fbdev_fb_open,
	.fb_release	= drm_fbdev_fb_release,
	.fb_destroy	= drm_fbdev_fb_destroy,
	.fb_mmap	= drm_fbdev_fb_mmap,
	.fb_read	= drm_fbdev_fb_read,
	.fb_write	= drm_fbdev_fb_write,
	.fb_fillrect	= drm_fbdev_fb_fillrect,
	.fb_copyarea	= drm_fbdev_fb_copyarea,
	.fb_imageblit	= drm_fbdev_fb_imageblit,
};

/*
 * This function uses the client API to create a framebuffer backed by a dumb buffer.
 */
static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
			      struct drm_fb_helper_surface_size *sizes)
{
	struct drm_client_dev *client = &fb_helper->client;
	struct drm_device *dev = fb_helper->dev;
	struct drm_client_buffer *buffer;
	struct drm_framebuffer *fb;
	struct fb_info *info;
	u32 format;
	struct iosys_map map;
	int ret;

	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
		    sizes->surface_width, sizes->surface_height,
		    sizes->surface_bpp);

	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
					       sizes->surface_height, format);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	fb_helper->buffer = buffer;
	fb_helper->fb = buffer->fb;
	fb = buffer->fb;

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info))
		return PTR_ERR(info);

	info->fbops = &drm_fbdev_fb_ops;
	info->screen_size = sizes->surface_height * fb->pitches[0];
	info->fix.smem_len = info->screen_size;
	info->flags = FBINFO_DEFAULT;

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	if (drm_fbdev_use_shadow_fb(fb_helper)) {
		info->screen_buffer = vzalloc(info->screen_size);
		if (!info->screen_buffer)
			return -ENOMEM;
		info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;

		/* Set a default deferred I/O handler */
		fb_helper->fbdefio.delay = HZ / 20;
		fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;

		info->fbdefio = &fb_helper->fbdefio;
		ret = fb_deferred_io_init(info);
		if (ret)
			return ret;
	} else {
		/* buffer is mapped for HW framebuffer */
		ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
		if (ret)
			return ret;
		if (map.is_iomem) {
			info->screen_base = map.vaddr_iomem;
		} else {
			info->screen_buffer = map.vaddr;
			info->flags |= FBINFO_VIRTFB;
		}

		/*
		 * Shamelessly leak the physical address to user-space. As
		 * page_to_phys() is undefined for I/O memory, warn in this
		 * case.
		 */
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
		if (fb_helper->hint_leak_smem_start && info->fix.smem_start == 0 &&
		    !drm_WARN_ON_ONCE(dev, map.is_iomem))
			info->fix.smem_start =
				page_to_phys(virt_to_page(info->screen_buffer));
#endif
	}

	return 0;
}

static void drm_fbdev_damage_blit_real(struct drm_fb_helper *fb_helper,
				       struct drm_clip_rect *clip,
				       struct iosys_map *dst)
{
	struct drm_framebuffer *fb = fb_helper->fb;
	size_t offset = clip->y1 * fb->pitches[0];
	size_t len = clip->x2 - clip->x1;
	unsigned int y;
	void *src;

	switch (drm_format_info_bpp(fb->format, 0)) {
	case 1:
		offset += clip->x1 / 8;
		len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
		break;
	case 2:
		offset += clip->x1 / 4;
		len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
		break;
	case 4:
		offset += clip->x1 / 2;
		len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
		break;
	default:
		offset += clip->x1 * fb->format->cpp[0];
		len *= fb->format->cpp[0];
		break;
	}

	src = fb_helper->info->screen_buffer + offset;
	iosys_map_incr(dst, offset); /* go to first pixel within clip rect */

	for (y = clip->y1; y < clip->y2; y++) {
		iosys_map_memcpy_to(dst, 0, src, len);
		iosys_map_incr(dst, fb->pitches[0]);
		src += fb->pitches[0];
	}
}

static int drm_fbdev_damage_blit(struct drm_fb_helper *fb_helper,
				 struct drm_clip_rect *clip)
{
	struct drm_client_buffer *buffer = fb_helper->buffer;
	struct iosys_map map, dst;
	int ret;

	/*
	 * We have to pin the client buffer to its current location while
	 * flushing the shadow buffer. In the general case, concurrent
	 * modesetting operations could try to move the buffer and would
	 * fail. The modeset has to be serialized by acquiring the reservation
	 * object of the underlying BO here.
	 *
	 * For fbdev emulation, we only have to protect against fbdev modeset
	 * operations. Nothing else will involve the client buffer's BO. So it
	 * is sufficient to acquire struct drm_fb_helper.lock here.
	 */
	mutex_lock(&fb_helper->lock);

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret)
		goto out;

	dst = map;
	drm_fbdev_damage_blit_real(fb_helper, clip, &dst);

	drm_client_buffer_vunmap(buffer);

out:
	mutex_unlock(&fb_helper->lock);

	return ret;
}

static int drm_fbdev_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
{
	struct drm_device *dev = helper->dev;
	int ret;

	if (!drm_fbdev_use_shadow_fb(helper))
		return 0;

	/* Call damage handlers only if necessary */
	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
		return 0;

	if (helper->buffer) {
		ret = drm_fbdev_damage_blit(helper, clip);
		if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
			return ret;
	}

	if (helper->fb->funcs->dirty) {
		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
			return ret;
	}

	return 0;
}

static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
	.fb_probe = drm_fbdev_fb_probe,
	.fb_dirty = drm_fbdev_fb_dirty,
};

static void drm_fbdev_client_unregister(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);

	if (fb_helper->info) {
		drm_fb_helper_unregister_info(fb_helper);
	} else {
		drm_client_release(&fb_helper->client);
		drm_fb_helper_unprepare(fb_helper);
		kfree(fb_helper);
	}
}

static int drm_fbdev_client_restore(struct drm_client_dev *client)
{
	drm_fb_helper_lastclose(client->dev);

	return 0;
}

static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
	struct drm_device *dev = client->dev;
	int ret;

	if (dev->fb_helper)
		return drm_fb_helper_hotplug_event(dev->fb_helper);

	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto err_drm_err;

	if (!drm_drv_uses_atomic_modeset(dev))
		drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(fb_helper);
	if (ret)
		goto err_drm_fb_helper_fini;

	return 0;

err_drm_fb_helper_fini:
	drm_fb_helper_fini(fb_helper);
err_drm_err:
	drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
	return ret;
}

static const struct drm_client_funcs drm_fbdev_client_funcs = {
	.owner		= THIS_MODULE,
	.unregister	= drm_fbdev_client_unregister,
	.restore	= drm_fbdev_client_restore,
	.hotplug	= drm_fbdev_client_hotplug,
};

/**
 * drm_fbdev_generic_setup() - Setup generic fbdev emulation
 * @dev: DRM device
 * @preferred_bpp: Preferred bits per pixel for the device.
 *
 * This function sets up generic fbdev emulation for drivers that support
 * dumb buffers with a virtual address and that can be mmap'ed.
 * drm_fbdev_generic_setup() shall be called after the DRM driver has
 * registered the new DRM device with drm_dev_register().
 *
 * Restore, hotplug events and teardown are all taken care of. Drivers that do
 * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
 * Simple drivers might use drm_mode_config_helper_suspend().
 *
 * Drivers that set the dirty callback on their framebuffer will get a shadow
 * fbdev buffer that is blitted onto the real buffer. This is done in order to
 * make deferred I/O work with all kinds of buffers. A shadow buffer can also
 * be requested explicitly by setting struct drm_mode_config.prefer_shadow or
 * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
 * required to use generic fbdev emulation with SHMEM helpers.
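 *
 * For example, a driver could opt in to the shadow buffer during its
 * mode-config setup with a single assignment (illustrative sketch, not taken
 * from a specific driver)::
 *
 *	dev->mode_config.prefer_shadow_fbdev = true;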
 *
 * This function is safe to call even when there are no connectors present.
 * Setup will be retried on the next hotplug event.
 *
 * The fbdev is destroyed by drm_dev_unregister().
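 *
 * A minimal call sequence might look as follows. foo_probe() and the
 * platform-device glue are illustrative placeholders; only drm_dev_register()
 * and drm_fbdev_generic_setup() are part of the documented flow::
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm;
 *		int ret;
 *
 *		// ... allocate and initialize the DRM device ...
 *
 *		ret = drm_dev_register(drm, 0);
 *		if (ret)
 *			return ret;
 *
 *		// set up generic fbdev emulation, preferring 32 bpp
 *		drm_fbdev_generic_setup(drm, 32);
 *
 *		return 0;
 *	}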
 */
void drm_fbdev_generic_setup(struct drm_device *dev,
			     unsigned int preferred_bpp)
{
	struct drm_fb_helper *fb_helper;
	int ret;

	drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
	drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");

	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
	if (!fb_helper)
		return;
	drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fb_helper_generic_funcs);

	ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
	if (ret) {
		drm_err(dev, "Failed to register client: %d\n", ret);
		goto err_drm_client_init;
	}

	ret = drm_fbdev_client_hotplug(&fb_helper->client);
	if (ret)
		drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);

	drm_client_register(&fb_helper->client);

	return;

err_drm_client_init:
	drm_fb_helper_unprepare(fb_helper);
	kfree(fb_helper);
	return;
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);