1 // SPDX-License-Identifier: MIT
2 
3 #include <linux/moduleparam.h>
4 #include <linux/vmalloc.h>
5 
6 #include <drm/drm_crtc_helper.h>
7 #include <drm/drm_drv.h>
8 #include <drm/drm_fb_helper.h>
9 #include <drm/drm_framebuffer.h>
10 #include <drm/drm_print.h>
11 
12 #include <drm/drm_fbdev_generic.h>
13 
14 static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
15 {
16 	struct drm_device *dev = fb_helper->dev;
17 	struct drm_framebuffer *fb = fb_helper->fb;
18 
19 	return dev->mode_config.prefer_shadow_fbdev ||
20 	       dev->mode_config.prefer_shadow ||
21 	       fb->funcs->dirty;
22 }
23 
24 /* @user: 1=userspace, 0=fbcon */
25 static int drm_fbdev_fb_open(struct fb_info *info, int user)
26 {
27 	struct drm_fb_helper *fb_helper = info->par;
28 
29 	/* No need to take a ref for fbcon because it unbinds on unregister */
30 	if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
31 		return -ENODEV;
32 
33 	return 0;
34 }
35 
36 static int drm_fbdev_fb_release(struct fb_info *info, int user)
37 {
38 	struct drm_fb_helper *fb_helper = info->par;
39 
40 	if (user)
41 		module_put(fb_helper->dev->driver->fops->owner);
42 
43 	return 0;
44 }
45 
/*
 * Final teardown, called when the last reference to the fb_info is
 * dropped. Releases the shadow buffer (or the client-buffer mapping),
 * the client framebuffer, the client itself and the helper structure.
 */
static void drm_fbdev_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	void *shadow = NULL;

	/* Already torn down (helper never got a device); nothing to free. */
	if (!fb_helper->dev)
		return;

	if (info->fbdefio)
		fb_deferred_io_cleanup(info);
	/*
	 * Save the shadow pointer before drm_fb_helper_fini() releases the
	 * fb_info; it must be freed after the helper is finalized.
	 */
	if (drm_fbdev_use_shadow_fb(fb_helper))
		shadow = info->screen_buffer;

	drm_fb_helper_fini(fb_helper);

	/* Either a vmalloc'ed shadow exists or the client buffer is mapped. */
	if (shadow)
		vfree(shadow);
	else if (fb_helper->buffer)
		drm_client_buffer_vunmap(fb_helper->buffer);

	drm_client_framebuffer_delete(fb_helper->buffer);
	drm_client_release(&fb_helper->client);
	kfree(fb_helper);
}
70 
71 static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
72 {
73 	struct drm_fb_helper *fb_helper = info->par;
74 
75 	if (drm_fbdev_use_shadow_fb(fb_helper))
76 		return fb_deferred_io_mmap(info, vma);
77 	else if (fb_helper->dev->driver->gem_prime_mmap)
78 		return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
79 	else
80 		return -ENODEV;
81 }
82 
83 static bool drm_fbdev_use_iomem(struct fb_info *info)
84 {
85 	struct drm_fb_helper *fb_helper = info->par;
86 	struct drm_client_buffer *buffer = fb_helper->buffer;
87 
88 	return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem;
89 }
90 
91 static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf,
92 				 size_t count, loff_t *ppos)
93 {
94 	ssize_t ret;
95 
96 	if (drm_fbdev_use_iomem(info))
97 		ret = drm_fb_helper_cfb_read(info, buf, count, ppos);
98 	else
99 		ret = drm_fb_helper_sys_read(info, buf, count, ppos);
100 
101 	return ret;
102 }
103 
104 static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
105 				  size_t count, loff_t *ppos)
106 {
107 	ssize_t ret;
108 
109 	if (drm_fbdev_use_iomem(info))
110 		ret = drm_fb_helper_cfb_write(info, buf, count, ppos);
111 	else
112 		ret = drm_fb_helper_sys_write(info, buf, count, ppos);
113 
114 	return ret;
115 }
116 
/* Fill a rectangle, dispatching on I/O vs. system memory. */
static void drm_fbdev_fb_fillrect(struct fb_info *info,
				  const struct fb_fillrect *rect)
{
	if (drm_fbdev_use_iomem(info)) {
		drm_fb_helper_cfb_fillrect(info, rect);
		return;
	}
	drm_fb_helper_sys_fillrect(info, rect);
}
125 
/* Copy a screen area, dispatching on I/O vs. system memory. */
static void drm_fbdev_fb_copyarea(struct fb_info *info,
				  const struct fb_copyarea *area)
{
	if (drm_fbdev_use_iomem(info)) {
		drm_fb_helper_cfb_copyarea(info, area);
		return;
	}
	drm_fb_helper_sys_copyarea(info, area);
}
134 
/* Blit an image, dispatching on I/O vs. system memory. */
static void drm_fbdev_fb_imageblit(struct fb_info *info,
				   const struct fb_image *image)
{
	if (drm_fbdev_use_iomem(info)) {
		drm_fb_helper_cfb_imageblit(info, image);
		return;
	}
	drm_fb_helper_sys_imageblit(info, image);
}
143 
/* fbdev ops for the generic emulation; drawing ops pick the right
 * memory accessor at runtime (see drm_fbdev_use_iomem()). */
static const struct fb_ops drm_fbdev_fb_ops = {
	.owner		= THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_open	= drm_fbdev_fb_open,
	.fb_release	= drm_fbdev_fb_release,
	.fb_destroy	= drm_fbdev_fb_destroy,
	.fb_mmap	= drm_fbdev_fb_mmap,
	.fb_read	= drm_fbdev_fb_read,
	.fb_write	= drm_fbdev_fb_write,
	.fb_fillrect	= drm_fbdev_fb_fillrect,
	.fb_copyarea	= drm_fbdev_fb_copyarea,
	.fb_imageblit	= drm_fbdev_fb_imageblit,
};
157 
158 /*
159  * This function uses the client API to create a framebuffer backed by a dumb buffer.
160  */
161 static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
162 			      struct drm_fb_helper_surface_size *sizes)
163 {
164 	struct drm_client_dev *client = &fb_helper->client;
165 	struct drm_device *dev = fb_helper->dev;
166 	struct drm_client_buffer *buffer;
167 	struct drm_framebuffer *fb;
168 	struct fb_info *info;
169 	u32 format;
170 	struct iosys_map map;
171 	int ret;
172 
173 	drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
174 		    sizes->surface_width, sizes->surface_height,
175 		    sizes->surface_bpp);
176 
177 	format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
178 	buffer = drm_client_framebuffer_create(client, sizes->surface_width,
179 					       sizes->surface_height, format);
180 	if (IS_ERR(buffer))
181 		return PTR_ERR(buffer);
182 
183 	fb_helper->buffer = buffer;
184 	fb_helper->fb = buffer->fb;
185 	fb = buffer->fb;
186 
187 	info = drm_fb_helper_alloc_info(fb_helper);
188 	if (IS_ERR(info))
189 		return PTR_ERR(info);
190 
191 	info->fbops = &drm_fbdev_fb_ops;
192 	info->screen_size = sizes->surface_height * fb->pitches[0];
193 	info->fix.smem_len = info->screen_size;
194 	info->flags = FBINFO_DEFAULT;
195 
196 	drm_fb_helper_fill_info(info, fb_helper, sizes);
197 
198 	if (drm_fbdev_use_shadow_fb(fb_helper)) {
199 		info->screen_buffer = vzalloc(info->screen_size);
200 		if (!info->screen_buffer)
201 			return -ENOMEM;
202 		info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
203 
204 		/* Set a default deferred I/O handler */
205 		fb_helper->fbdefio.delay = HZ / 20;
206 		fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
207 
208 		info->fbdefio = &fb_helper->fbdefio;
209 		ret = fb_deferred_io_init(info);
210 		if (ret)
211 			return ret;
212 	} else {
213 		/* buffer is mapped for HW framebuffer */
214 		ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
215 		if (ret)
216 			return ret;
217 		if (map.is_iomem) {
218 			info->screen_base = map.vaddr_iomem;
219 		} else {
220 			info->screen_buffer = map.vaddr;
221 			info->flags |= FBINFO_VIRTFB;
222 		}
223 
224 		/*
225 		 * Shamelessly leak the physical address to user-space. As
226 		 * page_to_phys() is undefined for I/O memory, warn in this
227 		 * case.
228 		 */
229 #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
230 		if (fb_helper->hint_leak_smem_start && info->fix.smem_start == 0 &&
231 		    !drm_WARN_ON_ONCE(dev, map.is_iomem))
232 			info->fix.smem_start =
233 				page_to_phys(virt_to_page(info->screen_buffer));
234 #endif
235 	}
236 
237 	return 0;
238 }
239 
/*
 * Copy the damaged clip rectangle line by line from the shadow buffer
 * into the mapped client buffer @dst. @dst is advanced in place.
 */
static void drm_fbdev_damage_blit_real(struct drm_fb_helper *fb_helper,
				       struct drm_clip_rect *clip,
				       struct iosys_map *dst)
{
	struct drm_framebuffer *fb = fb_helper->fb;
	size_t offset = clip->y1 * fb->pitches[0];
	size_t len = clip->x2 - clip->x1;
	unsigned int y;
	void *src;

	/*
	 * For sub-byte formats the copy works on whole bytes: round the
	 * start offset down to the containing byte and round the length
	 * up so every pixel of the clip rectangle is covered.
	 */
	switch (drm_format_info_bpp(fb->format, 0)) {
	case 1:
		offset += clip->x1 / 8;
		len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
		break;
	case 2:
		offset += clip->x1 / 4;
		len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
		break;
	case 4:
		offset += clip->x1 / 2;
		len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
		break;
	default:
		/* Whole-byte pixels: scale by bytes per pixel. */
		offset += clip->x1 * fb->format->cpp[0];
		len *= fb->format->cpp[0];
		break;
	}

	src = fb_helper->info->screen_buffer + offset;
	iosys_map_incr(dst, offset); /* go to first pixel within clip rect */

	/* Both buffers share the framebuffer pitch, so advance in lockstep. */
	for (y = clip->y1; y < clip->y2; y++) {
		iosys_map_memcpy_to(dst, 0, src, len);
		iosys_map_incr(dst, fb->pitches[0]);
		src += fb->pitches[0];
	}
}
278 
/*
 * Flush the damaged clip rectangle from the shadow buffer into the
 * client buffer. Returns 0 on success or the vmap error code.
 */
static int drm_fbdev_damage_blit(struct drm_fb_helper *fb_helper,
				 struct drm_clip_rect *clip)
{
	struct drm_client_buffer *buffer = fb_helper->buffer;
	struct iosys_map map, dst;
	int ret;

	/*
	 * We have to pin the client buffer to its current location while
	 * flushing the shadow buffer. In the general case, concurrent
	 * modesetting operations could try to move the buffer and would
	 * fail. The modeset has to be serialized by acquiring the reservation
	 * object of the underlying BO here.
	 *
	 * For fbdev emulation, we only have to protect against fbdev modeset
	 * operations. Nothing else will involve the client buffer's BO. So it
	 * is sufficient to acquire struct drm_fb_helper.lock here.
	 */
	mutex_lock(&fb_helper->lock);

	/* Map for the duration of the blit only; unmapped before unlock. */
	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret)
		goto out;

	/* Work on a copy: the blit advances the map cursor in place. */
	dst = map;
	drm_fbdev_damage_blit_real(fb_helper, clip, &dst);

	drm_client_buffer_vunmap(buffer);

out:
	mutex_unlock(&fb_helper->lock);

	return ret;
}
313 
314 static int drm_fbdev_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
315 {
316 	struct drm_device *dev = helper->dev;
317 	int ret;
318 
319 	if (!drm_fbdev_use_shadow_fb(helper))
320 		return 0;
321 
322 	/* Call damage handlers only if necessary */
323 	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
324 		return 0;
325 
326 	if (helper->buffer) {
327 		ret = drm_fbdev_damage_blit(helper, clip);
328 		if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
329 			return ret;
330 	}
331 
332 	if (helper->fb->funcs->dirty) {
333 		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
334 		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
335 			return ret;
336 	}
337 
338 	return 0;
339 }
340 
/* drm_fb_helper callbacks for the generic fbdev emulation. */
static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
	.fb_probe = drm_fbdev_fb_probe,
	.fb_dirty = drm_fbdev_fb_dirty,
};
345 
/*
 * Client .unregister callback. If an fb_info was registered, unregister
 * it; final cleanup then happens in drm_fbdev_fb_destroy() when the last
 * fb_info reference is dropped. Otherwise nothing was set up yet and the
 * helper can be released directly.
 */
static void drm_fbdev_client_unregister(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);

	if (fb_helper->info) {
		drm_fb_helper_unregister_info(fb_helper);
	} else {
		drm_client_release(&fb_helper->client);
		drm_fb_helper_unprepare(fb_helper);
		kfree(fb_helper);
	}
}
358 
359 static int drm_fbdev_client_restore(struct drm_client_dev *client)
360 {
361 	drm_fb_helper_lastclose(client->dev);
362 
363 	return 0;
364 }
365 
366 static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
367 {
368 	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
369 	struct drm_device *dev = client->dev;
370 	int ret;
371 
372 	if (dev->fb_helper)
373 		return drm_fb_helper_hotplug_event(dev->fb_helper);
374 
375 	ret = drm_fb_helper_init(dev, fb_helper);
376 	if (ret)
377 		goto err_drm_err;
378 
379 	if (!drm_drv_uses_atomic_modeset(dev))
380 		drm_helper_disable_unused_functions(dev);
381 
382 	ret = drm_fb_helper_initial_config(fb_helper);
383 	if (ret)
384 		goto err_drm_fb_helper_fini;
385 
386 	return 0;
387 
388 err_drm_fb_helper_fini:
389 	drm_fb_helper_fini(fb_helper);
390 err_drm_err:
391 	drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
392 	return ret;
393 }
394 
/* DRM client hooks tying the fbdev emulation into the device lifecycle. */
static const struct drm_client_funcs drm_fbdev_client_funcs = {
	.owner		= THIS_MODULE,
	.unregister	= drm_fbdev_client_unregister,
	.restore	= drm_fbdev_client_restore,
	.hotplug	= drm_fbdev_client_hotplug,
};
401 
402 /**
403  * drm_fbdev_generic_setup() - Setup generic fbdev emulation
404  * @dev: DRM device
405  * @preferred_bpp: Preferred bits per pixel for the device.
406  *
407  * This function sets up generic fbdev emulation for drivers that supports
408  * dumb buffers with a virtual address and that can be mmap'ed.
409  * drm_fbdev_generic_setup() shall be called after the DRM driver registered
410  * the new DRM device with drm_dev_register().
411  *
412  * Restore, hotplug events and teardown are all taken care of. Drivers that do
413  * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
414  * Simple drivers might use drm_mode_config_helper_suspend().
415  *
416  * Drivers that set the dirty callback on their framebuffer will get a shadow
417  * fbdev buffer that is blitted onto the real buffer. This is done in order to
418  * make deferred I/O work with all kinds of buffers. A shadow buffer can be
419  * requested explicitly by setting struct drm_mode_config.prefer_shadow or
420  * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
421  * required to use generic fbdev emulation with SHMEM helpers.
422  *
423  * This function is safe to call even when there are no connectors present.
424  * Setup will be retried on the next hotplug event.
425  *
426  * The fbdev is destroyed by drm_dev_unregister().
427  */
428 void drm_fbdev_generic_setup(struct drm_device *dev,
429 			     unsigned int preferred_bpp)
430 {
431 	struct drm_fb_helper *fb_helper;
432 	int ret;
433 
434 	drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
435 	drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
436 
437 	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
438 	if (!fb_helper)
439 		return;
440 	drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fb_helper_generic_funcs);
441 
442 	ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
443 	if (ret) {
444 		drm_err(dev, "Failed to register client: %d\n", ret);
445 		goto err_drm_client_init;
446 	}
447 
448 	ret = drm_fbdev_client_hotplug(&fb_helper->client);
449 	if (ret)
450 		drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
451 
452 	drm_client_register(&fb_helper->client);
453 
454 	return;
455 
456 err_drm_client_init:
457 	drm_fb_helper_unprepare(fb_helper);
458 	kfree(fb_helper);
459 	return;
460 }
461 EXPORT_SYMBOL(drm_fbdev_generic_setup);
462