/* exynos_drm_fbdev.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

#define MAX_CONNECTOR		4
#define PREFERRED_BPP		32

#define to_exynos_fbdev(x)	container_of(x, struct exynos_drm_fbdev,\
				drm_fb_helper)

struct exynos_drm_fbdev {
	struct drm_fb_helper		drm_fb_helper;
	struct exynos_drm_gem_obj	*exynos_gem_obj;
};

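/*
 * Map the fbdev framebuffer into a process address space.  The backing
 * GEM buffer was allocated through the DMA API, so after validating the
 * requested size against the buffer size the mapping is delegated to
 * dma_mmap_attrs().
 */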
static int exynos_drm_fb_mmap(struct fb_info *info,
			struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = info->par;
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
	unsigned long vm_size;
	int ret;

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	vm_size = vma->vm_end - vma->vm_start;

	if (vm_size > buffer->size)
		return -EINVAL;

	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
		buffer->dma_addr, buffer->size, &buffer->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

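/*
 * Standard fbdev operations: drawing is handled by the generic cfb_*
 * helpers and mode/var handling by the drm_fb_helper_* functions; only
 * .fb_mmap needs a driver-specific implementation.
 */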
static struct fb_ops exynos_drm_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_mmap	= exynos_drm_fb_mmap,
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_setcmap	= drm_fb_helper_setcmap,
};

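/*
 * Fill out the fb_info for the newly created framebuffer: set up the
 * fix/var screen information, map the GEM buffer into kernel virtual
 * address space (vmap() when an IOMMU is used, phys_to_virt() for a
 * contiguous buffer) and point screen_base/smem_start at it.
 */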
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
				     struct drm_framebuffer *fb)
{
	struct fb_info *fbi = helper->fbdev;
	struct drm_device *dev = helper->dev;
	struct exynos_drm_gem_buf *buffer;
	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
	unsigned long offset;

	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);

	/* RGB formats use only one buffer */
	buffer = exynos_drm_fb_buffer(fb, 0);
	if (!buffer) {
		DRM_DEBUG_KMS("buffer is null.\n");
		return -EFAULT;
	}

	/* map the buffer pages into kernel virtual address space. */
	if (!buffer->kvaddr) {
		if (is_drm_iommu_supported(dev)) {
			unsigned int nr_pages = buffer->size >> PAGE_SHIFT;

			buffer->kvaddr = (void __iomem *)vmap(buffer->pages,
					nr_pages, VM_MAP,
					pgprot_writecombine(PAGE_KERNEL));
		} else {
			phys_addr_t dma_addr = buffer->dma_addr;

			if (dma_addr)
				buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
			else
				buffer->kvaddr = NULL;
		}
		if (!buffer->kvaddr) {
			DRM_ERROR("failed to map pages to kernel space.\n");
			return -EIO;
		}
	}

	/* the buffer count of a framebuffer is always 1 at boot time. */
	exynos_drm_fb_set_buf_cnt(fb, 1);

	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
	offset += fbi->var.yoffset * fb->pitches[0];

	dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
	fbi->screen_base = buffer->kvaddr + offset;
	if (is_drm_iommu_supported(dev))
		fbi->fix.smem_start = (unsigned long)
			(page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
	else
		fbi->fix.smem_start = (unsigned long)buffer->dma_addr;

	fbi->screen_size = size;
	fbi->fix.smem_len = size;

	return 0;
}

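/*
 * .fb_probe callback: allocate an fb_info, a GEM buffer (physically
 * contiguous if possible, falling back to non-contiguous memory when an
 * IOMMU is available) and a DRM framebuffer on top of it, then let
 * exynos_drm_fbdev_update() wire everything together.
 */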
static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
				    struct drm_fb_helper_surface_size *sizes)
{
	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_device *dev = helper->dev;
	struct fb_info *fbi;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct platform_device *pdev = dev->platformdev;
	unsigned long size;
	int ret;

	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
			sizes->surface_width, sizes->surface_height,
			sizes->surface_bpp);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	mutex_lock(&dev->struct_mutex);

	fbi = framebuffer_alloc(0, &pdev->dev);
	if (!fbi) {
		DRM_ERROR("failed to allocate fb info.\n");
		ret = -ENOMEM;
		goto out;
	}

	size = mode_cmd.pitches[0] * mode_cmd.height;

	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
	/*
	 * If physically contiguous memory allocation fails and IOMMU is
	 * supported, then try to get the buffer from a non physically
	 * contiguous memory area.
	 */
	if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
		dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
		exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
							size);
	}

	if (IS_ERR(exynos_gem_obj)) {
		ret = PTR_ERR(exynos_gem_obj);
		goto err_release_framebuffer;
	}

	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;

	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
			&exynos_gem_obj->base);
	if (IS_ERR(helper->fb)) {
		DRM_ERROR("failed to create drm framebuffer.\n");
		ret = PTR_ERR(helper->fb);
		goto err_destroy_gem;
	}

	helper->fbdev = fbi;

	fbi->par = helper;
	fbi->flags = FBINFO_FLAG_DEFAULT;
	fbi->fbops = &exynos_drm_fb_ops;

	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
	if (ret) {
		DRM_ERROR("failed to allocate cmap.\n");
		goto err_destroy_framebuffer;
	}

	ret = exynos_drm_fbdev_update(helper, helper->fb);
	if (ret < 0)
		goto err_dealloc_cmap;

	mutex_unlock(&dev->struct_mutex);
	return ret;

err_dealloc_cmap:
	fb_dealloc_cmap(&fbi->cmap);
err_destroy_framebuffer:
	drm_framebuffer_cleanup(helper->fb);
err_destroy_gem:
	exynos_drm_gem_destroy(exynos_gem_obj);
err_release_framebuffer:
	framebuffer_release(fbi);

/*
 * If this function fails, any resources allocated above are released by
 * drm_mode_config_cleanup(), since drm_load() has already been called
 * before any specific driver such as the fimd or hdmi driver.
 */
out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

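/*
 * Only .fb_probe is needed; the fb helper core invokes it while setting
 * up the initial fbdev configuration.
 */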
static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
	.fb_probe	= exynos_drm_fbdev_create,
};

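/*
 * Walk the connector list under the mode_config mutex and report whether
 * at least one connector is in the connected state.
 */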
bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev)
{
	struct drm_connector *connector;
	bool ret = false;

	mutex_lock(&dev->mode_config.mutex);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (connector->status != connector_status_connected)
			continue;

		ret = true;
		break;
	}
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

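/*
 * Create the fbdev emulation for this device: allocate the exynos fb
 * helper state, register all connectors with it and let the fb helper
 * set up an initial configuration.  Nothing is done (and no error is
 * returned) when there are no CRTCs/connectors or nothing is connected.
 */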
int exynos_drm_fbdev_init(struct drm_device *dev)
{
	struct exynos_drm_fbdev *fbdev;
	struct exynos_drm_private *private = dev->dev_private;
	struct drm_fb_helper *helper;
	unsigned int num_crtc;
	int ret;

	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
		return 0;

	if (!exynos_drm_fbdev_is_anything_connected(dev))
		return 0;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev)
		return -ENOMEM;

	private->fb_helper = helper = &fbdev->drm_fb_helper;
	helper->funcs = &exynos_drm_fb_helper_funcs;

	num_crtc = dev->mode_config.num_crtc;

	ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
	if (ret < 0) {
		DRM_ERROR("failed to initialize drm fb helper.\n");
		goto err_init;
	}

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret < 0) {
		DRM_ERROR("failed to register drm_fb_helper_connector.\n");
		goto err_setup;
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
	if (ret < 0) {
		DRM_ERROR("failed to set up hw configuration.\n");
		goto err_setup;
	}

	return 0;

err_setup:
	drm_fb_helper_fini(helper);

err_init:
	private->fb_helper = NULL;
	kfree(fbdev);

	return ret;
}

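/*
 * Release the fbdev resources in reverse order of creation: the kernel
 * mapping of the buffer, the DRM framebuffer, the registered fb_info and
 * finally the fb helper state.
 */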
static void exynos_drm_fbdev_destroy(struct drm_device *dev,
				      struct drm_fb_helper *fb_helper)
{
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
	struct drm_framebuffer *fb;

	if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
		vunmap(exynos_gem_obj->buffer->kvaddr);

	/* release drm framebuffer and real buffer */
	if (fb_helper->fb && fb_helper->fb->funcs) {
		fb = fb_helper->fb;
		drm_framebuffer_unregister_private(fb);
		drm_framebuffer_remove(fb);
	}

	/* release linux framebuffer */
	if (fb_helper->fbdev) {
		struct fb_info *info;
		int ret;

		info = fb_helper->fbdev;
		ret = unregister_framebuffer(info);
		if (ret < 0)
			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");

		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);

		framebuffer_release(info);
	}

	drm_fb_helper_fini(fb_helper);
}

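/*
 * Counterpart of exynos_drm_fbdev_init(): destroy the GEM object backing
 * the fbdev framebuffer (if any), tear down the fbdev and free its state.
 */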
void exynos_drm_fbdev_fini(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;
	struct exynos_drm_fbdev *fbdev;

	if (!private || !private->fb_helper)
		return;

	fbdev = to_exynos_fbdev(private->fb_helper);

	if (fbdev->exynos_gem_obj)
		exynos_drm_gem_destroy(fbdev->exynos_gem_obj);

	exynos_drm_fbdev_destroy(dev, private->fb_helper);
	kfree(fbdev);
	private->fb_helper = NULL;
}

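/*
 * Restore the fbdev mode configuration under the modeset locks so that
 * the fbdev framebuffer becomes visible again.
 */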
void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;

	if (!private || !private->fb_helper)
		return;

	drm_modeset_lock_all(dev);
	drm_fb_helper_restore_fbdev_mode(private->fb_helper);
	drm_modeset_unlock_all(dev);
}