xref: /openbmc/linux/drivers/gpu/drm/vc4/vc4_kms.c (revision 160b8e75)
/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <drm/drm_crtc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "vc4_drv.h"

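/*
 * Tail of every commit, run either directly (blocking commit) or from
 * commit_work() (nonblocking commit): it walks the standard
 * atomic-helper sequence (disables, plane updates, enables), waits for
 * vblank so the hardware is done scanning out the old display lists,
 * cleans up, and finally releases the async_modeset semaphore taken in
 * vc4_atomic_commit().
 */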
static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* Make sure that drm_atomic_helper_wait_for_vblanks()
	 * actually waits for vblank.  If we're doing a full atomic
	 * modeset (as opposed to a vc4_update_plane() short circuit),
	 * then we need to wait for scanout to be done with our display
	 * lists before we free them and potentially reallocate and
	 * overwrite the dlist memory with a new modeset.
	 */
	state->legacy_cursor_update = false;

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
}

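/*
 * Work-queue entry point for nonblocking commits: recovers the atomic
 * state from the work item embedded in it and finishes the commit.
 */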
static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	vc4_atomic_complete_commit(state);
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). It can still fail when, e.g., the
 * framebuffer reservation fails. Nonblocking commits are completed
 * asynchronously from a work item.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = down_interruptible(&vc4->async_modeset);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			up(&vc4->async_modeset);
			return ret;
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		vc4_atomic_complete_commit(state);

	return 0;
}

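/*
 * A minimal userspace sketch (not driver code; object and property IDs
 * below are placeholders) of how the commit path above is reached via
 * libdrm. Passing DRM_MODE_ATOMIC_NONBLOCK selects the nonblocking
 * path, which defers vc4_atomic_complete_commit() to commit_work():
 *
 *	drmModeAtomicReq *req = drmModeAtomicAlloc();
 *
 *	// 0xf00 / 0xba5 stand in for a real plane ID and its FB_ID
 *	// property ID, discovered via drmModeObjectGetProperties().
 *	drmModeAtomicAddProperty(req, 0xf00, 0xba5, fb_id);
 *	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
 *	drmModeAtomicFree(req);
 */
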
static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put_unlocked(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

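/*
 * A minimal userspace sketch (not driver code) of the fallback that
 * vc4_fb_create() honors: tag a BO as T-format tiled with
 * DRM_IOCTL_VC4_SET_TILING, then add a framebuffer *without*
 * DRM_MODE_FB_MODIFIERS so the modifier is inferred from the BO:
 *
 *	struct drm_vc4_set_tiling arg = {
 *		.handle = bo_handle,
 *		.modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_VC4_SET_TILING, &arg);
 */
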
static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};

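/*
 * One-time KMS setup, called while the driver is being bound: registers
 * the mode-config hooks above, sets the scanout limits, and brings up
 * fbdev emulation and connector polling.
 */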
int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

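	/* Binary semaphore serializing modesets: taken in
	 * vc4_atomic_commit() and released at the end of
	 * vc4_atomic_complete_commit(), so at most one commit is in
	 * flight at a time.
	 */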
	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.allow_fb_modifiers = true;

	drm_mode_config_reset(dev);

	if (dev->mode_config.num_connector)
		drm_fb_cma_fbdev_init(dev, 32, 0);

	drm_kms_helper_poll_init(dev);

	return 0;
}
229