/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <drm/drm_crtc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "vc4_drv.h"
#include "vc4_regs.h"

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

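/*
 * The CTM state is global to the device rather than per-CRTC, so no CRTC
 * lock covers it. vc4_get_ctm_state() therefore takes the dedicated
 * ctm_state_lock before adding the private object state to the update.
 */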
static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = dev->dev_private;
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

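/*
 * Note that the kmemdup() below makes a shallow copy: the embedded ctm
 * pointer still refers to the drm_color_ctm blob data owned by the CRTC
 * state (see vc4_ctm_atomic_check()), not to a private copy.
 */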
static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most significant fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}

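/*
 * A few worked examples of the mapping above:
 *
 *   0.5  is 0x0000000080000000 in S31.32; bits 62:32 are clear, so the
 *        result is (in >> 23) & GENMASK(8, 0) = 0x100 (binary 0.100000000).
 *   1.5  is 0x0000000180000000; bits 62:32 are non-zero, so the magnitude
 *        saturates to 0x1ff, just below 1.0.
 *  -0.25 is BIT(63) | 0x0000000040000000 in sign-magnitude form, giving
 *        BIT(9) | 0x080 = 0x280.
 */

/*
 * Per the drm_color_ctm documentation, the 3x3 matrix is stored row-major,
 * so matrix[0..2] is the R output row. The OLEDCOEF registers are instead
 * grouped by input channel, which is why e.g. matrix[0], matrix[3] and
 * matrix[6] (the R-input contributions to R, G and B) end up together in
 * SCALER_OLEDCOEF2 below.
 */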
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

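/*
 * This open-codes the usual atomic-helper commit sequence, with
 * vc4_ctm_commit() inserted between the modeset disables and the plane
 * updates so the new color transform is programmed before the planes it
 * applies to are committed.
 */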
static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
}

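/* Deferred completion path for nonblocking commits, run from a workqueue. */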
static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	vc4_atomic_complete_commit(state);
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). It can still fail when, e.g., the framebuffer
 * reservation fails. Nonblocking commits are deferred to a workqueue and
 * complete asynchronously.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	if (state->async_update) {
		ret = down_interruptible(&vc4->async_modeset);
		if (ret)
			return ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret) {
			up(&vc4->async_modeset);
			return ret;
		}

		drm_atomic_helper_async_commit(dev, state);

		drm_atomic_helper_cleanup_planes(dev, state);

		up(&vc4->async_modeset);

		return 0;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = down_interruptible(&vc4->async_modeset);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			up(&vc4->async_modeset);
			return ret;
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		vc4_atomic_complete_commit(state);

	return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put_unlocked(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i, j;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			/* fifo is 1-based since 0 disables CTM. */
			int fifo = to_vc4_crtc(crtc)->channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTMs configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
			ctm = new_crtc_state->ctm->data;
			for (j = 0; j < ARRAY_SIZE(ctm->matrix); j++) {
				u64 val = ctm->matrix[j];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

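/*
 * The CTM check runs in addition to the core helper checks, so an update
 * that exceeds the hardware's CTM limits is rejected at check time rather
 * than at commit time.
 */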
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	return drm_atomic_helper_check(dev, state);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};

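/*
 * Called once at bind time. The async_modeset semaphore starts at 1 so the
 * first commit can proceed immediately, and the kzalloc()ed initial CTM
 * state leaves fifo at 0, i.e. the CTM disabled.
 */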
int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state;
	int ret;

	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.allow_fb_modifiers = true;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;
	drm_atomic_private_obj_init(&vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}