xref: /openbmc/linux/drivers/gpu/drm/vc4/vc4_kms.c (revision 5b4cb650)
/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <drm/drm_crtc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "vc4_drv.h"
#include "vc4_regs.h"

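/*
 * struct vc4_ctm_state - atomic private state tracking the single color
 * transform matrix (CTM) the HVS can apply.
 *
 * @base: drm_private_state base, so the CTM rides along with atomic commits.
 * @ctm:  matrix to program, in the DRM S31.32 fixed-point format.
 * @fifo: HVS FIFO the CTM is enabled on, 1-based; 0 means the CTM is disabled.
 */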
struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

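/*
 * Returns the vc4_ctm_state for this atomic commit, taking the global
 * ctm_state_lock first so concurrent commits touching the CTM serialize.
 * May return an ERR_PTR, e.g. -EDEADLK when the caller must back off and
 * retry the commit.
 */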
static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = dev->dev_private;
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

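/*
 * Private-object state handlers: duplicate copies the whole subclassed
 * state with kmemdup() and then re-initializes the base with the helper,
 * destroy just frees it.  Hooked up below in vc4_ctm_state_funcs.
 */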
static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

/* Converts a DRM S31.32 value to the HW S0.9 format. */
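/*
 * Worked example (illustrative): 0.5 in S31.32 is 0x0000000080000000.  The
 * sign bit is clear and no bits above the fraction are set, so the result is
 * (in >> 23) & GENMASK(8, 0) = 0x100, i.e. 256/512 = 0.5 in S0.9.  Any
 * magnitude of 1.0 or more saturates the fraction to 0x1ff.
 */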
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most significant fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}

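/*
 * Programs the committed CTM into the HVS: the three SCALER_OLEDCOEF
 * registers get the matrix (approximated to S0.9), and SCALER_OLEDOFFS's
 * DISPFIFO field selects which FIFO the CTM applies to (0 turns it off).
 */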
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

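/*
 * Commit tail: runs synchronously for blocking commits and from commit_work
 * for nonblocking ones.  This mirrors the default atomic helper tail, with
 * vc4_ctm_commit() slotted in between the modeset disables and the plane
 * update, and releases async_modeset once the commit is fully cleaned up.
 */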
static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
}

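/* Worker used to complete nonblocking commits off the system_unbound_wq. */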
static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	vc4_atomic_complete_commit(state);
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). It can still fail when e.g. the framebuffer
 * reservation fails. Nonblocking commits are completed from a worker, and
 * async plane updates take a fast path that skips the full commit machinery.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	if (state->async_update) {
		ret = down_interruptible(&vc4->async_modeset);
		if (ret)
			return ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret) {
			up(&vc4->async_modeset);
			return ret;
		}

		drm_atomic_helper_async_commit(dev, state);

		drm_atomic_helper_cleanup_planes(dev, state);

		up(&vc4->async_modeset);

		return 0;
	}

	/* We know for sure we don't want an async update here. Set
	 * state->legacy_cursor_update to false to prevent
	 * drm_atomic_helper_setup_commit() from auto-completing
	 * commit->flip_done.
	 */
	state->legacy_cursor_update = false;
	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = down_interruptible(&vc4->async_modeset);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			up(&vc4->async_modeset);
			return ret;
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		vc4_atomic_complete_commit(state);

	return 0;
}

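/*
 * Framebuffer creation: a thin wrapper around drm_gem_fb_create().  When
 * userspace doesn't pass an explicit modifier, the modifier is derived from
 * the BO's vc4_set_tiling_ioctl() state (T-tiled vs. linear) so legacy
 * userspace still gets correct scanout of tiled buffers.
 */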
static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put_unlocked(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			/* fifo is 1-based since 0 disables CTM. */
			int fifo = to_vc4_crtc(crtc)->channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTMs configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

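/* Driver-specific CTM validation runs before the core helper check. */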
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	return drm_atomic_helper_check(dev, state);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};

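/*
 * One-time KMS setup for the device: the async_modeset semaphore that
 * serializes commits, vblank support, mode config limits and hooks, and the
 * CTM private-state object, followed by the initial state reset and
 * connector polling.
 */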
int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state;
	int ret;

	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.allow_fb_modifiers = true;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;
	drm_atomic_private_obj_init(&vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}