// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <linux/clk.h>
#include <linux/sort.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *
to_vc4_ctm_state(const struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(const struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->ctm_manager);
}

static int vc4_ctm_obj_init(struct vc4_dev *vc4)
{
	struct vc4_ctm_state *ctm_state;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}

/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits, so all we can do is saturate. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most significant fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}
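
/*
 * A rough worked example (added commentary, not from the original
 * source): drm_color_ctm entries are S31.32 sign-magnitude, so 0.5 is
 * 0x0000000080000000. Bit 63 is clear, bits 62:32 are all zero, and
 * (0x80000000 >> 23) & GENMASK(8, 0) = 0x100, i.e. 256/512 = 0.5 in
 * S0.9. A coefficient of -2.0 (0x8000000200000000) has non-zero
 * integer bits and saturates to BIT(9) | GENMASK(8, 0), the most
 * negative representable value.
 */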

static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

struct vc4_hvs_state *
vc4_hvs_get_new_global_state(const struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
	if (!priv_state)
		return ERR_PTR(-EINVAL);

	return to_vc4_hvs_state(priv_state);
}

struct vc4_hvs_state *
vc4_hvs_get_old_global_state(const struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
	if (!priv_state)
		return ERR_PTR(-EINVAL);

	return to_vc4_hvs_state(priv_state);
}

struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}
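
/*
 * A usage note (added commentary, not from the original source): the
 * _new/_old variants above only look up a state that was already added
 * to the drm_atomic_state, and return -EINVAL if nobody pulled the
 * private object in. vc4_hvs_get_global_state() is the one that adds
 * it, taking the private object's modeset lock through
 * drm_atomic_get_private_obj_state(), so the atomic_check paths below
 * go through it first.
 */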

static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		u32 dispctrl;
		u32 dsp3_mux;

		if (!crtc_state->active)
			continue;

		if (vc4_state->assigned_channel != 2)
			continue;

		/*
		 * SCALER_DISPCTRL_DSP3 = X, where X < 3, means 'connect DSP3
		 * to FIFO X'.
		 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
		 *
		 * DSP3 is connected to FIFO2 unless the transposer is
		 * enabled. In that case, FIFO 2 is accessed directly by the
		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
		 * route.
		 */
		if (vc4_crtc->feeds_txp)
			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
		else
			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

		dispctrl = HVS_READ(SCALER_DISPCTRL) &
			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
	}
}

static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned char mux;
	unsigned int i;
	u32 reg;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int channel = vc4_state->assigned_channel;

		if (!vc4_state->update_muxing)
			continue;

		switch (vc4_crtc->data->hvs_output) {
		case 2:
			drm_WARN_ON(&vc4->base,
				    VC4_GET_FIELD(HVS_READ(SCALER_DISPCTRL),
						  SCALER_DISPCTRL_DSP3_MUX) == channel);

			mux = (channel == 2) ? 0 : 1;
			reg = HVS_READ(SCALER_DISPECTRL);
			HVS_WRITE(SCALER_DISPECTRL,
				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
			break;

		case 3:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPCTRL);
			HVS_WRITE(SCALER_DISPCTRL,
				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
			break;

		case 4:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPEOLN);
			HVS_WRITE(SCALER_DISPEOLN,
				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));

			break;

		case 5:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPDITHER);
			HVS_WRITE(SCALER_DISPDITHER,
				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
			break;

		default:
			break;
		}
	}
}

static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *new_crtc_state;
	struct vc4_hvs_state *new_hvs_state;
	struct drm_crtc *crtc;
	struct vc4_hvs_state *old_hvs_state;
	unsigned int channel;
	int i;

	old_hvs_state = vc4_hvs_get_old_global_state(state);
	if (WARN_ON(IS_ERR(old_hvs_state)))
		return;

	new_hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(new_hvs_state)))
		return;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state;

		if (!new_crtc_state->commit)
			continue;

		vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
		vc4_hvs_mask_underrun(hvs, vc4_crtc_state->assigned_channel);
	}

	for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
		struct drm_crtc_commit *commit;
		int ret;

		if (!old_hvs_state->fifo_state[channel].in_use)
			continue;

		commit = old_hvs_state->fifo_state[channel].pending_commit;
		if (!commit)
			continue;

		ret = drm_crtc_commit_wait(commit);
		if (ret)
			drm_err(dev, "Timed out waiting for commit\n");

		drm_crtc_commit_put(commit);
		old_hvs_state->fifo_state[channel].pending_commit = NULL;
	}

	if (vc4->is_vc5) {
		unsigned long state_rate = max(old_hvs_state->core_clock_rate,
					       new_hvs_state->core_clock_rate);
		unsigned long core_rate = clamp_t(unsigned long, state_rate,
						  500000000, hvs->max_core_rate);

		drm_dbg(dev, "Raising the core clock to %lu Hz\n", core_rate);

		/*
		 * Do a temporary request on the core clock during the
		 * modeset.
		 */
		WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
	}

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	if (vc4->is_vc5)
		vc5_hvs_pv_muxing_commit(vc4, state);
	else
		vc4_hvs_pv_muxing_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	if (vc4->is_vc5) {
		unsigned long core_rate = min_t(unsigned long,
						hvs->max_core_rate,
						new_hvs_state->core_clock_rate);

		drm_dbg(dev, "Running the core clock at %lu Hz\n", core_rate);

		/*
		 * Request a clock rate based on the current HVS
		 * requirements.
		 */
		WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));

		drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
			clk_get_rate(hvs->core_clk));
	}
}

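/*
 * Added commentary (not from the original source): this hook runs from
 * drm_atomic_helper_setup_commit(), before our commit_tail, so it is
 * the place to record each FIFO's new drm_crtc_commit. The matching
 * drm_crtc_commit_wait()/drm_crtc_commit_put() pair in
 * vc4_atomic_commit_tail() then serializes back-to-back commits that
 * touch the same FIFO.
 */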
static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct vc4_hvs_state *hvs_state;
	struct drm_crtc *crtc;
	unsigned int i;

	hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(hvs_state)))
		return PTR_ERR(hvs_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state =
			to_vc4_crtc_state(crtc_state);
		unsigned int channel =
			vc4_crtc_state->assigned_channel;

		if (channel == VC4_HVS_CHANNEL_DISABLED)
			continue;

		if (!hvs_state->fifo_state[channel].in_use)
			continue;

		hvs_state->fifo_state[channel].pending_commit =
			drm_crtc_commit_get(crtc_state->commit);
	}

	return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_mode_fb_cmd2 mode_cmd_local;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return ERR_PTR(-ENODEV);

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			struct vc4_crtc_state *vc4_crtc_state =
				to_vc4_crtc_state(new_crtc_state);

			/* fifo is 1-based since 0 disables CTM. */
			int fifo = vc4_crtc_state->assigned_channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTMs configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
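			/* Illustrative note (added, not in the original):
			 * 1.0 in S31.32 sign-magnitude is 0x100000000, so
			 * after masking the sign bit below, val equal to
			 * BIT_ULL(32) passes while anything larger is
			 * rejected.
			 */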
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2 Gbyte/sec, but take some margin so the
	 * system keeps working when other blocks are accessing the memory.
	 */
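	/* A rough worked example (added, assuming the per-plane membus
	 * load is about source pixels * bytes per pixel * refresh rate):
	 * a 1920x1080 ARGB8888 plane at 60 Hz contributes roughly
	 * 1920 * 1080 * 4 * 60 ~= 498 MB/s, so three such planes still
	 * fit within the SZ_1G + SZ_512M budget enforced below.
	 */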
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* The HVS clock is supposed to run at 250 MHz; take a margin and
	 * consider that the maximum available is 240M cycles.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->load_tracker);
}

static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
	struct vc4_load_tracker_state *load_state;

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
				    &load_state->base,
				    &vc4_load_tracker_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}

static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
	struct vc4_hvs_state *state;
	unsigned int i;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
		state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
	}

	state->core_clock_rate = old_state->core_clock_rate;

	return &state->base;
}

static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_state->fifo_state[i].pending_commit)
			continue;

		drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
	}

	kfree(hvs_state);
}

static void vc4_hvs_channels_print_state(struct drm_printer *p,
					 const struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	drm_printf(p, "HVS State\n");
	drm_printf(p, "\tCore Clock Rate: %lu\n", hvs_state->core_clock_rate);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		drm_printf(p, "\tChannel %d\n", i);
		drm_printf(p, "\t\tin use=%d\n", hvs_state->fifo_state[i].in_use);
		drm_printf(p, "\t\tload=%lu\n", hvs_state->fifo_state[i].fifo_load);
	}
}

static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
	.atomic_print_state = vc4_hvs_channels_print_state,
};

static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->hvs_channels);
}

static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
				    &state->base,
				    &vc4_hvs_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}

static int cmp_vc4_crtc_hvs_output(const void *a, const void *b)
{
	const struct vc4_crtc *crtc_a =
		to_vc4_crtc(*(const struct drm_crtc **)a);
	const struct vc4_crtc_data *data_a =
		vc4_crtc_to_vc4_crtc_data(crtc_a);
	const struct vc4_crtc *crtc_b =
		to_vc4_crtc(*(const struct drm_crtc **)b);
	const struct vc4_crtc_data *data_b =
		vc4_crtc_to_vc4_crtc_data(crtc_b);

	return data_a->hvs_output - data_b->hvs_output;
}

/*
 * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
 * the TXP (and therefore all the CRTCs found on that platform).
 *
 * The naive (and our initial) implementation would just iterate over
 * all the active CRTCs, try to find a suitable FIFO, and then remove it
 * from the pool of available FIFOs. However, there are a few corner
 * cases that need to be considered:
 *
 * - When running in a dual-display setup (so with two CRTCs involved),
 *   we can update the state of a single CRTC (for example by changing
 *   its mode using xrandr under X11) without affecting the other. In
 *   this case, the other CRTC wouldn't be in the state at all, so we
 *   need to consider all the running CRTCs in the DRM device to assign
 *   a FIFO, not just the one in the state.
 *
 * - To fix the above, we can't use drm_atomic_get_crtc_state on all
 *   enabled CRTCs to pull their CRTC state into the global state, since
 *   the page flip would then also wait for those CRTCs' vblanks to
 *   complete. Since we don't have a guarantee that they are actually
 *   active, those vblanks might never happen, and shouldn't even be
 *   considered if we want to do a page flip on a single CRTC. That can
 *   be tested by doing a modetest -v first on HDMI1 and then on HDMI0.
 *
 * - Since we need the pixelvalve to be disabled and enabled back when
 *   the FIFO is changed, we should keep the FIFO assigned for as long
 *   as the CRTC is enabled, only considering it free again once that
 *   CRTC has been disabled. This can be tested by booting X11 on a
 *   single display, and changing the resolution down and then back up.
 */
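/*
 * In short (added summary, not in the original): FIFO assignment below
 * is a greedy pass over all CRTCs sorted by hvs_output; each newly
 * enabled CRTC grabs the lowest-numbered free FIFO it can reach
 * (ffs(matching_channels) - 1), and a FIFO only returns to the pool
 * when its CRTC is disabled.
 */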
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
				      struct drm_atomic_state *state)
{
	struct vc4_hvs_state *hvs_new_state;
	struct drm_crtc **sorted_crtcs;
	struct drm_crtc *crtc;
	unsigned int unassigned_channels = 0;
	unsigned int i;
	int ret;

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
		if (!hvs_new_state->fifo_state[i].in_use)
			unassigned_channels |= BIT(i);

	/*
	 * The problem we have to solve here is that we have up to 7
	 * encoders, connected to up to 6 CRTCs.
	 *
	 * Those CRTCs, depending on the instance, can be routed to 1, 2
	 * or 3 HVS FIFOs, and we need to set the muxing between FIFOs and
	 * outputs in the HVS accordingly.
	 *
	 * It would be pretty hard to come up with an algorithm that
	 * would generically solve this. However, the current routing
	 * trees we support allow us to simplify the problem a bit.
	 *
	 * Indeed, with the currently supported layouts, if we assign the
	 * FIFOs in ascending HVS output order, we can't fall into the
	 * situation where an earlier CRTC that had multiple routes is
	 * assigned one that was the only option for a later CRTC.
	 *
	 * If the layout changes and doesn't give us that in the future,
	 * we will need to have something smarter, but it works so far.
	 */
	sorted_crtcs = kmalloc_array(dev->num_crtcs, sizeof(*sorted_crtcs), GFP_KERNEL);
	if (!sorted_crtcs)
		return -ENOMEM;

	i = 0;
	drm_for_each_crtc(crtc, dev)
		sorted_crtcs[i++] = crtc;

	sort(sorted_crtcs, i, sizeof(*sorted_crtcs), cmp_vc4_crtc_hvs_output, NULL);

	for (i = 0; i < dev->num_crtcs; i++) {
		struct vc4_crtc_state *old_vc4_crtc_state, *new_vc4_crtc_state;
		struct drm_crtc_state *old_crtc_state, *new_crtc_state;
		struct vc4_crtc *vc4_crtc;
		unsigned int matching_channels;
		unsigned int channel;

		crtc = sorted_crtcs[i];
		if (!crtc)
			continue;
		vc4_crtc = to_vc4_crtc(crtc);

		old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
		if (!old_crtc_state)
			continue;
		old_vc4_crtc_state = to_vc4_crtc_state(old_crtc_state);

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state)
			continue;
		new_vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);

		drm_dbg(dev, "%s: Trying to find a channel.\n", crtc->name);

		/* Nothing to do here, let's skip it */
		if (old_crtc_state->enable == new_crtc_state->enable) {
			if (new_crtc_state->enable)
				drm_dbg(dev, "%s: Already enabled, reusing channel %d.\n",
					crtc->name, new_vc4_crtc_state->assigned_channel);
			else
				drm_dbg(dev, "%s: Disabled, ignoring.\n", crtc->name);

			continue;
		}

		/* Muxing will need to be modified, mark it as such */
		new_vc4_crtc_state->update_muxing = true;

		/* If we're disabling our CRTC, we put back our channel */
		if (!new_crtc_state->enable) {
			channel = old_vc4_crtc_state->assigned_channel;

			drm_dbg(dev, "%s: Disabling, Freeing channel %d\n",
				crtc->name, channel);

			hvs_new_state->fifo_state[channel].in_use = false;
			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
			continue;
		}

		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
		if (!matching_channels) {
			ret = -EINVAL;
			goto err_free_crtc_array;
		}

		channel = ffs(matching_channels) - 1;

		drm_dbg(dev, "Assigned HVS channel %d to CRTC %s\n", channel, crtc->name);
		new_vc4_crtc_state->assigned_channel = channel;
		unassigned_channels &= ~BIT(channel);
		hvs_new_state->fifo_state[channel].in_use = true;
	}

	kfree(sorted_crtcs);
	return 0;

err_free_crtc_array:
	kfree(sorted_crtcs);
	return ret;
}

static int
vc4_core_clock_atomic_check(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;
	struct vc4_hvs_state *hvs_new_state;
	struct vc4_load_tracker_state *load_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int num_outputs;
	unsigned long pixel_rate;
	unsigned long cob_rate;
	unsigned int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	for_each_oldnew_crtc_in_state(state, crtc,
				      old_crtc_state,
				      new_crtc_state,
				      i) {
		if (old_crtc_state->active) {
			struct vc4_crtc_state *old_vc4_state =
				to_vc4_crtc_state(old_crtc_state);
			unsigned int channel = old_vc4_state->assigned_channel;

			hvs_new_state->fifo_state[channel].fifo_load = 0;
		}

		if (new_crtc_state->active) {
			struct vc4_crtc_state *new_vc4_state =
				to_vc4_crtc_state(new_crtc_state);
			unsigned int channel = new_vc4_state->assigned_channel;

			hvs_new_state->fifo_state[channel].fifo_load =
				new_vc4_state->hvs_load;
		}
	}

	cob_rate = 0;
	num_outputs = 0;
	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_new_state->fifo_state[i].in_use)
			continue;

		num_outputs++;
		cob_rate = max_t(unsigned long,
				 hvs_new_state->fifo_state[i].fifo_load,
				 cob_rate);
	}

	pixel_rate = load_state->hvs_load;
	if (num_outputs > 1) {
		pixel_rate = (pixel_rate * 40) / 100;
	} else {
		pixel_rate = (pixel_rate * 60) / 100;
	}

	hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);

	return 0;
}

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_pv_muxing_atomic_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_load_tracker_atomic_check(state);
	if (ret)
		return ret;

	return vc4_core_clock_atomic_check(state);
}

static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
	.atomic_commit_setup	= vc4_atomic_commit_setup,
	.atomic_commit_tail	= vc4_atomic_commit_tail,
};

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = vc4_fb_create,
};

static const struct drm_mode_config_funcs vc5_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = drm_gem_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	/*
	 * The limits enforced by the load tracker aren't relevant for
	 * the BCM2711, but the load tracker computations are used for
	 * the core clock rate calculation.
	 */
	if (!vc4->is_vc5) {
		/* Start with the load tracker enabled. Can be
		 * disabled through the debugfs load_tracker file.
		 */
		vc4->load_tracker_enabled = true;
	}

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	if (vc4->is_vc5) {
		dev->mode_config.max_width = 7680;
		dev->mode_config.max_height = 7680;
	} else {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	}

	dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs;
	dev->mode_config.helper_private = &vc4_mode_config_helpers;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.normalize_zpos = true;

	ret = vc4_ctm_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_load_tracker_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_hvs_channels_obj_init(vc4);
	if (ret)
		return ret;

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}