xref: /openbmc/linux/drivers/gpu/drm/tegra/hub.c (revision cd6d421e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
4  */
5 
6 #include <linux/clk.h>
7 #include <linux/delay.h>
8 #include <linux/host1x.h>
9 #include <linux/module.h>
10 #include <linux/of.h>
11 #include <linux/of_device.h>
12 #include <linux/of_graph.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/reset.h>
16 
17 #include <drm/drm_atomic.h>
18 #include <drm/drm_atomic_helper.h>
19 #include <drm/drm_fourcc.h>
20 #include <drm/drm_probe_helper.h>
21 
22 #include "drm.h"
23 #include "dc.h"
24 #include "plane.h"
25 
26 static const u32 tegra_shared_plane_formats[] = {
27 	DRM_FORMAT_ARGB1555,
28 	DRM_FORMAT_RGB565,
29 	DRM_FORMAT_RGBA5551,
30 	DRM_FORMAT_ARGB8888,
31 	DRM_FORMAT_ABGR8888,
32 	/* new on Tegra114 */
33 	DRM_FORMAT_ABGR4444,
34 	DRM_FORMAT_ABGR1555,
35 	DRM_FORMAT_BGRA5551,
36 	DRM_FORMAT_XRGB1555,
37 	DRM_FORMAT_RGBX5551,
38 	DRM_FORMAT_XBGR1555,
39 	DRM_FORMAT_BGRX5551,
40 	DRM_FORMAT_BGR565,
41 	DRM_FORMAT_XRGB8888,
42 	DRM_FORMAT_XBGR8888,
43 	/* packed and planar YUV formats */
44 	DRM_FORMAT_UYVY,
45 	DRM_FORMAT_YUYV,
46 	DRM_FORMAT_YUV420,
47 	DRM_FORMAT_YUV422,
48 };
49 
50 static const u64 tegra_shared_plane_modifiers[] = {
51 	DRM_FORMAT_MOD_LINEAR,
52 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
53 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
54 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
55 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
56 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
57 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
58 	DRM_FORMAT_MOD_INVALID
59 };
60 
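/*
 * Translate a window register offset into the per-plane aperture. The shared
 * window registers at 0x500-0x581, 0x700-0x73c and 0x800-0x83e in the display
 * controller's register space are remapped to 0x000, 0x180 and 0x1c0 relative
 * to plane->offset; an offset of 0x700, for example, ends up at
 * plane->offset + 0x180. Offsets outside these ranges trigger a warning and
 * are returned relative to plane->offset without remapping.
 */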
61 static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
62 					      unsigned int offset)
63 {
64 	if (offset >= 0x500 && offset <= 0x581) {
65 		offset = 0x000 + (offset - 0x500);
66 		return plane->offset + offset;
67 	}
68 
69 	if (offset >= 0x700 && offset <= 0x73c) {
70 		offset = 0x180 + (offset - 0x700);
71 		return plane->offset + offset;
72 	}
73 
74 	if (offset >= 0x800 && offset <= 0x83e) {
75 		offset = 0x1c0 + (offset - 0x800);
76 		return plane->offset + offset;
77 	}
78 
79 	dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
80 
81 	return plane->offset + offset;
82 }
83 
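/*
 * Read/write accessors for shared plane registers: accesses go through the
 * owning display controller using the translated per-plane offset above.
 */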
84 static inline u32 tegra_plane_readl(struct tegra_plane *plane,
85 				    unsigned int offset)
86 {
87 	return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
88 }
89 
90 static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
91 				      unsigned int offset)
92 {
93 	tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
94 }
95 
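/*
 * Enable a window group. The first user resumes the parent host1x client
 * (the owning display controller) and deasserts the window group reset;
 * subsequent users only increment the use count. Serialized by wgrp->lock.
 */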
96 static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
97 {
98 	int err = 0;
99 
100 	mutex_lock(&wgrp->lock);
101 
102 	if (wgrp->usecount == 0) {
103 		err = host1x_client_resume(wgrp->parent);
104 		if (err < 0) {
105 			dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
106 			goto unlock;
107 		}
108 
109 		reset_control_deassert(wgrp->rst);
110 	}
111 
112 	wgrp->usecount++;
113 
114 unlock:
115 	mutex_unlock(&wgrp->lock);
116 	return err;
117 }
118 
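/*
 * Disable a window group. The last user asserts the window group reset and
 * suspends the parent host1x client; the use count is decremented in any
 * case. Serialized by wgrp->lock.
 */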
119 static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
120 {
121 	int err;
122 
123 	mutex_lock(&wgrp->lock);
124 
125 	if (wgrp->usecount == 1) {
126 		err = reset_control_assert(wgrp->rst);
127 		if (err < 0) {
128 			pr_err("failed to assert reset for window group %u\n",
129 			       wgrp->index);
130 		}
131 
132 		host1x_client_suspend(wgrp->parent);
133 	}
134 
135 	wgrp->usecount--;
136 	mutex_unlock(&wgrp->lock);
137 }
138 
139 int tegra_display_hub_prepare(struct tegra_display_hub *hub)
140 {
141 	unsigned int i;
142 
143 	/*
144 	 * XXX Enabling/disabling window groups needs to happen when the owner
145 	 * display controller is disabled. There's currently no good point at
146 	 * which this could be executed, so unconditionally enable all window
147 	 * groups for now.
148 	 */
149 	for (i = 0; i < hub->soc->num_wgrps; i++) {
150 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
151 
152 		/* Skip orphaned window group whose parent DC is disabled */
153 		if (wgrp->parent)
154 			tegra_windowgroup_enable(wgrp);
155 	}
156 
157 	return 0;
158 }
159 
160 void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
161 {
162 	unsigned int i;
163 
164 	/*
165 	 * XXX Remove this once window groups can be enabled and disabled
166 	 * at a finer granularity.
167 	 */
168 	for (i = 0; i < hub->soc->num_wgrps; i++) {
169 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
170 
171 		/* Skip orphaned window group whose parent DC is disabled */
172 		if (wgrp->parent)
173 			tegra_windowgroup_disable(wgrp);
174 	}
175 }
176 
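/*
 * Latch the pending state for this window and the common (shared) state by
 * writing the corresponding UPDATE bits, then poll DC_CMD_STATE_CONTROL for
 * up to one second until the hardware has cleared them.
 */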
177 static void tegra_shared_plane_update(struct tegra_plane *plane)
178 {
179 	struct tegra_dc *dc = plane->dc;
180 	unsigned long timeout;
181 	u32 mask, value;
182 
183 	mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
184 	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
185 
186 	timeout = jiffies + msecs_to_jiffies(1000);
187 
188 	while (time_before(jiffies, timeout)) {
189 		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
190 		if ((value & mask) == 0)
191 			break;
192 
193 		usleep_range(100, 400);
194 	}
195 }
196 
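/*
 * Request activation of the latched state (ACT_REQ) for this window and the
 * common state, then poll for up to one second until the request bits clear.
 */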
197 static void tegra_shared_plane_activate(struct tegra_plane *plane)
198 {
199 	struct tegra_dc *dc = plane->dc;
200 	unsigned long timeout;
201 	u32 mask, value;
202 
203 	mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
204 	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
205 
206 	timeout = jiffies + msecs_to_jiffies(1000);
207 
208 	while (time_before(jiffies, timeout)) {
209 		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
210 		if ((value & mask) == 0)
211 			break;
212 
213 		usleep_range(100, 400);
214 	}
215 }
216 
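/*
 * Read the OWNER field of DC_WIN_CORE_WINDOWGROUP_SET_CONTROL to determine
 * which head currently owns the shared window.
 */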
217 static unsigned int
218 tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
219 {
220 	unsigned int offset =
221 		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
222 
223 	return tegra_dc_readl(dc, offset) & OWNER_MASK;
224 }
225 
226 static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
227 				       struct tegra_plane *plane)
228 {
229 	struct device *dev = dc->dev;
230 
231 	if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
232 		if (plane->dc == dc)
233 			return true;
234 
235 		dev_WARN(dev, "head %u owns window %u but is not attached\n",
236 			 dc->pipe, plane->index);
237 	}
238 
239 	return false;
240 }
241 
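/*
 * Attach a shared window to a new head, or detach it by writing OWNER_MASK
 * when new is NULL. Taking over a window that is still owned by a different
 * head is refused with -EBUSY.
 */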
242 static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
243 					struct tegra_dc *new)
244 {
245 	unsigned int offset =
246 		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
247 	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
248 	struct device *dev = new ? new->dev : old->dev;
249 	unsigned int owner, index = plane->index;
250 	u32 value;
251 
252 	value = tegra_dc_readl(dc, offset);
253 	owner = value & OWNER_MASK;
254 
255 	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
256 		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
257 		return -EBUSY;
258 	}
259 
260 	/*
261 	 * This seems to happen whenever the head has been disabled with one
262 	 * or more windows being active. This is harmless because we'll just
263 	 * reassign the window to the new head anyway.
264 	 */
265 	if (old && owner == OWNER_MASK)
266 		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
267 			old->pipe, owner);
268 
269 	value &= ~OWNER_MASK;
270 
271 	if (new)
272 		value |= OWNER(new->pipe);
273 	else
274 		value |= OWNER_MASK;
275 
276 	tegra_dc_writel(dc, value, offset);
277 
278 	plane->dc = new;
279 
280 	return 0;
281 }
282 
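/*
 * Claim ownership of a shared plane for the given display controller and
 * program a fixed set of IHUB parameters for it (line buffer configuration,
 * fetch metering, latency/watermark control, pipe meter, mempool entries and
 * thread group), then latch and activate the new configuration.
 */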
283 static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
284 					 struct tegra_plane *plane)
285 {
286 	u32 value;
287 	int err;
288 
289 	if (!tegra_dc_owns_shared_plane(dc, plane)) {
290 		err = tegra_shared_plane_set_owner(plane, dc);
291 		if (err < 0)
292 			return;
293 	}
294 
295 	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
296 	value |= MODE_FOUR_LINES;
297 	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
298 
299 	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
300 	value = SLOTS(1);
301 	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
302 
303 	/* disable watermark */
304 	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
305 	value &= ~LATENCY_CTL_MODE_ENABLE;
306 	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
307 
308 	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
309 	value |= WATERMARK_MASK;
310 	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
311 
312 	/* pipe meter */
313 	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
314 	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
315 	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
316 
317 	/* mempool entries */
318 	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
319 	value = MEMPOOL_ENTRIES(0x331);
320 	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
321 
322 	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
323 	value &= ~THREAD_NUM_MASK;
324 	value |= THREAD_NUM(plane->base.index);
325 	value |= THREAD_GROUP_ENABLE;
326 	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);
327 
328 	tegra_shared_plane_update(plane);
329 	tegra_shared_plane_activate(plane);
330 }
331 
332 static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
333 					 struct tegra_plane *plane)
334 {
335 	tegra_shared_plane_set_owner(plane, NULL);
336 }
337 
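/*
 * Validate a proposed plane state: translate the framebuffer format, check
 * the tiling mode against the hardware capabilities, reject framebuffers
 * with differing U and V plane pitches and register the plane with the CRTC
 * state via tegra_plane_state_add().
 */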
338 static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
339 					   struct drm_atomic_state *state)
340 {
341 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
342 										 plane);
343 	struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
344 	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
345 	struct tegra_bo_tiling *tiling = &plane_state->tiling;
346 	struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
347 	int err;
348 
349 	/* no need for further checks if the plane is being disabled */
350 	if (!new_plane_state->crtc || !new_plane_state->fb)
351 		return 0;
352 
353 	err = tegra_plane_format(new_plane_state->fb->format->format,
354 				 &plane_state->format,
355 				 &plane_state->swap);
356 	if (err < 0)
357 		return err;
358 
359 	err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
360 	if (err < 0)
361 		return err;
362 
363 	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
364 	    !dc->soc->supports_block_linear) {
365 		DRM_ERROR("hardware doesn't support block linear mode\n");
366 		return -EINVAL;
367 	}
368 
369 	/*
370 	 * Tegra doesn't support different strides for U and V planes so we
371 	 * error out if the user tries to display a framebuffer with such a
372 	 * configuration.
373 	 */
374 	if (new_plane_state->fb->format->num_planes > 2) {
375 		if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
376 			DRM_ERROR("unsupported UV-plane configuration\n");
377 			return -EINVAL;
378 		}
379 	}
380 
381 	/* XXX scaling is not yet supported, add a check here */
382 
383 	err = tegra_plane_state_add(&tegra->base, new_plane_state);
384 	if (err < 0)
385 		return err;
386 
387 	return 0;
388 }
389 
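/*
 * Disable a shared plane: resume the display controller, clear WIN_ENABLE,
 * release ownership of the window and suspend the controller again.
 */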
390 static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
391 					      struct drm_atomic_state *state)
392 {
393 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
394 									   plane);
395 	struct tegra_plane *p = to_tegra_plane(plane);
396 	struct tegra_dc *dc;
397 	u32 value;
398 	int err;
399 
400 	/* nothing to do if the plane was not attached to a CRTC */
401 	if (!old_state || !old_state->crtc)
402 		return;
403 
404 	dc = to_tegra_dc(old_state->crtc);
405 
406 	err = host1x_client_resume(&dc->client);
407 	if (err < 0) {
408 		dev_err(dc->dev, "failed to resume: %d\n", err);
409 		return;
410 	}
411 
412 	/*
413 	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
414 	 * on planes that are already disabled. Make sure we fallback to the
415 	 * head for this particular state instead of crashing.
416 	 */
417 	if (WARN_ON(p->dc == NULL))
418 		p->dc = dc;
419 
420 	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
421 	value &= ~WIN_ENABLE;
422 	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
423 
424 	tegra_dc_remove_shared_plane(dc, p);
425 
426 	host1x_client_suspend(&dc->client);
427 }
428 
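/*
 * Program a shared plane from its new atomic state: assign the window to the
 * CRTC's head, set up blending, bypass scaling, disable compression and
 * write the format, position, size, buffer address, pitch, cropping and
 * surface kind, then disable the gamut CSC.
 */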
429 static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
430 					     struct drm_atomic_state *state)
431 {
432 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
433 									   plane);
434 	struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
435 	struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
436 	unsigned int zpos = new_state->normalized_zpos;
437 	struct drm_framebuffer *fb = new_state->fb;
438 	struct tegra_plane *p = to_tegra_plane(plane);
439 	dma_addr_t base;
440 	u32 value;
441 	int err;
442 
443 	/* nothing to do without a CRTC and framebuffer */
444 	if (!new_state->crtc || !new_state->fb)
445 		return;
446 
447 	if (!new_state->visible) {
448 		tegra_shared_plane_atomic_disable(plane, state);
449 		return;
450 	}
451 
452 	err = host1x_client_resume(&dc->client);
453 	if (err < 0) {
454 		dev_err(dc->dev, "failed to resume: %d\n", err);
455 		return;
456 	}
457 
458 	tegra_dc_assign_shared_plane(dc, p);
459 
460 	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);
461 
462 	/* blending */
463 	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
464 		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
465 		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
466 	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);
467 
468 	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
469 		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
470 		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
471 	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);
472 
473 	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
474 	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);
475 
476 	/* bypass scaling */
477 	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
478 	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);
479 
480 	value = INPUT_SCALER_VBYPASS | INPUT_SCALER_HBYPASS;
481 	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);
482 
483 	/* disable compression */
484 	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
485 
486 	base = tegra_plane_state->iova[0] + fb->offsets[0];
487 
488 	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
489 	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
490 
491 	value = V_POSITION(new_state->crtc_y) |
492 		H_POSITION(new_state->crtc_x);
493 	tegra_plane_writel(p, value, DC_WIN_POSITION);
494 
495 	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
496 	tegra_plane_writel(p, value, DC_WIN_SIZE);
497 
498 	value = WIN_ENABLE | COLOR_EXPAND;
499 	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
500 
501 	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
502 	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);
503 
504 	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
505 	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);
506 
507 	value = PITCH(fb->pitches[0]);
508 	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);
509 
510 	value = CLAMP_BEFORE_BLEND | DEGAMMA_SRGB | INPUT_RANGE_FULL;
511 	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);
512 
513 	value = OFFSET_X(new_state->src_y >> 16) |
514 		OFFSET_Y(new_state->src_x >> 16);
515 	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);
516 
517 	if (dc->soc->supports_block_linear) {
518 		unsigned long height = tegra_plane_state->tiling.value;
519 
520 		/* XXX */
521 		switch (tegra_plane_state->tiling.mode) {
522 		case TEGRA_BO_TILING_MODE_PITCH:
523 			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
524 				DC_WINBUF_SURFACE_KIND_PITCH;
525 			break;
526 
527 		/* XXX not supported on Tegra186 and later */
528 		case TEGRA_BO_TILING_MODE_TILED:
529 			value = DC_WINBUF_SURFACE_KIND_TILED;
530 			break;
531 
532 		case TEGRA_BO_TILING_MODE_BLOCK:
533 			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
534 				DC_WINBUF_SURFACE_KIND_BLOCK;
535 			break;
536 		}
537 
538 		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
539 	}
540 
541 	/* disable gamut CSC */
542 	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
543 	value &= ~CONTROL_CSC_ENABLE;
544 	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);
545 
546 	host1x_client_suspend(&dc->client);
547 }
548 
549 static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
550 	.prepare_fb = tegra_plane_prepare_fb,
551 	.cleanup_fb = tegra_plane_cleanup_fb,
552 	.atomic_check = tegra_shared_plane_atomic_check,
553 	.atomic_update = tegra_shared_plane_atomic_update,
554 	.atomic_disable = tegra_shared_plane_atomic_disable,
555 };
556 
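/*
 * Create a DRM overlay plane backed by one of the shared windows. The window
 * group's parent client is set to the given display controller, but the
 * plane itself can be assigned to any of the CRTCs (possible_crtcs is 0x7).
 */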
557 struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
558 					    struct tegra_dc *dc,
559 					    unsigned int wgrp,
560 					    unsigned int index)
561 {
562 	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
563 	struct tegra_drm *tegra = drm->dev_private;
564 	struct tegra_display_hub *hub = tegra->hub;
565 	/* planes can be assigned to arbitrary CRTCs */
566 	unsigned int possible_crtcs = 0x7;
567 	struct tegra_shared_plane *plane;
568 	unsigned int num_formats;
569 	const u64 *modifiers;
570 	struct drm_plane *p;
571 	const u32 *formats;
572 	int err;
573 
574 	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
575 	if (!plane)
576 		return ERR_PTR(-ENOMEM);
577 
578 	plane->base.offset = 0x0a00 + 0x0300 * index;
579 	plane->base.index = index;
580 
581 	plane->wgrp = &hub->wgrps[wgrp];
582 	plane->wgrp->parent = &dc->client;
583 
584 	p = &plane->base.base;
585 
586 	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
587 	formats = tegra_shared_plane_formats;
588 	modifiers = tegra_shared_plane_modifiers;
589 
590 	err = drm_universal_plane_init(drm, p, possible_crtcs,
591 				       &tegra_plane_funcs, formats,
592 				       num_formats, modifiers, type, NULL);
593 	if (err < 0) {
594 		kfree(plane);
595 		return ERR_PTR(err);
596 	}
597 
598 	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
599 	drm_plane_create_zpos_property(p, 0, 0, 255);
600 
601 	return p;
602 }
603 
604 static struct drm_private_state *
605 tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
606 {
607 	struct tegra_display_hub_state *state;
608 
609 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
610 	if (!state)
611 		return NULL;
612 
613 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
614 
615 	return &state->base;
616 }
617 
618 static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
619 					    struct drm_private_state *state)
620 {
621 	struct tegra_display_hub_state *hub_state =
622 		to_tegra_display_hub_state(state);
623 
624 	kfree(hub_state);
625 }
626 
627 static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
628 	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
629 	.atomic_destroy_state = tegra_display_hub_destroy_state,
630 };
631 
632 static struct tegra_display_hub_state *
633 tegra_display_hub_get_state(struct tegra_display_hub *hub,
634 			    struct drm_atomic_state *state)
635 {
636 	struct drm_private_state *priv;
637 
638 	priv = drm_atomic_get_private_obj_state(state, &hub->base);
639 	if (IS_ERR(priv))
640 		return ERR_CAST(priv);
641 
642 	return to_tegra_display_hub_state(priv);
643 }
644 
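/*
 * Determine which active head has the highest pixel clock and record its
 * display clock and rate in the hub's private state; this is used by
 * tegra_display_hub_atomic_commit() to reparent the hub display clock.
 */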
645 int tegra_display_hub_atomic_check(struct drm_device *drm,
646 				   struct drm_atomic_state *state)
647 {
648 	struct tegra_drm *tegra = drm->dev_private;
649 	struct tegra_display_hub_state *hub_state;
650 	struct drm_crtc_state *old, *new;
651 	struct drm_crtc *crtc;
652 	unsigned int i;
653 
654 	if (!tegra->hub)
655 		return 0;
656 
657 	hub_state = tegra_display_hub_get_state(tegra->hub, state);
658 	if (IS_ERR(hub_state))
659 		return PTR_ERR(hub_state);
660 
661 	/*
662 	 * The display hub display clock needs to be fed by the display clock
663 	 * with the highest frequency to ensure proper functioning of all the
664 	 * displays.
665 	 *
666 	 * Note that this isn't used before Tegra186, but it doesn't hurt and
667 	 * conditionalizing it would make the code less clean.
668 	 */
669 	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
670 		struct tegra_dc_state *dc = to_dc_state(new);
671 
672 		if (new->active) {
673 			if (!hub_state->clk || dc->pclk > hub_state->rate) {
674 				hub_state->dc = to_tegra_dc(dc->base.crtc);
675 				hub_state->clk = hub_state->dc->clk;
676 				hub_state->rate = dc->pclk;
677 			}
678 		}
679 	}
680 
681 	return 0;
682 }
683 
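/*
 * Program the common IHUB registers on the given head: clear LATENCY_EVENT,
 * set cursor and window group fetch metering to one slot, then issue
 * COMMON_UPDATE and COMMON_ACTREQ, each followed by a register read-back.
 */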
684 static void tegra_display_hub_update(struct tegra_dc *dc)
685 {
686 	u32 value;
687 	int err;
688 
689 	err = host1x_client_resume(&dc->client);
690 	if (err < 0) {
691 		dev_err(dc->dev, "failed to resume: %d\n", err);
692 		return;
693 	}
694 
695 	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
696 	value &= ~LATENCY_EVENT;
697 	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);
698 
699 	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
700 	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
701 	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
702 
703 	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
704 	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
705 	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
706 	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
707 
708 	host1x_client_suspend(&dc->client);
709 }
710 
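/*
 * Apply the hub state computed in tegra_display_hub_atomic_check(): set the
 * selected display clock to the highest pixel clock rate, reparent the hub
 * display clock to it and reprogram the common IHUB registers on the chosen
 * head.
 */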
711 void tegra_display_hub_atomic_commit(struct drm_device *drm,
712 				     struct drm_atomic_state *state)
713 {
714 	struct tegra_drm *tegra = drm->dev_private;
715 	struct tegra_display_hub *hub = tegra->hub;
716 	struct tegra_display_hub_state *hub_state;
717 	struct device *dev = hub->client.dev;
718 	int err;
719 
720 	hub_state = to_tegra_display_hub_state(hub->base.state);
721 
722 	if (hub_state->clk) {
723 		err = clk_set_rate(hub_state->clk, hub_state->rate);
724 		if (err < 0)
725 			dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
726 				hub_state->clk, hub_state->rate);
727 
728 		err = clk_set_parent(hub->clk_disp, hub_state->clk);
729 		if (err < 0)
730 			dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
731 				hub->clk_disp, hub_state->clk, err);
732 	}
733 
734 	if (hub_state->dc)
735 		tegra_display_hub_update(hub_state->dc);
736 }
737 
738 static int tegra_display_hub_init(struct host1x_client *client)
739 {
740 	struct tegra_display_hub *hub = to_tegra_display_hub(client);
741 	struct drm_device *drm = dev_get_drvdata(client->host);
742 	struct tegra_drm *tegra = drm->dev_private;
743 	struct tegra_display_hub_state *state;
744 
745 	state = kzalloc(sizeof(*state), GFP_KERNEL);
746 	if (!state)
747 		return -ENOMEM;
748 
749 	drm_atomic_private_obj_init(drm, &hub->base, &state->base,
750 				    &tegra_display_hub_state_funcs);
751 
752 	tegra->hub = hub;
753 
754 	return 0;
755 }
756 
757 static int tegra_display_hub_exit(struct host1x_client *client)
758 {
759 	struct drm_device *drm = dev_get_drvdata(client->host);
760 	struct tegra_drm *tegra = drm->dev_private;
761 
762 	drm_atomic_private_obj_fini(&tegra->hub->base);
763 	tegra->hub = NULL;
764 
765 	return 0;
766 }
767 
768 static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
769 {
770 	struct tegra_display_hub *hub = to_tegra_display_hub(client);
771 	struct device *dev = client->dev;
772 	unsigned int i = hub->num_heads;
773 	int err;
774 
775 	err = reset_control_assert(hub->rst);
776 	if (err < 0)
777 		return err;
778 
779 	while (i--)
780 		clk_disable_unprepare(hub->clk_heads[i]);
781 
782 	clk_disable_unprepare(hub->clk_hub);
783 	clk_disable_unprepare(hub->clk_dsc);
784 	clk_disable_unprepare(hub->clk_disp);
785 
786 	pm_runtime_put_sync(dev);
787 
788 	return 0;
789 }
790 
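/*
 * Runtime resume: enable the disp, dsc and hub clocks followed by the
 * per-head clocks, then release the hub reset. On failure, the clocks that
 * were already enabled are unwound in reverse order.
 */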
791 static int tegra_display_hub_runtime_resume(struct host1x_client *client)
792 {
793 	struct tegra_display_hub *hub = to_tegra_display_hub(client);
794 	struct device *dev = client->dev;
795 	unsigned int i;
796 	int err;
797 
798 	err = pm_runtime_resume_and_get(dev);
799 	if (err < 0) {
800 		dev_err(dev, "failed to get runtime PM: %d\n", err);
801 		return err;
802 	}
803 
804 	err = clk_prepare_enable(hub->clk_disp);
805 	if (err < 0)
806 		goto put_rpm;
807 
808 	err = clk_prepare_enable(hub->clk_dsc);
809 	if (err < 0)
810 		goto disable_disp;
811 
812 	err = clk_prepare_enable(hub->clk_hub);
813 	if (err < 0)
814 		goto disable_dsc;
815 
816 	for (i = 0; i < hub->num_heads; i++) {
817 		err = clk_prepare_enable(hub->clk_heads[i]);
818 		if (err < 0)
819 			goto disable_heads;
820 	}
821 
822 	err = reset_control_deassert(hub->rst);
823 	if (err < 0)
824 		goto disable_heads;
825 
826 	return 0;
827 
828 disable_heads:
829 	while (i--)
830 		clk_disable_unprepare(hub->clk_heads[i]);
831 
832 	clk_disable_unprepare(hub->clk_hub);
833 disable_dsc:
834 	clk_disable_unprepare(hub->clk_dsc);
835 disable_disp:
836 	clk_disable_unprepare(hub->clk_disp);
837 put_rpm:
838 	pm_runtime_put_sync(dev);
839 	return err;
840 }
841 
842 static const struct host1x_client_ops tegra_display_hub_ops = {
843 	.init = tegra_display_hub_init,
844 	.exit = tegra_display_hub_exit,
845 	.suspend = tegra_display_hub_runtime_suspend,
846 	.resume = tegra_display_hub_runtime_resume,
847 };
848 
849 static int tegra_display_hub_probe(struct platform_device *pdev)
850 {
851 	struct device_node *child = NULL;
852 	struct tegra_display_hub *hub;
853 	struct clk *clk;
854 	unsigned int i;
855 	int err;
856 
857 	hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
858 	if (!hub)
859 		return -ENOMEM;
860 
861 	hub->soc = of_device_get_match_data(&pdev->dev);
862 
863 	hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
864 	if (IS_ERR(hub->clk_disp)) {
865 		err = PTR_ERR(hub->clk_disp);
866 		return err;
867 	}
868 
869 	if (hub->soc->supports_dsc) {
870 		hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
871 		if (IS_ERR(hub->clk_dsc)) {
872 			err = PTR_ERR(hub->clk_dsc);
873 			return err;
874 		}
875 	}
876 
877 	hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
878 	if (IS_ERR(hub->clk_hub)) {
879 		err = PTR_ERR(hub->clk_hub);
880 		return err;
881 	}
882 
883 	hub->rst = devm_reset_control_get(&pdev->dev, "misc");
884 	if (IS_ERR(hub->rst)) {
885 		err = PTR_ERR(hub->rst);
886 		return err;
887 	}
888 
889 	hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
890 				  sizeof(*hub->wgrps), GFP_KERNEL);
891 	if (!hub->wgrps)
892 		return -ENOMEM;
893 
894 	for (i = 0; i < hub->soc->num_wgrps; i++) {
895 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
896 		char id[8];
897 
898 		snprintf(id, sizeof(id), "wgrp%u", i);
899 		mutex_init(&wgrp->lock);
900 		wgrp->usecount = 0;
901 		wgrp->index = i;
902 
903 		wgrp->rst = devm_reset_control_get(&pdev->dev, id);
904 		if (IS_ERR(wgrp->rst))
905 			return PTR_ERR(wgrp->rst);
906 
907 		err = reset_control_assert(wgrp->rst);
908 		if (err < 0)
909 			return err;
910 	}
911 
912 	hub->num_heads = of_get_child_count(pdev->dev.of_node);
913 
914 	hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
915 				      GFP_KERNEL);
916 	if (!hub->clk_heads)
917 		return -ENOMEM;
918 
919 	for (i = 0; i < hub->num_heads; i++) {
920 		child = of_get_next_child(pdev->dev.of_node, child);
921 		if (!child) {
922 			dev_err(&pdev->dev, "failed to find node for head %u\n",
923 				i);
924 			return -ENODEV;
925 		}
926 
927 		clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
928 		if (IS_ERR(clk)) {
929 			dev_err(&pdev->dev, "failed to get clock for head %u\n",
930 				i);
931 			of_node_put(child);
932 			return PTR_ERR(clk);
933 		}
934 
935 		hub->clk_heads[i] = clk;
936 	}
937 
938 	of_node_put(child);
939 
940 	/* XXX: enable clock across reset? */
941 	err = reset_control_assert(hub->rst);
942 	if (err < 0)
943 		return err;
944 
945 	platform_set_drvdata(pdev, hub);
946 	pm_runtime_enable(&pdev->dev);
947 
948 	INIT_LIST_HEAD(&hub->client.list);
949 	hub->client.ops = &tegra_display_hub_ops;
950 	hub->client.dev = &pdev->dev;
951 
952 	err = host1x_client_register(&hub->client);
953 	if (err < 0)
954 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
955 			err);
956 
957 	err = devm_of_platform_populate(&pdev->dev);
958 	if (err < 0)
959 		goto unregister;
960 
961 	return err;
962 
963 unregister:
964 	host1x_client_unregister(&hub->client);
965 	pm_runtime_disable(&pdev->dev);
966 	return err;
967 }
968 
969 static int tegra_display_hub_remove(struct platform_device *pdev)
970 {
971 	struct tegra_display_hub *hub = platform_get_drvdata(pdev);
972 	unsigned int i;
973 	int err;
974 
975 	err = host1x_client_unregister(&hub->client);
976 	if (err < 0) {
977 		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
978 			err);
979 	}
980 
981 	for (i = 0; i < hub->soc->num_wgrps; i++) {
982 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
983 
984 		mutex_destroy(&wgrp->lock);
985 	}
986 
987 	pm_runtime_disable(&pdev->dev);
988 
989 	return err;
990 }
991 
992 static const struct tegra_display_hub_soc tegra186_display_hub = {
993 	.num_wgrps = 6,
994 	.supports_dsc = true,
995 };
996 
997 static const struct tegra_display_hub_soc tegra194_display_hub = {
998 	.num_wgrps = 6,
999 	.supports_dsc = false,
1000 };
1001 
1002 static const struct of_device_id tegra_display_hub_of_match[] = {
1003 	{
1004 		.compatible = "nvidia,tegra194-display",
1005 		.data = &tegra194_display_hub
1006 	}, {
1007 		.compatible = "nvidia,tegra186-display",
1008 		.data = &tegra186_display_hub
1009 	}, {
1010 		/* sentinel */
1011 	}
1012 };
1013 MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
1014 
1015 struct platform_driver tegra_display_hub_driver = {
1016 	.driver = {
1017 		.name = "tegra-display-hub",
1018 		.of_match_table = tegra_display_hub_of_match,
1019 	},
1020 	.probe = tegra_display_hub_probe,
1021 	.remove = tegra_display_hub_remove,
1022 };
1023