xref: /openbmc/linux/drivers/gpu/drm/tegra/hub.c (revision 479965a2)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>

#include "drm.h"
#include "dc.h"
#include "plane.h"

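/*
 * Number of fractional bits used by the input scaler phase increment and
 * start phase values computed below.
 */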
#define NFB 24

static const u32 tegra_shared_plane_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	/* new on Tegra114 */
	DRM_FORMAT_ABGR4444,
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_BGRA5551,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGBX5551,
	DRM_FORMAT_XBGR1555,
	DRM_FORMAT_BGRX5551,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	/* planar formats */
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YUV420,
	DRM_FORMAT_YUV422,
};

static const u64 tegra_shared_plane_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
	/*
	 * The GPU sector layout is only supported on Tegra194, but these will
	 * be filtered out later on by ->format_mod_supported() on SoCs where
	 * it isn't supported.
	 */
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	/* sentinel */
	DRM_FORMAT_MOD_INVALID
};

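/*
 * Translate the architectural per-window register offsets (the 0x500, 0x700
 * and 0x800 based ranges) into this plane's window group aperture. Offsets
 * outside of these ranges are unexpected and trigger a warning.
 */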
static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
					      unsigned int offset)
{
	if (offset >= 0x500 && offset <= 0x581) {
		offset = 0x000 + (offset - 0x500);
		return plane->offset + offset;
	}

	if (offset >= 0x700 && offset <= 0x73c) {
		offset = 0x180 + (offset - 0x700);
		return plane->offset + offset;
	}

	if (offset >= 0x800 && offset <= 0x83e) {
		offset = 0x1c0 + (offset - 0x800);
		return plane->offset + offset;
	}

	dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);

	return plane->offset + offset;
}

static inline u32 tegra_plane_readl(struct tegra_plane *plane,
				    unsigned int offset)
{
	return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
}

static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
				      unsigned int offset)
{
	tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
}

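/*
 * Window groups are shared between display heads, so their enable state is
 * reference counted: the first user resumes the parent host1x client and
 * releases the reset, the last user asserts the reset and suspends it again.
 */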
static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
{
	int err = 0;

	mutex_lock(&wgrp->lock);

	if (wgrp->usecount == 0) {
		err = host1x_client_resume(wgrp->parent);
		if (err < 0) {
			dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
			goto unlock;
		}

		reset_control_deassert(wgrp->rst);
	}

	wgrp->usecount++;

unlock:
	mutex_unlock(&wgrp->lock);
	return err;
}

static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
{
	int err;

	mutex_lock(&wgrp->lock);

	if (wgrp->usecount == 1) {
		err = reset_control_assert(wgrp->rst);
		if (err < 0) {
			pr_err("failed to assert reset for window group %u\n",
			       wgrp->index);
		}

		host1x_client_suspend(wgrp->parent);
	}

	wgrp->usecount--;
	mutex_unlock(&wgrp->lock);
}

int tegra_display_hub_prepare(struct tegra_display_hub *hub)
{
	unsigned int i;

	/*
	 * XXX Enabling/disabling windowgroups needs to happen when the owner
	 * display controller is disabled. There's currently no good point at
	 * which this could be executed, so unconditionally enable all window
	 * groups for now.
	 */
	for (i = 0; i < hub->soc->num_wgrps; i++) {
		struct tegra_windowgroup *wgrp = &hub->wgrps[i];

		/* Skip orphaned window group whose parent DC is disabled */
		if (wgrp->parent)
			tegra_windowgroup_enable(wgrp);
	}

	return 0;
}

void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
{
	unsigned int i;

	/*
	 * XXX Remove this once window groups can be enabled and disabled at a
	 * finer granularity.
	 */
	for (i = 0; i < hub->soc->num_wgrps; i++) {
		struct tegra_windowgroup *wgrp = &hub->wgrps[i];

		/* Skip orphaned window group whose parent DC is disabled */
		if (wgrp->parent)
			tegra_windowgroup_disable(wgrp);
	}
}

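/*
 * Request an update of this plane's assembly state registers and wait (for
 * up to one second) for the request bits to clear again;
 * tegra_shared_plane_activate() below does the same for the corresponding
 * activation request.
 */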
static void tegra_shared_plane_update(struct tegra_plane *plane)
{
	struct tegra_dc *dc = plane->dc;
	unsigned long timeout;
	u32 mask, value;

	mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);

	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_before(jiffies, timeout)) {
		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
		if ((value & mask) == 0)
			break;

		usleep_range(100, 400);
	}
}

static void tegra_shared_plane_activate(struct tegra_plane *plane)
{
	struct tegra_dc *dc = plane->dc;
	unsigned long timeout;
	u32 mask, value;

	mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);

	timeout = jiffies + msecs_to_jiffies(1000);

	while (time_before(jiffies, timeout)) {
		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
		if ((value & mask) == 0)
			break;

		usleep_range(100, 400);
	}
}

static unsigned int
tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
{
	unsigned int offset =
		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);

	return tegra_dc_readl(dc, offset) & OWNER_MASK;
}

static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
				       struct tegra_plane *plane)
{
	struct device *dev = dc->dev;

	if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
		if (plane->dc == dc)
			return true;

		dev_WARN(dev, "head %u owns window %u but is not attached\n",
			 dc->pipe, plane->index);
	}

	return false;
}

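/*
 * Assign ownership of this window to the given display head, or release it
 * altogether when @new is NULL. Fails with -EBUSY if the window is currently
 * owned by a different head.
 */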
static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
					struct tegra_dc *new)
{
	unsigned int offset =
		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
	struct device *dev = new ? new->dev : old->dev;
	unsigned int owner, index = plane->index;
	u32 value;

	value = tegra_dc_readl(dc, offset);
	owner = value & OWNER_MASK;

	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
		return -EBUSY;
	}

	/*
	 * This seems to happen whenever the head has been disabled with one
	 * or more windows being active. This is harmless because we'll just
	 * reassign the window to the new head anyway.
	 */
	if (old && owner == OWNER_MASK)
		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
			old->pipe, owner);

	value &= ~OWNER_MASK;

	if (new)
		value |= OWNER(new->pipe);
	else
		value |= OWNER_MASK;

	tegra_dc_writel(dc, value, offset);

	plane->dc = new;

	return 0;
}

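/*
 * Program the fixed input scaler filter coefficient table. The table holds
 * 192 entries, indexed by scaling ratio (0-2), phase row (0-15) and tap
 * column (0-3), and is written one entry at a time through the coefficient
 * index/data register.
 */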
static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
{
	static const unsigned int coeffs[192] = {
		0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
		0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
		0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
		0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
		0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
		0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
		0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
		0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
		0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
		0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
		0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
		0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
		0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
		0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
		0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
		0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
		0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
		0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
		0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
		0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
		0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
		0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
		0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
		0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
		0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
		0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
		0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
		0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
		0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
		0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
		0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
		0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
		0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
		0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
		0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
		0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
		0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
		0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
		0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
		0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
		0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
		0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
		0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
		0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
		0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
		0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
		0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
		0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
	};
	unsigned int ratio, row, column;

	for (ratio = 0; ratio <= 2; ratio++) {
		for (row = 0; row <= 15; row++) {
			for (column = 0; column <= 3; column++) {
				unsigned int index = (ratio << 6) + (row << 2) + column;
				u32 value;

				value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
				tegra_plane_writel(plane, value,
						   DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
			}
		}
	}
}

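/*
 * Attach a shared window to the given display head (taking ownership of it
 * if necessary) and program what appear to be conservative IHUB defaults:
 * line buffer mode, fetch metering, latency control, pipe metering, mempool
 * allocation and thread grouping. The new configuration is then latched via
 * the update/activate requests above.
 */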
static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	u32 value;
	int err;

	if (!tegra_dc_owns_shared_plane(dc, plane)) {
		err = tegra_shared_plane_set_owner(plane, dc);
		if (err < 0)
			return;
	}

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
	value |= MODE_FOUR_LINES;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
	value = SLOTS(1);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);

	/* disable watermark */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
	value &= ~LATENCY_CTL_MODE_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
	value |= WATERMARK_MASK;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);

	/* pipe meter */
	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);

	/* mempool entries */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
	value = MEMPOOL_ENTRIES(0x331);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
	value &= ~THREAD_NUM_MASK;
	value |= THREAD_NUM(plane->base.index);
	value |= THREAD_GROUP_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);

	tegra_shared_plane_setup_scaler(plane);

	tegra_shared_plane_update(plane);
	tegra_shared_plane_activate(plane);
}

static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	tegra_shared_plane_set_owner(plane, NULL);
}

static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
	struct tegra_bo_tiling *tiling = &plane_state->tiling;
	struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
	int err;

	/* no need for further checks if the plane is being disabled */
	if (!new_plane_state->crtc || !new_plane_state->fb)
		return 0;

	err = tegra_plane_format(new_plane_state->fb->format->format,
				 &plane_state->format,
				 &plane_state->swap);
	if (err < 0)
		return err;

	err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
	if (err < 0)
		return err;

	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
	    !dc->soc->supports_block_linear) {
		DRM_ERROR("hardware doesn't support block linear mode\n");
		return -EINVAL;
	}

	if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
	    !dc->soc->supports_sector_layout) {
		DRM_ERROR("hardware doesn't support GPU sector layout\n");
		return -EINVAL;
	}

	/*
	 * Tegra doesn't support different strides for U and V planes so we
	 * error out if the user tries to display a framebuffer with such a
	 * configuration.
	 */
	if (new_plane_state->fb->format->num_planes > 2) {
		if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
			DRM_ERROR("unsupported UV-plane configuration\n");
			return -EINVAL;
		}
	}

	/* XXX scaling is not yet supported, add a check here */

	err = tegra_plane_state_add(&tegra->base, new_plane_state);
	if (err < 0)
		return err;

	return 0;
}

static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
					      struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct tegra_plane *p = to_tegra_plane(plane);
	struct tegra_dc *dc;
	u32 value;
	int err;

	/* rien ne va plus */
	if (!old_state || !old_state->crtc)
		return;

	dc = to_tegra_dc(old_state->crtc);

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	/*
	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
	 * on planes that are already disabled. Make sure we fallback to the
	 * head for this particular state instead of crashing.
	 */
	if (WARN_ON(p->dc == NULL))
		p->dc = dc;

	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
	value &= ~WIN_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	tegra_dc_remove_shared_plane(dc, p);

	host1x_client_suspend(&dc->client);
}

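/*
 * Compute the scaler phase increment as the ratio between source and
 * destination size, expressed as an unsigned fixed-point number with NFB
 * fractional bits and rounded to the nearest value.
 */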
static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
{
	u64 tmp, tmp1, tmp2;

	tmp = (u64)dfixed_trunc(in);
	tmp2 = (u64)out;
	tmp1 = (tmp << NFB) + (tmp2 >> 1);
	do_div(tmp1, tmp2);

	return lower_32_bits(tmp1);
}

static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
					     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
	struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
	unsigned int zpos = new_state->normalized_zpos;
	struct drm_framebuffer *fb = new_state->fb;
	struct tegra_plane *p = to_tegra_plane(plane);
	u32 value, min_width, bypass = 0;
	dma_addr_t base, addr_flag = 0;
	unsigned int bpc, planes;
	bool yuv;
	int err;

	/* rien ne va plus */
	if (!new_state->crtc || !new_state->fb)
		return;

	if (!new_state->visible) {
		tegra_shared_plane_atomic_disable(plane, state);
		return;
	}

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planes, &bpc);

	tegra_dc_assign_shared_plane(dc, p);

	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);

	/* blending */
	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);

	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);

	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);

	/* scaling */
	min_width = min(new_state->src_w >> 16, new_state->crtc_w);

	value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);

	if (min_width < MAX_PIXELS_5TAP444(value)) {
		value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	} else {
		value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);

		if (min_width < MAX_PIXELS_2TAP444(value))
			value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
		else
			dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
	}

	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);

	if (new_state->src_w != new_state->crtc_w << 16) {
		fixed20_12 width = dfixed_init(new_state->src_w >> 16);
		u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_HBYPASS;
	}

	if (new_state->src_h != new_state->crtc_h << 16) {
		fixed20_12 height = dfixed_init(new_state->src_h >> 16);
		u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_VBYPASS;
	}

	tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);

	/* disable compression */
	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	/*
	 * Physical address bit 39 in Tegra194 is used as a switch for special
	 * logic that swizzles the memory using either the legacy Tegra or the
	 * dGPU sector layout.
	 */
	if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
		addr_flag = BIT_ULL(39);
#endif

	base = tegra_plane_state->iova[0] + fb->offsets[0];
	base |= addr_flag;

	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);

	value = V_POSITION(new_state->crtc_y) |
		H_POSITION(new_state->crtc_x);
	tegra_plane_writel(p, value, DC_WIN_POSITION);

	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
	tegra_plane_writel(p, value, DC_WIN_SIZE);

	value = WIN_ENABLE | COLOR_EXPAND;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);

	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);

	value = PITCH(fb->pitches[0]);
	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);

	if (yuv && planes > 1) {
		base = tegra_plane_state->iova[1] + fb->offsets[1];
		base |= addr_flag;

		tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);

		if (planes > 2) {
			base = tegra_plane_state->iova[2] + fb->offsets[2];
			base |= addr_flag;

			tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
			tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);
		}

		value = PITCH_U(fb->pitches[1]);

		if (planes > 2)
			value |= PITCH_V(fb->pitches[2]);

		tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
	} else {
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
		tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
	}

	value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;

	if (yuv) {
		if (bpc < 12)
			value |= DEGAMMA_YUV8_10;
		else
			value |= DEGAMMA_YUV12;

		/* XXX parameterize */
		value |= COLOR_SPACE_YUV_2020;
	} else {
		if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
			value |= DEGAMMA_SRGB;
	}

	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);

	value = OFFSET_X(new_state->src_y >> 16) |
		OFFSET_Y(new_state->src_x >> 16);
	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);

	if (dc->soc->supports_block_linear) {
		unsigned long height = tegra_plane_state->tiling.value;

		/* XXX */
		switch (tegra_plane_state->tiling.mode) {
		case TEGRA_BO_TILING_MODE_PITCH:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
				DC_WINBUF_SURFACE_KIND_PITCH;
			break;

		/* XXX not supported on Tegra186 and later */
		case TEGRA_BO_TILING_MODE_TILED:
			value = DC_WINBUF_SURFACE_KIND_TILED;
			break;

		case TEGRA_BO_TILING_MODE_BLOCK:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
				DC_WINBUF_SURFACE_KIND_BLOCK;
			break;
		}

		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
	}

	/* disable gamut CSC */
	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
	value &= ~CONTROL_CSC_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);

	host1x_client_suspend(&dc->client);
}

static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
	.prepare_fb = tegra_plane_prepare_fb,
	.cleanup_fb = tegra_plane_cleanup_fb,
	.atomic_check = tegra_shared_plane_atomic_check,
	.atomic_update = tegra_shared_plane_atomic_update,
	.atomic_disable = tegra_shared_plane_atomic_disable,
};

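/*
 * Create one of the shared overlay planes backed by window group @wgrp. The
 * register aperture for window @index starts at 0x0a00 + index * 0x0300, and
 * the plane is made available to all CRTCs since shared windows can be
 * reassigned between heads at runtime.
 */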
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
					    struct tegra_dc *dc,
					    unsigned int wgrp,
					    unsigned int index)
{
	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub *hub = tegra->hub;
	struct tegra_shared_plane *plane;
	unsigned int possible_crtcs;
	unsigned int num_formats;
	const u64 *modifiers;
	struct drm_plane *p;
	const u32 *formats;
	int err;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	plane->base.offset = 0x0a00 + 0x0300 * index;
	plane->base.index = index;

	plane->wgrp = &hub->wgrps[wgrp];
	plane->wgrp->parent = &dc->client;

	p = &plane->base.base;

	/* planes can be assigned to arbitrary CRTCs */
	possible_crtcs = BIT(tegra->num_crtcs) - 1;

	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
	formats = tegra_shared_plane_formats;
	modifiers = tegra_shared_plane_modifiers;

	err = drm_universal_plane_init(drm, p, possible_crtcs,
				       &tegra_plane_funcs, formats,
				       num_formats, modifiers, type, NULL);
	if (err < 0) {
		kfree(plane);
		return ERR_PTR(err);
	}

	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
	drm_plane_create_zpos_property(p, 0, 0, 255);

	return p;
}

static struct drm_private_state *
tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
{
	struct tegra_display_hub_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
					    struct drm_private_state *state)
{
	struct tegra_display_hub_state *hub_state =
		to_tegra_display_hub_state(state);

	kfree(hub_state);
}

static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
	.atomic_destroy_state = tegra_display_hub_destroy_state,
};

static struct tegra_display_hub_state *
tegra_display_hub_get_state(struct tegra_display_hub *hub,
			    struct drm_atomic_state *state)
{
	struct drm_private_state *priv;

	priv = drm_atomic_get_private_obj_state(state, &hub->base);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	return to_tegra_display_hub_state(priv);
}

int tegra_display_hub_atomic_check(struct drm_device *drm,
				   struct drm_atomic_state *state)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub_state *hub_state;
	struct drm_crtc_state *old, *new;
	struct drm_crtc *crtc;
	unsigned int i;

	if (!tegra->hub)
		return 0;

	hub_state = tegra_display_hub_get_state(tegra->hub, state);
	if (IS_ERR(hub_state))
		return PTR_ERR(hub_state);

	/*
	 * The display hub display clock needs to be fed by the display clock
	 * with the highest frequency to ensure proper functioning of all the
	 * displays.
	 *
	 * Note that this isn't used before Tegra186, but it doesn't hurt and
	 * conditionalizing it would make the code less clean.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
		struct tegra_dc_state *dc = to_dc_state(new);

		if (new->active) {
			if (!hub_state->clk || dc->pclk > hub_state->rate) {
				hub_state->dc = to_tegra_dc(dc->base.crtc);
				hub_state->clk = hub_state->dc->clk;
				hub_state->rate = dc->pclk;
			}
		}
	}

	return 0;
}

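/*
 * Program the common IHUB fetch metering defaults and latch them by issuing
 * a COMMON_UPDATE followed by a COMMON_ACTREQ request, each followed by a
 * read-back of the state control register.
 */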
static void tegra_display_hub_update(struct tegra_dc *dc)
{
	u32 value;
	int err;

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
	value &= ~LATENCY_EVENT;
	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);

	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);

	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);

	host1x_client_suspend(&dc->client);
}

void tegra_display_hub_atomic_commit(struct drm_device *drm,
				     struct drm_atomic_state *state)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub *hub = tegra->hub;
	struct tegra_display_hub_state *hub_state;
	struct device *dev = hub->client.dev;
	int err;

	hub_state = to_tegra_display_hub_state(hub->base.state);

	if (hub_state->clk) {
		err = clk_set_rate(hub_state->clk, hub_state->rate);
		if (err < 0)
			dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
				hub_state->clk, hub_state->rate);

		err = clk_set_parent(hub->clk_disp, hub_state->clk);
		if (err < 0)
			dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
				hub->clk_disp, hub_state->clk, err);
	}

	if (hub_state->dc)
		tegra_display_hub_update(hub_state->dc);
}

static int tegra_display_hub_init(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(drm, &hub->base, &state->base,
				    &tegra_display_hub_state_funcs);

	tegra->hub = hub;

	return 0;
}

static int tegra_display_hub_exit(struct host1x_client *client)
{
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;

	drm_atomic_private_obj_fini(&tegra->hub->base);
	tegra->hub = NULL;

	return 0;
}

static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct device *dev = client->dev;
	unsigned int i = hub->num_heads;
	int err;

	err = reset_control_assert(hub->rst);
	if (err < 0)
		return err;

	while (i--)
		clk_disable_unprepare(hub->clk_heads[i]);

	clk_disable_unprepare(hub->clk_hub);
	clk_disable_unprepare(hub->clk_dsc);
	clk_disable_unprepare(hub->clk_disp);

	pm_runtime_put_sync(dev);

	return 0;
}

static int tegra_display_hub_runtime_resume(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct device *dev = client->dev;
	unsigned int i;
	int err;

	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "failed to get runtime PM: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(hub->clk_disp);
	if (err < 0)
		goto put_rpm;

	err = clk_prepare_enable(hub->clk_dsc);
	if (err < 0)
		goto disable_disp;

	err = clk_prepare_enable(hub->clk_hub);
	if (err < 0)
		goto disable_dsc;

	for (i = 0; i < hub->num_heads; i++) {
		err = clk_prepare_enable(hub->clk_heads[i]);
		if (err < 0)
			goto disable_heads;
	}

	err = reset_control_deassert(hub->rst);
	if (err < 0)
		goto disable_heads;

	return 0;

disable_heads:
	while (i--)
		clk_disable_unprepare(hub->clk_heads[i]);

	clk_disable_unprepare(hub->clk_hub);
disable_dsc:
	clk_disable_unprepare(hub->clk_dsc);
disable_disp:
	clk_disable_unprepare(hub->clk_disp);
put_rpm:
	pm_runtime_put_sync(dev);
	return err;
}

static const struct host1x_client_ops tegra_display_hub_ops = {
	.init = tegra_display_hub_init,
	.exit = tegra_display_hub_exit,
	.suspend = tegra_display_hub_runtime_suspend,
	.resume = tegra_display_hub_runtime_resume,
};

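/*
 * Acquire the hub clocks ("disp", "hub" and, where supported, "dsc") and
 * resets (the "misc" reset plus one reset per window group), pick up one
 * "dc" clock per head child node, then register the hub as a host1x client
 * and populate its children.
 */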
static int tegra_display_hub_probe(struct platform_device *pdev)
{
	u64 dma_mask = dma_get_mask(pdev->dev.parent);
	struct device_node *child = NULL;
	struct tegra_display_hub *hub;
	struct clk *clk;
	unsigned int i;
	int err;

	err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
	if (!hub)
		return -ENOMEM;

	hub->soc = of_device_get_match_data(&pdev->dev);

	hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
	if (IS_ERR(hub->clk_disp)) {
		err = PTR_ERR(hub->clk_disp);
		return err;
	}

	if (hub->soc->supports_dsc) {
		hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
		if (IS_ERR(hub->clk_dsc)) {
			err = PTR_ERR(hub->clk_dsc);
			return err;
		}
	}

	hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
	if (IS_ERR(hub->clk_hub)) {
		err = PTR_ERR(hub->clk_hub);
		return err;
	}

	hub->rst = devm_reset_control_get(&pdev->dev, "misc");
	if (IS_ERR(hub->rst)) {
		err = PTR_ERR(hub->rst);
		return err;
	}

	hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
				  sizeof(*hub->wgrps), GFP_KERNEL);
	if (!hub->wgrps)
		return -ENOMEM;

	for (i = 0; i < hub->soc->num_wgrps; i++) {
		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
		char id[8];

		snprintf(id, sizeof(id), "wgrp%u", i);
		mutex_init(&wgrp->lock);
		wgrp->usecount = 0;
		wgrp->index = i;

		wgrp->rst = devm_reset_control_get(&pdev->dev, id);
		if (IS_ERR(wgrp->rst))
			return PTR_ERR(wgrp->rst);

		err = reset_control_assert(wgrp->rst);
		if (err < 0)
			return err;
	}

	hub->num_heads = of_get_child_count(pdev->dev.of_node);

	hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
				      GFP_KERNEL);
	if (!hub->clk_heads)
		return -ENOMEM;

	for (i = 0; i < hub->num_heads; i++) {
		child = of_get_next_child(pdev->dev.of_node, child);
		if (!child) {
			dev_err(&pdev->dev, "failed to find node for head %u\n",
				i);
			return -ENODEV;
		}

		clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
		if (IS_ERR(clk)) {
			dev_err(&pdev->dev, "failed to get clock for head %u\n",
				i);
			of_node_put(child);
			return PTR_ERR(clk);
		}

		hub->clk_heads[i] = clk;
	}

	of_node_put(child);

	/* XXX: enable clock across reset? */
	err = reset_control_assert(hub->rst);
	if (err < 0)
		return err;

	platform_set_drvdata(pdev, hub);
	pm_runtime_enable(&pdev->dev);

	INIT_LIST_HEAD(&hub->client.list);
	hub->client.ops = &tegra_display_hub_ops;
	hub->client.dev = &pdev->dev;

	err = host1x_client_register(&hub->client);
	if (err < 0)
		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
			err);

	err = devm_of_platform_populate(&pdev->dev);
	if (err < 0)
		goto unregister;

	return err;

unregister:
	host1x_client_unregister(&hub->client);
	pm_runtime_disable(&pdev->dev);
	return err;
}

static void tegra_display_hub_remove(struct platform_device *pdev)
{
	struct tegra_display_hub *hub = platform_get_drvdata(pdev);
	unsigned int i;

	host1x_client_unregister(&hub->client);

	for (i = 0; i < hub->soc->num_wgrps; i++) {
		struct tegra_windowgroup *wgrp = &hub->wgrps[i];

		mutex_destroy(&wgrp->lock);
	}

	pm_runtime_disable(&pdev->dev);
}

static const struct tegra_display_hub_soc tegra186_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = true,
};

static const struct tegra_display_hub_soc tegra194_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = false,
};

static const struct of_device_id tegra_display_hub_of_match[] = {
	{
		.compatible = "nvidia,tegra194-display",
		.data = &tegra194_display_hub
	}, {
		.compatible = "nvidia,tegra186-display",
		.data = &tegra186_display_hub
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);

struct platform_driver tegra_display_hub_driver = {
	.driver = {
		.name = "tegra-display-hub",
		.of_match_table = tegra_display_hub_of_match,
	},
	.probe = tegra_display_hub_probe,
	.remove_new = tegra_display_hub_remove,
};