xref: /openbmc/linux/drivers/gpu/drm/tegra/hub.c (revision 7ac1a36a9823a573ad23ece58007a74b8089fe16)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
4  */
5 
6 #include <linux/clk.h>
7 #include <linux/delay.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/host1x.h>
10 #include <linux/module.h>
11 #include <linux/of.h>
12 #include <linux/of_device.h>
13 #include <linux/of_graph.h>
14 #include <linux/platform_device.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/reset.h>
17 
18 #include <drm/drm_atomic.h>
19 #include <drm/drm_atomic_helper.h>
20 #include <drm/drm_fourcc.h>
21 #include <drm/drm_probe_helper.h>
22 
23 #include "drm.h"
24 #include "dc.h"
25 #include "plane.h"
26 
27 #define NFB 24
28 
29 static const u32 tegra_shared_plane_formats[] = {
30 	DRM_FORMAT_ARGB1555,
31 	DRM_FORMAT_RGB565,
32 	DRM_FORMAT_RGBA5551,
33 	DRM_FORMAT_ARGB8888,
34 	DRM_FORMAT_ABGR8888,
35 	/* new on Tegra114 */
36 	DRM_FORMAT_ABGR4444,
37 	DRM_FORMAT_ABGR1555,
38 	DRM_FORMAT_BGRA5551,
39 	DRM_FORMAT_XRGB1555,
40 	DRM_FORMAT_RGBX5551,
41 	DRM_FORMAT_XBGR1555,
42 	DRM_FORMAT_BGRX5551,
43 	DRM_FORMAT_BGR565,
44 	DRM_FORMAT_XRGB8888,
45 	DRM_FORMAT_XBGR8888,
46 	/* planar formats */
47 	DRM_FORMAT_UYVY,
48 	DRM_FORMAT_YUYV,
49 	DRM_FORMAT_YUV420,
50 	DRM_FORMAT_YUV422,
51 };
52 
53 static const u64 tegra_shared_plane_modifiers[] = {
54 	DRM_FORMAT_MOD_LINEAR,
55 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
56 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
57 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
58 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
59 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
60 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
61 	/*
62 	 * The GPU sector layout is only supported on Tegra194, but these will
63 	 * be filtered out later on by ->format_mod_supported() on SoCs where
64 	 * it isn't supported.
65 	 */
66 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
67 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
68 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
69 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
70 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
71 	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
72 	/* sentinel */
73 	DRM_FORMAT_MOD_INVALID
74 };
75 
76 static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
77 					      unsigned int offset)
78 {
79 	if (offset >= 0x500 && offset <= 0x581) {
80 		offset = 0x000 + (offset - 0x500);
81 		return plane->offset + offset;
82 	}
83 
84 	if (offset >= 0x700 && offset <= 0x73c) {
85 		offset = 0x180 + (offset - 0x700);
86 		return plane->offset + offset;
87 	}
88 
89 	if (offset >= 0x800 && offset <= 0x83e) {
90 		offset = 0x1c0 + (offset - 0x800);
91 		return plane->offset + offset;
92 	}
93 
94 	dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
95 
96 	return plane->offset + offset;
97 }
98 
/* Read a window register of @plane after remapping @offset. */
static inline u32 tegra_plane_readl(struct tegra_plane *plane,
				    unsigned int offset)
{
	return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
}
104 
/* Write @value to a window register of @plane after remapping @offset. */
static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
				      unsigned int offset)
{
	tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
}
110 
111 static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
112 {
113 	int err = 0;
114 
115 	mutex_lock(&wgrp->lock);
116 
117 	if (wgrp->usecount == 0) {
118 		err = host1x_client_resume(wgrp->parent);
119 		if (err < 0) {
120 			dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
121 			goto unlock;
122 		}
123 
124 		reset_control_deassert(wgrp->rst);
125 	}
126 
127 	wgrp->usecount++;
128 
129 unlock:
130 	mutex_unlock(&wgrp->lock);
131 	return err;
132 }
133 
/*
 * Drop a reference on a window group; the last user asserts the window
 * group reset and suspends the parent display controller.
 *
 * Counterpart to tegra_windowgroup_enable(). Note that the reference
 * count is decremented unconditionally, so calls must be balanced with
 * successful enables.
 */
static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
{
	int err;

	mutex_lock(&wgrp->lock);

	if (wgrp->usecount == 1) {
		err = reset_control_assert(wgrp->rst);
		if (err < 0) {
			pr_err("failed to assert reset for window group %u\n",
			       wgrp->index);
		}

		/* power down the parent DC even if the reset assert failed */
		host1x_client_suspend(wgrp->parent);
	}

	wgrp->usecount--;
	mutex_unlock(&wgrp->lock);
}
153 
154 int tegra_display_hub_prepare(struct tegra_display_hub *hub)
155 {
156 	unsigned int i;
157 
158 	/*
159 	 * XXX Enabling/disabling windowgroups needs to happen when the owner
160 	 * display controller is disabled. There's currently no good point at
161 	 * which this could be executed, so unconditionally enable all window
162 	 * groups for now.
163 	 */
164 	for (i = 0; i < hub->soc->num_wgrps; i++) {
165 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
166 
167 		/* Skip orphaned window group whose parent DC is disabled */
168 		if (wgrp->parent)
169 			tegra_windowgroup_enable(wgrp);
170 	}
171 
172 	return 0;
173 }
174 
175 void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
176 {
177 	unsigned int i;
178 
179 	/*
180 	 * XXX Remove this once window groups can be more fine-grainedly
181 	 * enabled and disabled.
182 	 */
183 	for (i = 0; i < hub->soc->num_wgrps; i++) {
184 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
185 
186 		/* Skip orphaned window group whose parent DC is disabled */
187 		if (wgrp->parent)
188 			tegra_windowgroup_disable(wgrp);
189 	}
190 }
191 
192 static void tegra_shared_plane_update(struct tegra_plane *plane)
193 {
194 	struct tegra_dc *dc = plane->dc;
195 	unsigned long timeout;
196 	u32 mask, value;
197 
198 	mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
199 	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
200 
201 	timeout = jiffies + msecs_to_jiffies(1000);
202 
203 	while (time_before(jiffies, timeout)) {
204 		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
205 		if ((value & mask) == 0)
206 			break;
207 
208 		usleep_range(100, 400);
209 	}
210 }
211 
212 static void tegra_shared_plane_activate(struct tegra_plane *plane)
213 {
214 	struct tegra_dc *dc = plane->dc;
215 	unsigned long timeout;
216 	u32 mask, value;
217 
218 	mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
219 	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
220 
221 	timeout = jiffies + msecs_to_jiffies(1000);
222 
223 	while (time_before(jiffies, timeout)) {
224 		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
225 		if ((value & mask) == 0)
226 			break;
227 
228 		usleep_range(100, 400);
229 	}
230 }
231 
/*
 * Read the OWNER field of the window group control register for @plane
 * through display controller @dc. A value of OWNER_MASK marks the window
 * as unowned (see tegra_shared_plane_set_owner()).
 */
static unsigned int
tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
{
	unsigned int offset =
		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);

	return tegra_dc_readl(dc, offset) & OWNER_MASK;
}
240 
241 static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
242 				       struct tegra_plane *plane)
243 {
244 	struct device *dev = dc->dev;
245 
246 	if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
247 		if (plane->dc == dc)
248 			return true;
249 
250 		dev_WARN(dev, "head %u owns window %u but is not attached\n",
251 			 dc->pipe, plane->index);
252 	}
253 
254 	return false;
255 }
256 
/*
 * Assign the window backing @plane to head @new, or detach it when @new
 * is NULL (writing OWNER_MASK marks the window as unowned).
 *
 * Returns -EBUSY if the window is currently owned by a different head,
 * 0 on success. On success the new owner is also recorded in plane->dc.
 */
static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
					struct tegra_dc *new)
{
	unsigned int offset =
		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
	/* use whichever DC is available to perform the register access */
	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
	struct device *dev = new ? new->dev : old->dev;
	unsigned int owner, index = plane->index;
	u32 value;

	value = tegra_dc_readl(dc, offset);
	owner = value & OWNER_MASK;

	/* refuse to steal a window that another head still owns */
	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
		return -EBUSY;
	}

	/*
	 * This seems to happen whenever the head has been disabled with one
	 * or more windows being active. This is harmless because we'll just
	 * reassign the window to the new head anyway.
	 */
	if (old && owner == OWNER_MASK)
		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
			old->pipe, owner);

	value &= ~OWNER_MASK;

	if (new)
		value |= OWNER(new->pipe);
	else
		value |= OWNER_MASK;

	tegra_dc_writel(dc, value, offset);

	plane->dc = new;

	return 0;
}
297 
/*
 * Load the input scaler coefficient RAM for @plane.
 *
 * The table holds 16 rows of 4 coefficients for each of 3 scaling ratios;
 * the hardware index strides 64 entries per ratio (ratio << 6), so only
 * the first 64 slots of each 64-entry bank are written. Each write pairs
 * the index with its coefficient data in a single register.
 */
static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
{
	static const unsigned int coeffs[192] = {
		0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
		0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
		0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
		0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
		0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
		0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
		0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
		0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
		0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
		0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
		0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
		0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
		0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
		0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
		0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
		0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
		0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
		0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
		0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
		0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
		0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
		0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
		0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
		0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
		0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
		0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
		0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
		0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
		0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
		0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
		0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
		0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
		0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
		0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
		0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
		0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
		0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
		0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
		0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
		0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
		0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
		0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
		0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
		0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
		0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
		0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
		0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
		0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
	};
	unsigned int ratio, row, column;

	for (ratio = 0; ratio <= 2; ratio++) {
		for (row = 0; row <= 15; row++) {
			for (column = 0; column <= 3; column++) {
				unsigned int index = (ratio << 6) + (row << 2) + column;
				u32 value;

				value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
				tegra_plane_writel(plane, value,
						   DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
			}
		}
	}
}
365 
/*
 * Attach @plane to @dc (taking ownership of the window if necessary) and
 * program the window group's line buffer, fetch-meter, latency, mempool
 * and thread-group settings to fixed defaults, then latch and activate
 * the new configuration. Silently bails out if ownership could not be
 * transferred.
 */
static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	u32 value;
	int err;

	/* take ownership of the window unless this head already owns it */
	if (!tegra_dc_owns_shared_plane(dc, plane)) {
		err = tegra_shared_plane_set_owner(plane, dc);
		if (err < 0)
			return;
	}

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
	value |= MODE_FOUR_LINES;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);

	/*
	 * NOTE(review): the value read here (and for PIPE_METER and
	 * POOL_CONFIG below) is immediately overwritten rather than
	 * modified — confirm whether the read is required by the hardware
	 * or can be dropped.
	 */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
	value = SLOTS(1);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);

	/* disable watermark */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
	value &= ~LATENCY_CTL_MODE_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
	value |= WATERMARK_MASK;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);

	/* pipe meter */
	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);

	/* mempool entries */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
	value = MEMPOOL_ENTRIES(0x331);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);

	/* one thread group per window, numbered after the plane's index */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
	value &= ~THREAD_NUM_MASK;
	value |= THREAD_NUM(plane->base.index);
	value |= THREAD_GROUP_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);

	tegra_shared_plane_setup_scaler(plane);

	/* latch the new configuration and request activation */
	tegra_shared_plane_update(plane);
	tegra_shared_plane_activate(plane);
}
416 
/* Detach @plane from @dc by marking the underlying window as unowned. */
static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	tegra_shared_plane_set_owner(plane, NULL);
}
422 
/*
 * Validate the new atomic state for a shared plane: resolve the hardware
 * pixel format, check that the framebuffer tiling is supported by this
 * SoC and register the plane state with the DC. Returns 0 on success or
 * a negative error code.
 */
static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
	struct tegra_bo_tiling *tiling = &plane_state->tiling;
	struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
	int err;

	/* no need for further checks if the plane is being disabled */
	if (!new_plane_state->crtc || !new_plane_state->fb)
		return 0;

	/* translate the DRM format into the hardware format and byte swap */
	err = tegra_plane_format(new_plane_state->fb->format->format,
				 &plane_state->format,
				 &plane_state->swap);
	if (err < 0)
		return err;

	err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
	if (err < 0)
		return err;

	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
	    !dc->soc->supports_block_linear) {
		DRM_ERROR("hardware doesn't support block linear mode\n");
		return -EINVAL;
	}

	if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
	    !dc->soc->supports_sector_layout) {
		DRM_ERROR("hardware doesn't support GPU sector layout\n");
		return -EINVAL;
	}

	/*
	 * Tegra doesn't support different strides for U and V planes so we
	 * error out if the user tries to display a framebuffer with such a
	 * configuration.
	 */
	if (new_plane_state->fb->format->num_planes > 2) {
		if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
			DRM_ERROR("unsupported UV-plane configuration\n");
			return -EINVAL;
		}
	}

	/* XXX scaling is not yet supported, add a check here */

	err = tegra_plane_state_add(&tegra->base, new_plane_state);
	if (err < 0)
		return err;

	return 0;
}
480 
/*
 * Disable a shared plane: clear its WIN_ENABLE bit and release window
 * ownership. The owning DC is resumed for the register accesses and
 * suspended again before returning. Does nothing if the plane had no
 * CRTC in the old state.
 */
static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
					      struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct tegra_plane *p = to_tegra_plane(plane);
	struct tegra_dc *dc;
	u32 value;
	int err;

	/* rien ne va plus */
	if (!old_state || !old_state->crtc)
		return;

	dc = to_tegra_dc(old_state->crtc);

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	/*
	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
	 * on planes that are already disabled. Make sure we fallback to the
	 * head for this particular state instead of crashing.
	 */
	if (WARN_ON(p->dc == NULL))
		p->dc = dc;

	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
	value &= ~WIN_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	/* give up ownership so another head may claim the window */
	tegra_dc_remove_shared_plane(dc, p);

	host1x_client_suspend(&dc->client);
}
519 
520 static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
521 {
522 	u64 tmp, tmp1, tmp2;
523 
524 	tmp = (u64)dfixed_trunc(in);
525 	tmp2 = (u64)out;
526 	tmp1 = (tmp << NFB) + (tmp2 >> 1);
527 	do_div(tmp1, tmp2);
528 
529 	return lower_32_bits(tmp1);
530 }
531 
/*
 * Program a shared (window group) plane from its new atomic state.
 *
 * Resumes the owning display controller for the duration of the register
 * writes and suspends it again before returning. Bails out early when the
 * plane has no CRTC/framebuffer and delegates to ->atomic_disable() when
 * the plane is not visible.
 */
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
					     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
	struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
	unsigned int zpos = new_state->normalized_zpos;
	struct drm_framebuffer *fb = new_state->fb;
	struct tegra_plane *p = to_tegra_plane(plane);
	u32 value, min_width, bypass = 0;
	dma_addr_t base, addr_flag = 0;
	unsigned int bpc, planes;
	bool yuv;
	int err;

	/* rien ne va plus */
	if (!new_state->crtc || !new_state->fb)
		return;

	/* an invisible plane is programmed as if it were being disabled */
	if (!new_state->visible) {
		tegra_shared_plane_atomic_disable(plane, state);
		return;
	}

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planes, &bpc);

	/* claim the window for this head and apply common window setup */
	tegra_dc_assign_shared_plane(dc, p);

	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);

	/* blending */
	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);

	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);

	/* hardware layer depth is inverted relative to the DRM zpos */
	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);

	/* scaling */
	min_width = min(new_state->src_w >> 16, new_state->crtc_w);

	value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);

	if (min_width < MAX_PIXELS_5TAP444(value)) {
		value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	} else {
		value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);

		if (min_width < MAX_PIXELS_2TAP444(value))
			value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
		else
			dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
	}

	/*
	 * NOTE(review): the tap selection computed above is discarded here
	 * and 5-tap scaling is always programmed — confirm whether this is
	 * intentional.
	 */
	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);

	/* horizontal scaler: program phase increment, or bypass when 1:1 */
	if (new_state->src_w != new_state->crtc_w << 16) {
		fixed20_12 width = dfixed_init(new_state->src_w >> 16);
		u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_HBYPASS;
	}

	/* vertical scaler: same scheme as the horizontal one */
	if (new_state->src_h != new_state->crtc_h << 16) {
		fixed20_12 height = dfixed_init(new_state->src_h >> 16);
		u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_VBYPASS;
	}

	tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);

	/* disable compression */
	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	/*
	 * Physical address bit 39 in Tegra194 is used as a switch for special
	 * logic that swizzles the memory using either the legacy Tegra or the
	 * dGPU sector layout.
	 */
	if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
		addr_flag = BIT_ULL(39);
#endif

	base = tegra_plane_state->iova[0] + fb->offsets[0];
	base |= addr_flag;

	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);

	/* destination position and size on the CRTC */
	value = V_POSITION(new_state->crtc_y) |
		H_POSITION(new_state->crtc_x);
	tegra_plane_writel(p, value, DC_WIN_POSITION);

	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
	tegra_plane_writel(p, value, DC_WIN_SIZE);

	value = WIN_ENABLE | COLOR_EXPAND;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	/* source rectangle size (atomic state stores 16.16 fixed point) */
	value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);

	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);

	value = PITCH(fb->pitches[0]);
	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);

	if (yuv && planes > 1) {
		/* program the chroma plane(s) for planar YUV formats */
		base = tegra_plane_state->iova[1] + fb->offsets[1];
		base |= addr_flag;

		tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);

		if (planes > 2) {
			base = tegra_plane_state->iova[2] + fb->offsets[2];
			base |= addr_flag;

			tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
			tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);
		}

		value = PITCH_U(fb->pitches[1]);

		if (planes > 2)
			value |= PITCH_V(fb->pitches[2]);

		tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
	} else {
		/* packed or single-plane format: clear the chroma registers */
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
		tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
	}

	value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;

	if (yuv) {
		if (bpc < 12)
			value |= DEGAMMA_YUV8_10;
		else
			value |= DEGAMMA_YUV12;

		/* XXX parameterize */
		value |= COLOR_SPACE_YUV_2020;
	} else {
		if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
			value |= DEGAMMA_SRGB;
	}

	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);

	/*
	 * NOTE(review): src_y feeds OFFSET_X and src_x feeds OFFSET_Y here —
	 * confirm against the register definition whether the macros are
	 * named from the hardware's perspective or the arguments are swapped.
	 */
	value = OFFSET_X(new_state->src_y >> 16) |
		OFFSET_Y(new_state->src_x >> 16);
	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);

	if (dc->soc->supports_block_linear) {
		unsigned long height = tegra_plane_state->tiling.value;

		/* XXX */
		switch (tegra_plane_state->tiling.mode) {
		case TEGRA_BO_TILING_MODE_PITCH:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
				DC_WINBUF_SURFACE_KIND_PITCH;
			break;

		/* XXX not supported on Tegra186 and later */
		case TEGRA_BO_TILING_MODE_TILED:
			value = DC_WINBUF_SURFACE_KIND_TILED;
			break;

		case TEGRA_BO_TILING_MODE_BLOCK:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
				DC_WINBUF_SURFACE_KIND_BLOCK;
			break;
		}

		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
	}

	/* disable gamut CSC */
	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
	value &= ~CONTROL_CSC_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);

	host1x_client_suspend(&dc->client);
}
745 
/* Atomic helper callbacks for shared (window group) planes. */
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
	.prepare_fb = tegra_plane_prepare_fb,
	.cleanup_fb = tegra_plane_cleanup_fb,
	.atomic_check = tegra_shared_plane_atomic_check,
	.atomic_update = tegra_shared_plane_atomic_update,
	.atomic_disable = tegra_shared_plane_atomic_disable,
};
753 
/*
 * Create a shared overlay plane backed by window group @wgrp and hardware
 * window @index, initially parented to @dc.
 *
 * Since shared planes can be reassigned between heads at runtime, the
 * plane advertises all CRTCs in its possible_crtcs mask. Returns the new
 * plane or an ERR_PTR() on failure.
 */
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
					    struct tegra_dc *dc,
					    unsigned int wgrp,
					    unsigned int index)
{
	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub *hub = tegra->hub;
	struct tegra_shared_plane *plane;
	unsigned int possible_crtcs;
	unsigned int num_formats;
	const u64 *modifiers;
	struct drm_plane *p;
	const u32 *formats;
	int err;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	/* per-window register aperture, see tegra_plane_offset() */
	plane->base.offset = 0x0a00 + 0x0300 * index;
	plane->base.index = index;

	plane->wgrp = &hub->wgrps[wgrp];
	plane->wgrp->parent = &dc->client;

	p = &plane->base.base;

	/* planes can be assigned to arbitrary CRTCs */
	possible_crtcs = BIT(tegra->num_crtcs) - 1;

	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
	formats = tegra_shared_plane_formats;
	modifiers = tegra_shared_plane_modifiers;

	err = drm_universal_plane_init(drm, p, possible_crtcs,
				       &tegra_plane_funcs, formats,
				       num_formats, modifiers, type, NULL);
	if (err < 0) {
		kfree(plane);
		return ERR_PTR(err);
	}

	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
	drm_plane_create_zpos_property(p, 0, 0, 255);

	return p;
}
802 
803 static struct drm_private_state *
804 tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
805 {
806 	struct tegra_display_hub_state *state;
807 
808 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
809 	if (!state)
810 		return NULL;
811 
812 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
813 
814 	return &state->base;
815 }
816 
/* Free a hub private state previously created by ->atomic_duplicate_state(). */
static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
					    struct drm_private_state *state)
{
	kfree(to_tegra_display_hub_state(state));
}
825 
/* Private-object state management callbacks for the display hub. */
static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
	.atomic_destroy_state = tegra_display_hub_destroy_state,
};
830 
/*
 * Get the hub's private state for the given atomic transaction. Returns
 * an ERR_PTR() on failure.
 */
static struct tegra_display_hub_state *
tegra_display_hub_get_state(struct tegra_display_hub *hub,
			    struct drm_atomic_state *state)
{
	struct drm_private_state *priv;

	priv = drm_atomic_get_private_obj_state(state, &hub->base);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	return to_tegra_display_hub_state(priv);
}
843 
/*
 * Pick the display hub clock parent and rate for this atomic transaction:
 * the clock of the active head with the highest pixel clock. No-op on
 * SoCs without a display hub. Returns 0 on success or a negative error
 * code if the hub state could not be acquired.
 */
int tegra_display_hub_atomic_check(struct drm_device *drm,
				   struct drm_atomic_state *state)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub_state *hub_state;
	struct drm_crtc_state *old, *new;
	struct drm_crtc *crtc;
	unsigned int i;

	if (!tegra->hub)
		return 0;

	hub_state = tegra_display_hub_get_state(tegra->hub, state);
	if (IS_ERR(hub_state))
		return PTR_ERR(hub_state);

	/*
	 * The display hub display clock needs to be fed by the display clock
	 * with the highest frequency to ensure proper functioning of all the
	 * displays.
	 *
	 * Note that this isn't used before Tegra186, but it doesn't hurt and
	 * conditionalizing it would make the code less clean.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
		struct tegra_dc_state *dc = to_dc_state(new);

		if (new->active) {
			/* keep the clock of the fastest active head */
			if (!hub_state->clk || dc->pclk > hub_state->rate) {
				hub_state->dc = to_tegra_dc(dc->base.crtc);
				hub_state->clk = hub_state->dc->clk;
				hub_state->rate = dc->pclk;
			}
		}
	}

	return 0;
}
882 
/*
 * Program the display hub's common settings through @dc and commit them
 * via a COMMON_UPDATE/COMMON_ACTREQ sequence. The DC is resumed for the
 * register accesses and suspended again before returning.
 */
static void tegra_display_hub_update(struct tegra_dc *dc)
{
	u32 value;
	int err;

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
	value &= ~LATENCY_EVENT;
	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);

	/*
	 * NOTE(review): the value read here is overwritten, not modified —
	 * confirm whether the read is required by the hardware.
	 */
	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);

	/*
	 * Each write is followed by a read of the same register; presumably
	 * this serializes/flushes the write — TODO confirm.
	 */
	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);

	host1x_client_suspend(&dc->client);
}
909 
/*
 * Apply the hub state chosen in tegra_display_hub_atomic_check(): set the
 * selected clock's rate, reparent the hub display clock to it and program
 * the hub through the selected DC. Clock errors are logged but do not
 * abort the commit.
 */
void tegra_display_hub_atomic_commit(struct drm_device *drm,
				     struct drm_atomic_state *state)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub *hub = tegra->hub;
	struct tegra_display_hub_state *hub_state;
	struct device *dev = hub->client.dev;
	int err;

	hub_state = to_tegra_display_hub_state(hub->base.state);

	if (hub_state->clk) {
		err = clk_set_rate(hub_state->clk, hub_state->rate);
		if (err < 0)
			dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
				hub_state->clk, hub_state->rate);

		err = clk_set_parent(hub->clk_disp, hub_state->clk);
		if (err < 0)
			dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
				hub->clk_disp, hub_state->clk, err);
	}

	if (hub_state->dc)
		tegra_display_hub_update(hub_state->dc);
}
936 
/*
 * host1x client ->init() callback: register the hub as a DRM atomic
 * private object with a freshly allocated initial state and publish it
 * in the tegra_drm instance. Returns 0 or -ENOMEM.
 */
static int tegra_display_hub_init(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(drm, &hub->base, &state->base,
				    &tegra_display_hub_state_funcs);

	tegra->hub = hub;

	return 0;
}
955 
/*
 * host1x client ->exit() callback: tear down the hub's private object and
 * unpublish it. Always returns 0.
 */
static int tegra_display_hub_exit(struct host1x_client *client)
{
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;

	drm_atomic_private_obj_fini(&tegra->hub->base);
	tegra->hub = NULL;

	return 0;
}
966 
/*
 * Runtime-suspend the display hub: assert its reset, disable the head,
 * hub, DSC and display clocks (in reverse enable order) and drop the
 * runtime PM reference. Returns a negative error code if the reset could
 * not be asserted, in which case nothing is torn down.
 */
static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct device *dev = client->dev;
	unsigned int i = hub->num_heads;
	int err;

	err = reset_control_assert(hub->rst);
	if (err < 0)
		return err;

	while (i--)
		clk_disable_unprepare(hub->clk_heads[i]);

	clk_disable_unprepare(hub->clk_hub);
	clk_disable_unprepare(hub->clk_dsc);
	clk_disable_unprepare(hub->clk_disp);

	pm_runtime_put_sync(dev);

	return 0;
}
989 
static int tegra_display_hub_runtime_resume(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct device *dev = client->dev;
	unsigned int i;
	int err;

	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "failed to get runtime PM: %d\n", err);
		return err;
	}

	/*
	 * Ungate clocks in order: display, DSC (NULL on SoCs without DSC
	 * support, which clk_prepare_enable() treats as a no-op), hub, then
	 * one clock per head. Each failure unwinds everything enabled so far.
	 */
	err = clk_prepare_enable(hub->clk_disp);
	if (err < 0)
		goto put_rpm;

	err = clk_prepare_enable(hub->clk_dsc);
	if (err < 0)
		goto disable_disp;

	err = clk_prepare_enable(hub->clk_hub);
	if (err < 0)
		goto disable_dsc;

	for (i = 0; i < hub->num_heads; i++) {
		err = clk_prepare_enable(hub->clk_heads[i]);
		if (err < 0)
			goto disable_heads;
	}

	/* all clocks are up; release the hub from reset last */
	err = reset_control_deassert(hub->rst);
	if (err < 0)
		goto disable_heads;

	return 0;

disable_heads:
	/* i is the number of head clocks successfully enabled above */
	while (i--)
		clk_disable_unprepare(hub->clk_heads[i]);

	clk_disable_unprepare(hub->clk_hub);
disable_dsc:
	clk_disable_unprepare(hub->clk_dsc);
disable_disp:
	clk_disable_unprepare(hub->clk_disp);
put_rpm:
	pm_runtime_put_sync(dev);
	return err;
}
1040 
/*
 * host1x client callbacks: init/exit attach and detach the hub's atomic
 * state on the DRM device; suspend/resume manage its clocks, reset and
 * runtime PM reference.
 */
static const struct host1x_client_ops tegra_display_hub_ops = {
	.init = tegra_display_hub_init,
	.exit = tegra_display_hub_exit,
	.suspend = tegra_display_hub_runtime_suspend,
	.resume = tegra_display_hub_runtime_resume,
};
1047 
1048 static int tegra_display_hub_probe(struct platform_device *pdev)
1049 {
1050 	u64 dma_mask = dma_get_mask(pdev->dev.parent);
1051 	struct device_node *child = NULL;
1052 	struct tegra_display_hub *hub;
1053 	struct clk *clk;
1054 	unsigned int i;
1055 	int err;
1056 
1057 	err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
1058 	if (err < 0) {
1059 		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
1060 		return err;
1061 	}
1062 
1063 	hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
1064 	if (!hub)
1065 		return -ENOMEM;
1066 
1067 	hub->soc = of_device_get_match_data(&pdev->dev);
1068 
1069 	hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
1070 	if (IS_ERR(hub->clk_disp)) {
1071 		err = PTR_ERR(hub->clk_disp);
1072 		return err;
1073 	}
1074 
1075 	if (hub->soc->supports_dsc) {
1076 		hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
1077 		if (IS_ERR(hub->clk_dsc)) {
1078 			err = PTR_ERR(hub->clk_dsc);
1079 			return err;
1080 		}
1081 	}
1082 
1083 	hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
1084 	if (IS_ERR(hub->clk_hub)) {
1085 		err = PTR_ERR(hub->clk_hub);
1086 		return err;
1087 	}
1088 
1089 	hub->rst = devm_reset_control_get(&pdev->dev, "misc");
1090 	if (IS_ERR(hub->rst)) {
1091 		err = PTR_ERR(hub->rst);
1092 		return err;
1093 	}
1094 
1095 	hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
1096 				  sizeof(*hub->wgrps), GFP_KERNEL);
1097 	if (!hub->wgrps)
1098 		return -ENOMEM;
1099 
1100 	for (i = 0; i < hub->soc->num_wgrps; i++) {
1101 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1102 		char id[8];
1103 
1104 		snprintf(id, sizeof(id), "wgrp%u", i);
1105 		mutex_init(&wgrp->lock);
1106 		wgrp->usecount = 0;
1107 		wgrp->index = i;
1108 
1109 		wgrp->rst = devm_reset_control_get(&pdev->dev, id);
1110 		if (IS_ERR(wgrp->rst))
1111 			return PTR_ERR(wgrp->rst);
1112 
1113 		err = reset_control_assert(wgrp->rst);
1114 		if (err < 0)
1115 			return err;
1116 	}
1117 
1118 	hub->num_heads = of_get_child_count(pdev->dev.of_node);
1119 
1120 	hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
1121 				      GFP_KERNEL);
1122 	if (!hub->clk_heads)
1123 		return -ENOMEM;
1124 
1125 	for (i = 0; i < hub->num_heads; i++) {
1126 		child = of_get_next_child(pdev->dev.of_node, child);
1127 		if (!child) {
1128 			dev_err(&pdev->dev, "failed to find node for head %u\n",
1129 				i);
1130 			return -ENODEV;
1131 		}
1132 
1133 		clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
1134 		if (IS_ERR(clk)) {
1135 			dev_err(&pdev->dev, "failed to get clock for head %u\n",
1136 				i);
1137 			of_node_put(child);
1138 			return PTR_ERR(clk);
1139 		}
1140 
1141 		hub->clk_heads[i] = clk;
1142 	}
1143 
1144 	of_node_put(child);
1145 
1146 	/* XXX: enable clock across reset? */
1147 	err = reset_control_assert(hub->rst);
1148 	if (err < 0)
1149 		return err;
1150 
1151 	platform_set_drvdata(pdev, hub);
1152 	pm_runtime_enable(&pdev->dev);
1153 
1154 	INIT_LIST_HEAD(&hub->client.list);
1155 	hub->client.ops = &tegra_display_hub_ops;
1156 	hub->client.dev = &pdev->dev;
1157 
1158 	err = host1x_client_register(&hub->client);
1159 	if (err < 0)
1160 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1161 			err);
1162 
1163 	err = devm_of_platform_populate(&pdev->dev);
1164 	if (err < 0)
1165 		goto unregister;
1166 
1167 	return err;
1168 
1169 unregister:
1170 	host1x_client_unregister(&hub->client);
1171 	pm_runtime_disable(&pdev->dev);
1172 	return err;
1173 }
1174 
1175 static int tegra_display_hub_remove(struct platform_device *pdev)
1176 {
1177 	struct tegra_display_hub *hub = platform_get_drvdata(pdev);
1178 	unsigned int i;
1179 	int err;
1180 
1181 	err = host1x_client_unregister(&hub->client);
1182 	if (err < 0) {
1183 		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
1184 			err);
1185 	}
1186 
1187 	for (i = 0; i < hub->soc->num_wgrps; i++) {
1188 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1189 
1190 		mutex_destroy(&wgrp->lock);
1191 	}
1192 
1193 	pm_runtime_disable(&pdev->dev);
1194 
1195 	return err;
1196 }
1197 
/* Tegra186: six window groups, has a DSC clock ("dsc" looked up at probe) */
static const struct tegra_display_hub_soc tegra186_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = true,
};
1202 
/* Tegra194: six window groups, no DSC clock (probe skips the "dsc" lookup) */
static const struct tegra_display_hub_soc tegra194_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = false,
};
1207 
/* DT match table; .data selects the per-SoC configuration above */
static const struct of_device_id tegra_display_hub_of_match[] = {
	{
		.compatible = "nvidia,tegra194-display",
		.data = &tegra194_display_hub
	}, {
		.compatible = "nvidia,tegra186-display",
		.data = &tegra186_display_hub
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
1220 
/* Platform driver; registered by the tegra-drm core rather than here */
struct platform_driver tegra_display_hub_driver = {
	.driver = {
		.name = "tegra-display-hub",
		.of_match_table = tegra_display_hub_of_match,
	},
	.probe = tegra_display_hub_probe,
	.remove = tegra_display_hub_remove,
};
1229