/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"
#include "dce_v6_0.h"
#include "si_enums.h"

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
};
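
/*
 * Usage sketch (illustrative only, the index 3 is an assumption and not
 * taken from the original code): the tables above turn a CRTC or HPD
 * index into a register offset relative to the first instance's block,
 * so reading HPD pin 3's status looks like
 *
 *   u32 status = RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[3]);
 *
 * i.e. every per-pin register is addressed as an HPD1-relative delta.
 */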

static const uint32_t dig_offsets[] = {
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t	reg;
	uint32_t	vblank;
	uint32_t	vline;
	uint32_t	hpd;

} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
		reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}
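
/*
 * Usage sketch (illustrative): the two callbacks above implement an
 * index/data register pair - write the endpoint register index, then
 * access the data register - serialized by audio_endpt_idx_lock.  The
 * rest of this file reaches them through the RREG32_AUDIO_ENDPT() /
 * WREG32_AUDIO_ENDPT() wrappers, e.g.
 *
 *   tmp = RREG32_AUDIO_ENDPT(pin->offset,
 *		ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
 *
 * where pin->offset comes from the pin_offsets[] table further down.
 */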

static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address (evergreen+): the new scanout address
 * takes effect at the next double buffered update, at vsync by
 * default or at hsync for async flips.
 */
static void dce_v6_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update pitch */
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
	       fb->pitches[0] / fb->format->cpp[0]);
	/* update the scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v6_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v6_0_hpd_sense(adev, hpd);

	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**
 * dce_v6_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* Don't try to enable hpd on eDP or LVDS: it would break
			 * the aux dp channel on iMacs and cause interrupt storms
			 * during dpms.  Keeping it disabled helps (but does not
			 * completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}

/**
 * dce_v6_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}

static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	if (!render)
		WREG32(mmVGA_RENDER_CONTROL,
			RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
}

static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		return 6;
	case CHIP_OLAND:
		return 2;
	default:
		return 0;
	}
}

void dce_v6_0_disable_dce(struct amdgpu_device *adev)
{
	/* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v6_0_set_vga_render_state(adev, false);

		/* Disable the CRTCs */
		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
		else
			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (SI).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}
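
/*
 * Worked example (illustrative, not from the original code): a
 * NOOFCHAN field value of 3 decodes to 8 DRAM channels via the switch
 * above.  The watermark math below treats each channel as 4 bytes
 * wide, so 8 channels behave as a 32-byte wide memory interface.
 */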

struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;   /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};

/**
 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
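
/*
 * Worked equation (illustrative numbers): with wm->yclk = 1000000 kHz
 * (a 1 GHz effective DRAM clock) and 8 channels, the fixed-point math
 * above computes
 *
 *   bandwidth = (8 channels * 4 bytes) * 1000 MHz * 0.7 efficiency
 *             = 22400 MBytes/s
 *
 * yclk is first scaled from kHz to MHz, and the 0.7 factor is built
 * as 7/10 in fixed20.12 arithmetic.
 */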

/**
 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}
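
/*
 * Worked equation (illustrative numbers): at wm->sclk = 800000 kHz
 * (an 800 MHz engine clock) the display data return path above gives
 *
 *   bandwidth = 32 bytes/cycle * 800 MHz * 0.8 efficiency
 *             = 20480 MBytes/s
 */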

/**
 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v6_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
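
/*
 * Worked equation (illustrative 1080p60 timing, an assumption rather
 * than data from the driver): active_time ~= 12929 ns and
 * blank_time ~= 1886 ns give line_time ~= 14.8 us.  For a 1920 pixel
 * wide source at 4 bytes/pixel with vsc = 1:
 *
 *   bandwidth = 1920 * 4 bytes / 14.8 us ~= 518 MBytes/s
 */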

/**
 * dce_v6_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (SI).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
	tmp = min(dfixed_trunc(a), tmp);

	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
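
/*
 * Worked example (illustrative, reusing the numbers from the sketches
 * above): with 20480 MBytes/s available bandwidth, one head and
 * disp_clk = 148500 kHz:
 *
 *   worst_chunk_return_time      = 512*8*1000 / 20480  = 200 ns
 *   cursor_line_pair_return_time = 128*4*1000 / 20480  = 25 ns
 *   dc_latency                   = 40000000 / 148500  ~= 269 ns
 *   latency = 2000 + (2*200 + 1*25) + 269             ~= 2694 ns
 *
 * plus a penalty if the line buffer cannot be refilled within the
 * active display time.
 */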

/**
 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v6_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (SI).
 */
static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 active_time;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
	fixed20_12 a, b, c;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
					    (u32)mode->clock);
		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
					  (u32)mode->clock);
		line_time = min(line_time, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		dram_channels = si_get_number_of_dram_channels(adev);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = active_time;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = active_time;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
		/* set for low clocks */
		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v6_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v6_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;

		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT)  |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;

	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
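
/*
 * Worked equation (illustrative): the PRIORITY_x_MARK value programmed
 * above is the latency watermark converted from time to pixels and then
 * to 16-pixel units:
 *
 *   mark = latency_ns * pixel_clock_MHz * hsc / 1000 / 16
 *
 * e.g. 2694 ns * 148.5 MHz / 1000 / 16 ~= 25 (assuming hsc = 1; the
 * numbers carry over from the sketches above and are not taken from
 * real hardware).
 */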

/* watermark setup */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
				   struct amdgpu_crtc *amdgpu_crtc,
				   struct drm_display_mode *mode,
				   struct drm_display_mode *other_mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 21:20:
	 *  0 - half lb
	 *  2 - whole lb, other crtc must be disabled
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs.  Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (other_mode) {
			tmp = 0; /* 1/2 */
			buffer_alloc = 1;
		} else {
			tmp = 2; /* whole */
			buffer_alloc = 2;
		}
	} else {
		tmp = 0;
		buffer_alloc = 0;
	}

	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
	       DC_LB_MEMORY_CONFIG(tmp));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 2:
			return 8192 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
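
/*
 * Worked example (illustrative): with two enabled displays on a paired
 * CRTC group, each CRTC gets half a line buffer (tmp = 0) and the
 * function returns 4096 * 2 = 8192 entries.  For a 1920 pixel wide mode
 * that later yields lb_vblank_lead_lines = DIV_ROUND_UP(8192, 1920) = 5
 * in dce_v6_0_program_watermarks().
 */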

/**
 * dce_v6_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (SI).
 */
static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	if (!adev->mode_info.mode_config_initialized)
		return;

	amdgpu_display_update_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
		mode0 = &adev->mode_info.crtcs[i]->base.mode;
		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
				ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
					PORT_CONNECTIVITY))
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v6_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
	       REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
		             dig->afmt->pin->id));
}

static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	int interlace = 0;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		interlace = 1;

	if (connector->latency_present[interlace]) {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				VIDEO_LIPSYNC, connector->video_latency[interlace]);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				AUDIO_LIPSYNC, connector->audio_latency[interlace]);
	} else {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				VIDEO_LIPSYNC, 0);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				AUDIO_LIPSYNC, 0);
	}
	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u8 *sadb = NULL;
	int sad_count;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			HDMI_CONNECTION, 0);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			DP_CONNECTION, 0);

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				DP_CONNECTION, 1);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				HDMI_CONNECTION, 1);

	if (sad_count)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				SPEAKER_ALLOCATION, sadb[0]);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				SPEAKER_ALLOCATION, 5); /* stereo */

	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count < 0)
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
	if (sad_count <= 0)
		return;

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 tmp = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							MAX_CHANNELS, sad->channels);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							DESCRIPTOR_BYTE_2, sad->byte2);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							SUPPORTED_FREQUENCIES, sad->freq);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
				SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
	}

	kfree(sads);
}

static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};

static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	default:
		adev->mode_info.audio.num_pins = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.audio.num_pins = 2;
		break;
	}

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
				   uint32_t clock, int bpc)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
			bpc > 8 ? 0 : 1);
	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
}
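
/*
 * Worked example (illustrative, based on the standard HDMI recommended
 * values rather than this driver's tables): audio clock regeneration
 * maintains 128*fs = f_TMDS * N / CTS.  For 48 kHz audio on a 148.5 MHz
 * TMDS clock the recommended pair is N = 6144, CTS = 148500, since
 *
 *   148500000 * 6144 / 148500 = 6144000 = 128 * 48000
 *
 * amdgpu_afmt_acr() supplies the per-rate N/CTS values programmed above.
 */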

static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
					       struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct hdmi_avi_infoframe frame;
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	uint8_t *payload = buffer + 3;
	uint8_t *header = buffer;
	ssize_t err;
	u32 tmp;

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	/* anything other than 0 */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
			HDMI_AUDIO_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
	u32 tmp;

	/*
	 * Two dtos: generally use dto0 for hdmi, dto1 for dp.
	 * Express [24MHz / target pixel clock] as an exact rational
	 * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
	 * numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
	 */
	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
			DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
	if (em == ATOM_ENCODER_MODE_HDMI) {
		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
				DCCG_AUDIO_DTO_SEL, 0);
	} else if (ENCODER_MODE_IS_DP(em)) {
		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
				DCCG_AUDIO_DTO_SEL, 1);
	}
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
	if (em == ATOM_ENCODER_MODE_HDMI) {
		WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
		WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
	} else if (ENCODER_MODE_IS_DP(em)) {
		WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
		WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
	}
}
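
/*
 * Worked example (illustrative): callers pass the pixel clock in kHz,
 * so for a 148.5 MHz (1080p60) mode the HDMI DTO above is programmed
 * as PHASE/MODULE = 24000/148500, i.e. exactly 24 MHz / 148.5 MHz.
 */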

static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
}
1566 
1567 static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
1568 {
1569 	struct drm_device *dev = encoder->dev;
1570 	struct amdgpu_device *adev = drm_to_adev(dev);
1571 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1572 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1573 	u32 tmp;
1574 
1575 	tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
1576 	tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
1577 	WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
1578 }
1579 
1580 static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
1581 {
1582 	struct drm_device *dev = encoder->dev;
1583 	struct amdgpu_device *adev = drm_to_adev(dev);
1584 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1585 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1586 	u32 tmp;
1587 
1588 	if (enable) {
1589 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1590 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1591 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1592 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1593 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1594 		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1595 
1596 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1597 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1598 		WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1599 
1600 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1601 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1602 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1603 	} else {
1604 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1605 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
1606 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
1607 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
1608 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
1609 		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1610 
1611 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1612 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
1613 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1614 	}
1615 }
1616 
1617 static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
1618 {
1619 	struct drm_device *dev = encoder->dev;
1620 	struct amdgpu_device *adev = drm_to_adev(dev);
1621 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1622 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1623 	u32 tmp;
1624 
1625 	if (enable) {
1626 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1627 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1628 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1629 
1630 		tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
1631 		tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
1632 		WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);
1633 
1634 		tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
1635 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
1636 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
1637 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
1638 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1639 		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
1640 	} else {
1641 		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
1642 	}
1643 }
1644 
1645 static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1646 				  struct drm_display_mode *mode)
1647 {
1648 	struct drm_device *dev = encoder->dev;
1649 	struct amdgpu_device *adev = drm_to_adev(dev);
1650 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1651 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1652 	struct drm_connector *connector;
1653 	struct drm_connector_list_iter iter;
1654 	struct amdgpu_connector *amdgpu_connector = NULL;
1655 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1656 	int bpc = 8;
1657 
1658 	if (!dig || !dig->afmt)
1659 		return;
1660 
1661 	drm_connector_list_iter_begin(dev, &iter);
1662 	drm_for_each_connector_iter(connector, &iter) {
1663 		if (connector->encoder == encoder) {
1664 			amdgpu_connector = to_amdgpu_connector(connector);
1665 			break;
1666 		}
1667 	}
1668 	drm_connector_list_iter_end(&iter);
1669 
1670 	if (!amdgpu_connector) {
1671 		DRM_ERROR("Couldn't find encoder's connector\n");
1672 		return;
1673 	}
1674 
1675 	if (!dig->afmt->enabled)
1676 		return;
1677 
1678 	dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
1679 	if (!dig->afmt->pin)
1680 		return;
1681 
1682 	if (encoder->crtc) {
1683 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1684 		bpc = amdgpu_crtc->bpc;
1685 	}
1686 
1687 	/* disable audio before setting up hw */
1688 	dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1689 
1690 	dce_v6_0_audio_set_mute(encoder, true);
1691 	dce_v6_0_audio_write_speaker_allocation(encoder);
1692 	dce_v6_0_audio_write_sad_regs(encoder);
1693 	dce_v6_0_audio_write_latency_fields(encoder, mode);
1694 	if (em == ATOM_ENCODER_MODE_HDMI) {
1695 		dce_v6_0_audio_set_dto(encoder, mode->clock);
1696 		dce_v6_0_audio_set_vbi_packet(encoder);
1697 		dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
1698 	} else if (ENCODER_MODE_IS_DP(em)) {
1699 		dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
1700 	}
1701 	dce_v6_0_audio_set_packet(encoder);
1702 	dce_v6_0_audio_select_pin(encoder);
1703 	dce_v6_0_audio_set_avi_infoframe(encoder, mode);
1704 	dce_v6_0_audio_set_mute(encoder, false);
1705 	if (em == ATOM_ENCODER_MODE_HDMI) {
		dce_v6_0_audio_hdmi_enable(encoder, true);
1707 	} else if (ENCODER_MODE_IS_DP(em)) {
		dce_v6_0_audio_dp_enable(encoder, true);
1709 	}
1710 
1711 	/* enable audio after setting up hw */
1712 	dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
1713 }
1714 
1715 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1716 {
1717 	struct drm_device *dev = encoder->dev;
1718 	struct amdgpu_device *adev = drm_to_adev(dev);
1719 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1720 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1721 
1722 	if (!dig || !dig->afmt)
1723 		return;
1724 
	/* silently ignore redundant enable/disable requests */
1726 	if (enable && dig->afmt->enabled)
1727 		return;
1728 
1729 	if (!enable && !dig->afmt->enabled)
1730 		return;
1731 
1732 	if (!enable && dig->afmt->pin) {
1733 		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1734 		dig->afmt->pin = NULL;
1735 	}
1736 
1737 	dig->afmt->enabled = enable;
1738 
1739 	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1740 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1741 }
1742 
1743 static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1744 {
1745 	int i, j;
1746 
1747 	for (i = 0; i < adev->mode_info.num_dig; i++)
1748 		adev->mode_info.afmt[i] = NULL;
1749 
1750 	/* DCE6 has audio blocks tied to DIG encoders */
1751 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1752 		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1753 		if (adev->mode_info.afmt[i]) {
1754 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1755 			adev->mode_info.afmt[i]->id = i;
1756 		} else {
1757 			for (j = 0; j < i; j++) {
1758 				kfree(adev->mode_info.afmt[j]);
1759 				adev->mode_info.afmt[j] = NULL;
1760 			}
1761 			DRM_ERROR("Out of memory allocating afmt table\n");
1762 			return -ENOMEM;
1763 		}
1764 	}
1765 	return 0;
1766 }
1767 
1768 static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1769 {
1770 	int i;
1771 
1772 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1773 		kfree(adev->mode_info.afmt[i]);
1774 		adev->mode_info.afmt[i] = NULL;
1775 	}
1776 }
1777 
1778 static const u32 vga_control_regs[6] =
1779 {
1780 	mmD1VGA_CONTROL,
1781 	mmD2VGA_CONTROL,
1782 	mmD3VGA_CONTROL,
1783 	mmD4VGA_CONTROL,
1784 	mmD5VGA_CONTROL,
1785 	mmD6VGA_CONTROL,
1786 };
1787 
1788 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1789 {
1790 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1791 	struct drm_device *dev = crtc->dev;
1792 	struct amdgpu_device *adev = drm_to_adev(dev);
1793 	u32 vga_control;
1794 
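	/* VGA enable is bit 0 of the DxVGA_CONTROL register */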
1795 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1796 	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1797 }
1798 
1799 static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1800 {
1801 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1802 	struct drm_device *dev = crtc->dev;
1803 	struct amdgpu_device *adev = drm_to_adev(dev);
1804 
1805 	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1806 }
1807 
1808 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1809 				     struct drm_framebuffer *fb,
1810 				     int x, int y, int atomic)
1811 {
1812 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1813 	struct drm_device *dev = crtc->dev;
1814 	struct amdgpu_device *adev = drm_to_adev(dev);
1815 	struct drm_framebuffer *target_fb;
1816 	struct drm_gem_object *obj;
1817 	struct amdgpu_bo *abo;
1818 	uint64_t fb_location, tiling_flags;
1819 	uint32_t fb_format, fb_pitch_pixels, pipe_config;
1820 	u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
1821 	u32 viewport_w, viewport_h;
1822 	int r;
1823 	bool bypass_lut = false;
1824 
1825 	/* no fb bound */
1826 	if (!atomic && !crtc->primary->fb) {
1827 		DRM_DEBUG_KMS("No FB bound\n");
1828 		return 0;
1829 	}
1830 
1831 	if (atomic)
1832 		target_fb = fb;
1833 	else
1834 		target_fb = crtc->primary->fb;
1835 
1836 	/* If atomic, assume fb object is pinned & idle & fenced and
1837 	 * just update base pointers
1838 	 */
1839 	obj = target_fb->obj[0];
1840 	abo = gem_to_amdgpu_bo(obj);
1841 	r = amdgpu_bo_reserve(abo, false);
1842 	if (unlikely(r != 0))
1843 		return r;
1844 
1845 	if (!atomic) {
1846 		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1847 		if (unlikely(r != 0)) {
1848 			amdgpu_bo_unreserve(abo);
1849 			return -EINVAL;
1850 		}
1851 	}
1852 	fb_location = amdgpu_bo_gpu_offset(abo);
1853 
1854 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1855 	amdgpu_bo_unreserve(abo);
1856 
1857 	switch (target_fb->format->format) {
1858 	case DRM_FORMAT_C8:
1859 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
1860 			     GRPH_FORMAT(GRPH_FORMAT_INDEXED));
1861 		break;
1862 	case DRM_FORMAT_XRGB4444:
1863 	case DRM_FORMAT_ARGB4444:
1864 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1865 			     GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
1866 #ifdef __BIG_ENDIAN
1867 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1868 #endif
1869 		break;
1870 	case DRM_FORMAT_XRGB1555:
1871 	case DRM_FORMAT_ARGB1555:
1872 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1873 			     GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
1874 #ifdef __BIG_ENDIAN
1875 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1876 #endif
1877 		break;
1878 	case DRM_FORMAT_BGRX5551:
1879 	case DRM_FORMAT_BGRA5551:
1880 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1881 			     GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
1882 #ifdef __BIG_ENDIAN
1883 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1884 #endif
1885 		break;
1886 	case DRM_FORMAT_RGB565:
1887 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1888 			     GRPH_FORMAT(GRPH_FORMAT_ARGB565));
1889 #ifdef __BIG_ENDIAN
1890 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1891 #endif
1892 		break;
1893 	case DRM_FORMAT_XRGB8888:
1894 	case DRM_FORMAT_ARGB8888:
1895 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1896 			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1897 #ifdef __BIG_ENDIAN
1898 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1899 #endif
1900 		break;
1901 	case DRM_FORMAT_XRGB2101010:
1902 	case DRM_FORMAT_ARGB2101010:
1903 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1904 			     GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
1905 #ifdef __BIG_ENDIAN
1906 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1907 #endif
		/* fbs with more than 8 bpc need to bypass the hw lut to retain precision */
1909 		bypass_lut = true;
1910 		break;
1911 	case DRM_FORMAT_BGRX1010102:
1912 	case DRM_FORMAT_BGRA1010102:
1913 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1914 			     GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
1915 #ifdef __BIG_ENDIAN
1916 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1917 #endif
		/* fbs with more than 8 bpc need to bypass the hw lut to retain precision */
1919 		bypass_lut = true;
1920 		break;
1921 	case DRM_FORMAT_XBGR8888:
1922 	case DRM_FORMAT_ABGR8888:
1923 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1924 			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1925 		fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
1926 			   GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
1927 #ifdef __BIG_ENDIAN
1928 		fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1929 #endif
1930 		break;
1931 	default:
1932 		DRM_ERROR("Unsupported screen format %p4cc\n",
1933 			  &target_fb->format->format);
1934 		return -EINVAL;
1935 	}
1936 
1937 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1938 		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1939 
1940 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1941 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1942 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1943 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1944 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1945 
1946 		fb_format |= GRPH_NUM_BANKS(num_banks);
1947 		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
1948 		fb_format |= GRPH_TILE_SPLIT(tile_split);
1949 		fb_format |= GRPH_BANK_WIDTH(bankw);
1950 		fb_format |= GRPH_BANK_HEIGHT(bankh);
1951 		fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
1952 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1953 		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
1954 	}
1955 
1956 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1957 	fb_format |= GRPH_PIPE_CONFIG(pipe_config);
1958 
1959 	dce_v6_0_vga_enable(crtc, false);
1960 
1961 	/* Make sure surface address is updated at vertical blank rather than
1962 	 * horizontal blank
1963 	 */
1964 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1965 
1966 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1967 	       upper_32_bits(fb_location));
1968 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1969 	       upper_32_bits(fb_location));
1970 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1971 	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1972 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1973 	       (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1974 	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1975 	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1976 
1977 	/*
	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
1979 	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1980 	 * retain the full precision throughout the pipeline.
1981 	 */
1982 	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
1983 		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
1984 		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
1985 
1986 	if (bypass_lut)
1987 		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1988 
1989 	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1990 	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1991 	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1992 	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1993 	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1994 	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1995 
1996 	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1997 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1998 
1999 	dce_v6_0_grph_enable(crtc, true);
2000 
2001 	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2002 		       target_fb->height);
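	/* align the viewport start: x down to a 4-pixel, y to a 2-line boundary */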
2003 	x &= ~3;
2004 	y &= ~1;
2005 	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2006 	       (x << 16) | y);
2007 	viewport_w = crtc->mode.hdisplay;
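	/* round the viewport height up to an even number of lines */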
2008 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2009 
2010 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2011 	       (viewport_w << 16) | viewport_h);
2012 
2013 	/* set pageflip to happen anywhere in vblank interval */
2014 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2015 
2016 	if (!atomic && fb && fb != crtc->primary->fb) {
2017 		abo = gem_to_amdgpu_bo(fb->obj[0]);
2018 		r = amdgpu_bo_reserve(abo, true);
2019 		if (unlikely(r != 0))
2020 			return r;
2021 		amdgpu_bo_unpin(abo);
2022 		amdgpu_bo_unreserve(abo);
2023 	}
2024 
2025 	/* Bytes per pixel may have changed */
2026 	dce_v6_0_bandwidth_update(adev);
2027 
2028 	return 0;
2030 }
2031 
2032 static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
2033 				    struct drm_display_mode *mode)
2034 {
2035 	struct drm_device *dev = crtc->dev;
2036 	struct amdgpu_device *adev = drm_to_adev(dev);
2037 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2038 
2039 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2040 		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
2041 		       INTERLEAVE_EN);
2042 	else
2043 		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2044 }
2045 
2046 static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
2047 {
2049 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2050 	struct drm_device *dev = crtc->dev;
2051 	struct amdgpu_device *adev = drm_to_adev(dev);
2052 	u16 *r, *g, *b;
2053 	int i;
2054 
2055 	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2056 
2057 	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2058 	       ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2059 		(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2060 	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2061 	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2062 	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2063 	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2064 	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2065 	       ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2066 		(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2067 
2068 	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2069 
2070 	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2071 	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2072 	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2073 
2074 	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2075 	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2076 	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2077 
2078 	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2079 	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2080 
2081 	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2082 	r = crtc->gamma_store;
2083 	g = r + crtc->gamma_size;
2084 	b = g + crtc->gamma_size;
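	/*
	 * DC_LUT_30_COLOR packs 10 bits per component: red in bits 29:20,
	 * green in bits 19:10, blue in bits 9:0, taken from the top 10
	 * bits of each 16-bit gamma entry.
	 */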
2085 	for (i = 0; i < 256; i++) {
2086 		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2087 		       ((*r++ & 0xffc0) << 14) |
2088 		       ((*g++ & 0xffc0) << 4) |
2089 		       (*b++ >> 6));
2090 	}
2091 
2092 	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2093 	       ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2094 		(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2095 		ICON_DEGAMMA_MODE(0) |
2096 		(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2097 	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2098 	       ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2099 		(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2100 	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2101 	       ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2102 		(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2103 	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2104 	       ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2105 		(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2106 	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2107 	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2110 }
2111 
2112 static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
2113 {
2114 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2115 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2116 
2117 	switch (amdgpu_encoder->encoder_id) {
2118 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2119 		return dig->linkb ? 1 : 0;
2120 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2121 		return dig->linkb ? 3 : 2;
2122 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2123 		return dig->linkb ? 5 : 4;
2124 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2125 		return 6;
2126 	default:
2127 		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2128 		return 0;
2129 	}
2130 }
2131 
2132 /**
2133  * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
2134  *
2135  * @crtc: drm crtc
2136  *
2137  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2138  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2139  * monitors a dedicated PPLL must be used.  If a particular board has
2140  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2141  * as there is no need to program the PLL itself.  If we are not able to
2142  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2143  * avoid messing up an existing monitor.
 */
2147 static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
2148 {
2149 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2150 	struct drm_device *dev = crtc->dev;
2151 	struct amdgpu_device *adev = drm_to_adev(dev);
2152 	u32 pll_in_use;
2153 	int pll;
2154 
2155 	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2156 		if (adev->clock.dp_extclk)
2157 			/* skip PPLL programming if using ext clock */
2158 			return ATOM_PPLL_INVALID;
2159 		else
2160 			return ATOM_PPLL0;
2161 	} else {
2162 		/* use the same PPLL for all monitors with the same clock */
2163 		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2164 		if (pll != ATOM_PPLL_INVALID)
2165 			return pll;
2166 	}
2167 
	/* otherwise, pick a free PPLL: try PPLL2, then PPLL1 */
2169 	pll_in_use = amdgpu_pll_get_use_mask(crtc);
2170 	if (!(pll_in_use & (1 << ATOM_PPLL2)))
2171 		return ATOM_PPLL2;
2172 	if (!(pll_in_use & (1 << ATOM_PPLL1)))
2173 		return ATOM_PPLL1;
2174 	DRM_ERROR("unable to allocate a PPLL\n");
2175 	return ATOM_PPLL_INVALID;
2176 }
2177 
2178 static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2179 {
2180 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2181 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2182 	uint32_t cur_lock;
2183 
2184 	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2185 	if (lock)
2186 		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2187 	else
2188 		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2189 	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2190 }
2191 
2192 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
2193 {
2194 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2195 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2196 
2197 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2198 	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2199 	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2202 }
2203 
2204 static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
2205 {
2206 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2207 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2208 
2209 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2210 	       upper_32_bits(amdgpu_crtc->cursor_addr));
2211 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2212 	       lower_32_bits(amdgpu_crtc->cursor_addr));
2213 
2214 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2215 	       CUR_CONTROL__CURSOR_EN_MASK |
2216 	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2217 	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2219 }
2220 
2221 static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
2222 				       int x, int y)
2223 {
2224 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2225 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2226 	int xorigin = 0, yorigin = 0;
2227 
2228 	int w = amdgpu_crtc->cursor_width;
2229 
2230 	amdgpu_crtc->cursor_x = x;
2231 	amdgpu_crtc->cursor_y = y;
2232 
	/* avivo cursors are offset into the total surface */
2234 	x += crtc->x;
2235 	y += crtc->y;
2236 	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
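	/* clamp a negative position to 0 and shift the cursor hotspot instead */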
2237 
2238 	if (x < 0) {
2239 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2240 		x = 0;
2241 	}
2242 	if (y < 0) {
2243 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2244 		y = 0;
2245 	}
2246 
2247 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2248 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
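	/* CUR_SIZE takes (width - 1) in the high word and (height - 1) in the low */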
2249 	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2250 	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2251 
2252 	return 0;
2253 }
2254 
2255 static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
2256 				     int x, int y)
2257 {
2258 	int ret;
2259 
2260 	dce_v6_0_lock_cursor(crtc, true);
2261 	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
2262 	dce_v6_0_lock_cursor(crtc, false);
2263 
2264 	return ret;
2265 }
2266 
2267 static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
2268 				     struct drm_file *file_priv,
2269 				     uint32_t handle,
2270 				     uint32_t width,
2271 				     uint32_t height,
2272 				     int32_t hot_x,
2273 				     int32_t hot_y)
2274 {
2275 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2276 	struct drm_gem_object *obj;
2277 	struct amdgpu_bo *aobj;
2278 	int ret;
2279 
2280 	if (!handle) {
2281 		/* turn off cursor */
2282 		dce_v6_0_hide_cursor(crtc);
2283 		obj = NULL;
2284 		goto unpin;
2285 	}
2286 
2287 	if ((width > amdgpu_crtc->max_cursor_width) ||
2288 	    (height > amdgpu_crtc->max_cursor_height)) {
2289 		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2290 		return -EINVAL;
2291 	}
2292 
2293 	obj = drm_gem_object_lookup(file_priv, handle);
2294 	if (!obj) {
2295 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2296 		return -ENOENT;
2297 	}
2298 
2299 	aobj = gem_to_amdgpu_bo(obj);
2300 	ret = amdgpu_bo_reserve(aobj, false);
2301 	if (ret != 0) {
2302 		drm_gem_object_put(obj);
2303 		return ret;
2304 	}
2305 
2306 	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2307 	amdgpu_bo_unreserve(aobj);
2308 	if (ret) {
2309 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2310 		drm_gem_object_put(obj);
2311 		return ret;
2312 	}
2313 	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2314 
2315 	dce_v6_0_lock_cursor(crtc, true);
2316 
2317 	if (width != amdgpu_crtc->cursor_width ||
2318 	    height != amdgpu_crtc->cursor_height ||
2319 	    hot_x != amdgpu_crtc->cursor_hot_x ||
2320 	    hot_y != amdgpu_crtc->cursor_hot_y) {
2321 		int x, y;
2322 
2323 		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2324 		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2325 
2326 		dce_v6_0_cursor_move_locked(crtc, x, y);
2327 
2328 		amdgpu_crtc->cursor_width = width;
2329 		amdgpu_crtc->cursor_height = height;
2330 		amdgpu_crtc->cursor_hot_x = hot_x;
2331 		amdgpu_crtc->cursor_hot_y = hot_y;
2332 	}
2333 
2334 	dce_v6_0_show_cursor(crtc);
2335 	dce_v6_0_lock_cursor(crtc, false);
2336 
2337 unpin:
2338 	if (amdgpu_crtc->cursor_bo) {
2339 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2340 		ret = amdgpu_bo_reserve(aobj, true);
2341 		if (likely(ret == 0)) {
2342 			amdgpu_bo_unpin(aobj);
2343 			amdgpu_bo_unreserve(aobj);
2344 		}
2345 		drm_gem_object_put(amdgpu_crtc->cursor_bo);
2346 	}
2347 
2348 	amdgpu_crtc->cursor_bo = obj;
2349 	return 0;
2350 }
2351 
2352 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
2353 {
2354 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2355 
2356 	if (amdgpu_crtc->cursor_bo) {
2357 		dce_v6_0_lock_cursor(crtc, true);
2358 
2359 		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2360 					    amdgpu_crtc->cursor_y);
2361 
2362 		dce_v6_0_show_cursor(crtc);
2363 		dce_v6_0_lock_cursor(crtc, false);
2364 	}
2365 }
2366 
2367 static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2368 				   u16 *blue, uint32_t size,
2369 				   struct drm_modeset_acquire_ctx *ctx)
2370 {
2371 	dce_v6_0_crtc_load_lut(crtc);
2372 
2373 	return 0;
2374 }
2375 
2376 static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2377 {
2378 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2379 
2380 	drm_crtc_cleanup(crtc);
2381 	kfree(amdgpu_crtc);
2382 }
2383 
2384 static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2385 	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
2386 	.cursor_move = dce_v6_0_crtc_cursor_move,
2387 	.gamma_set = dce_v6_0_crtc_gamma_set,
2388 	.set_config = amdgpu_display_crtc_set_config,
2389 	.destroy = dce_v6_0_crtc_destroy,
2390 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2391 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
2392 	.enable_vblank = amdgpu_enable_vblank_kms,
2393 	.disable_vblank = amdgpu_disable_vblank_kms,
2394 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2395 };
2396 
2397 static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2398 {
2399 	struct drm_device *dev = crtc->dev;
2400 	struct amdgpu_device *adev = drm_to_adev(dev);
2401 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2402 	unsigned type;
2403 
2404 	switch (mode) {
2405 	case DRM_MODE_DPMS_ON:
2406 		amdgpu_crtc->enabled = true;
2407 		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2408 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2409 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2410 		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2411 						amdgpu_crtc->crtc_id);
2412 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2413 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2414 		drm_crtc_vblank_on(crtc);
2415 		dce_v6_0_crtc_load_lut(crtc);
2416 		break;
2417 	case DRM_MODE_DPMS_STANDBY:
2418 	case DRM_MODE_DPMS_SUSPEND:
2419 	case DRM_MODE_DPMS_OFF:
2420 		drm_crtc_vblank_off(crtc);
2421 		if (amdgpu_crtc->enabled)
2422 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2423 		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2424 		amdgpu_crtc->enabled = false;
2425 		break;
2426 	}
2427 	/* adjust pm to dpms */
2428 	amdgpu_dpm_compute_clocks(adev);
2429 }
2430 
2431 static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2432 {
2433 	/* disable crtc pair power gating before programming */
2434 	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2435 	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2436 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2437 }
2438 
2439 static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2440 {
2441 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2442 	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2443 }
2444 
2445 static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2446 {
2448 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2449 	struct drm_device *dev = crtc->dev;
2450 	struct amdgpu_device *adev = drm_to_adev(dev);
2451 	struct amdgpu_atom_ss ss;
2452 	int i;
2453 
2454 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2455 	if (crtc->primary->fb) {
2456 		int r;
2457 		struct amdgpu_bo *abo;
2458 
2459 		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2460 		r = amdgpu_bo_reserve(abo, true);
2461 		if (unlikely(r))
2462 			DRM_ERROR("failed to reserve abo before unpin\n");
2463 		else {
2464 			amdgpu_bo_unpin(abo);
2465 			amdgpu_bo_unreserve(abo);
2466 		}
2467 	}
2468 	/* disable the GRPH */
2469 	dce_v6_0_grph_enable(crtc, false);
2470 
2471 	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2472 
2473 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2474 		if (adev->mode_info.crtcs[i] &&
2475 		    adev->mode_info.crtcs[i]->enabled &&
2476 		    i != amdgpu_crtc->crtc_id &&
2477 		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* another crtc is using this pll; don't
			 * turn it off
			 */
2481 			goto done;
2482 		}
2483 	}
2484 
2485 	switch (amdgpu_crtc->pll_id) {
2486 	case ATOM_PPLL1:
2487 	case ATOM_PPLL2:
2488 		/* disable the ppll */
2489 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2490 						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2491 		break;
2492 	default:
2493 		break;
2494 	}
2495 done:
2496 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2497 	amdgpu_crtc->adjusted_clock = 0;
2498 	amdgpu_crtc->encoder = NULL;
2499 	amdgpu_crtc->connector = NULL;
2500 }
2501 
2502 static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2503 				  struct drm_display_mode *mode,
2504 				  struct drm_display_mode *adjusted_mode,
2505 				  int x, int y, struct drm_framebuffer *old_fb)
2506 {
2507 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2508 
2509 	if (!amdgpu_crtc->adjusted_clock)
2510 		return -EINVAL;
2511 
2512 	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2513 	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2514 	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2515 	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2516 	amdgpu_atombios_crtc_scaler_setup(crtc);
2517 	dce_v6_0_cursor_reset(crtc);
	/* update the hw mode for dpm */
2519 	amdgpu_crtc->hw_mode = *adjusted_mode;
2520 
2521 	return 0;
2522 }
2523 
2524 static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2525 				     const struct drm_display_mode *mode,
2526 				     struct drm_display_mode *adjusted_mode)
2527 {
2529 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2530 	struct drm_device *dev = crtc->dev;
2531 	struct drm_encoder *encoder;
2532 
2533 	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2534 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2535 		if (encoder->crtc == crtc) {
2536 			amdgpu_crtc->encoder = encoder;
2537 			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2538 			break;
2539 		}
2540 	}
2541 	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2542 		amdgpu_crtc->encoder = NULL;
2543 		amdgpu_crtc->connector = NULL;
2544 		return false;
2545 	}
2546 	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2547 		return false;
2548 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2549 		return false;
2550 	/* pick pll */
2551 	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2552 	/* if we can't get a PPLL for a non-DP encoder, fail */
2553 	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2554 	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2555 		return false;
2556 
2557 	return true;
2558 }
2559 
2560 static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2561 				  struct drm_framebuffer *old_fb)
2562 {
2563 	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2564 }
2565 
2566 static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2567 					 struct drm_framebuffer *fb,
2568 					 int x, int y, enum mode_set_atomic state)
2569 {
2570 	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2571 }
2572 
2573 static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2574 	.dpms = dce_v6_0_crtc_dpms,
2575 	.mode_fixup = dce_v6_0_crtc_mode_fixup,
2576 	.mode_set = dce_v6_0_crtc_mode_set,
2577 	.mode_set_base = dce_v6_0_crtc_set_base,
2578 	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2579 	.prepare = dce_v6_0_crtc_prepare,
2580 	.commit = dce_v6_0_crtc_commit,
2581 	.disable = dce_v6_0_crtc_disable,
2582 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
2583 };
2584 
2585 static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2586 {
2587 	struct amdgpu_crtc *amdgpu_crtc;
2588 
2589 	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2590 			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2591 	if (amdgpu_crtc == NULL)
2592 		return -ENOMEM;
2593 
2594 	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2595 
2596 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2597 	amdgpu_crtc->crtc_id = index;
2598 	adev->mode_info.crtcs[index] = amdgpu_crtc;
2599 
2600 	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2601 	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2602 	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2603 	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2604 
2605 	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2606 
2607 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2608 	amdgpu_crtc->adjusted_clock = 0;
2609 	amdgpu_crtc->encoder = NULL;
2610 	amdgpu_crtc->connector = NULL;
2611 	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2612 
2613 	return 0;
2614 }
2615 
2616 static int dce_v6_0_early_init(void *handle)
2617 {
2618 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2619 
2620 	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2621 	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2622 
2623 	dce_v6_0_set_display_funcs(adev);
2624 
2625 	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
2626 
2627 	switch (adev->asic_type) {
2628 	case CHIP_TAHITI:
2629 	case CHIP_PITCAIRN:
2630 	case CHIP_VERDE:
2631 		adev->mode_info.num_hpd = 6;
2632 		adev->mode_info.num_dig = 6;
2633 		break;
2634 	case CHIP_OLAND:
2635 		adev->mode_info.num_hpd = 2;
2636 		adev->mode_info.num_dig = 2;
2637 		break;
2638 	default:
2639 		return -EINVAL;
2640 	}
2641 
2642 	dce_v6_0_set_irq_funcs(adev);
2643 
2644 	return 0;
2645 }
2646 
2647 static int dce_v6_0_sw_init(void *handle)
2648 {
2649 	int r, i;
2651 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2652 
2653 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2654 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2655 		if (r)
2656 			return r;
2657 	}
2658 
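	/* pageflip interrupts use even src_ids 8..18, one per crtc */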
2659 	for (i = 8; i < 20; i += 2) {
2660 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2661 		if (r)
2662 			return r;
2663 	}
2664 
2665 	/* HPD hotplug */
2666 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2667 	if (r)
2668 		return r;
2669 
2670 	adev->mode_info.mode_config_initialized = true;
2671 
2672 	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2673 	adev_to_drm(adev)->mode_config.async_page_flip = true;
2674 	adev_to_drm(adev)->mode_config.max_width = 16384;
2675 	adev_to_drm(adev)->mode_config.max_height = 16384;
2676 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2677 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2678 	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2679 
2680 	r = amdgpu_display_modeset_create_props(adev);
2681 	if (r)
2682 		return r;
2686 
2687 	/* allocate crtcs */
2688 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2689 		r = dce_v6_0_crtc_init(adev, i);
2690 		if (r)
2691 			return r;
2692 	}
2693 
	if (!amdgpu_atombios_get_connector_info_from_object_table(adev))
		return -EINVAL;

	amdgpu_display_print_display_setup(adev_to_drm(adev));
2699 
2700 	/* setup afmt */
2701 	r = dce_v6_0_afmt_init(adev);
2702 	if (r)
2703 		return r;
2704 
2705 	r = dce_v6_0_audio_init(adev);
2706 	if (r)
2707 		return r;
2708 
2709 	/* Disable vblank IRQs aggressively for power-saving */
2710 	/* XXX: can this be enabled for DC? */
2711 	adev_to_drm(adev)->vblank_disable_immediate = true;
2712 
2713 	r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
2714 	if (r)
2715 		return r;
2716 
2717 	/* Pre-DCE11 */
2718 	INIT_WORK(&adev->hotplug_work,
2719 		  amdgpu_display_hotplug_work_func);
2720 
2721 	drm_kms_helper_poll_init(adev_to_drm(adev));
2722 
2723 	return r;
2724 }
2725 
2726 static int dce_v6_0_sw_fini(void *handle)
2727 {
2728 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2729 
2730 	kfree(adev->mode_info.bios_hardcoded_edid);
2731 
2732 	drm_kms_helper_poll_fini(adev_to_drm(adev));
2733 
2734 	dce_v6_0_audio_fini(adev);
2735 	dce_v6_0_afmt_fini(adev);
2736 
2737 	drm_mode_config_cleanup(adev_to_drm(adev));
2738 	adev->mode_info.mode_config_initialized = false;
2739 
2740 	return 0;
2741 }
2742 
2743 static int dce_v6_0_hw_init(void *handle)
2744 {
2745 	int i;
2746 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2747 
2748 	/* disable vga render */
2749 	dce_v6_0_set_vga_render_state(adev, false);
2750 	/* init dig PHYs, disp eng pll */
2751 	amdgpu_atombios_encoder_init_dig(adev);
2752 	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2753 
2754 	/* initialize hpd */
2755 	dce_v6_0_hpd_init(adev);
2756 
	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2760 
2761 	dce_v6_0_pageflip_interrupt_init(adev);
2762 
2763 	return 0;
2764 }
2765 
2766 static int dce_v6_0_hw_fini(void *handle)
2767 {
2768 	int i;
2769 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2770 
2771 	dce_v6_0_hpd_fini(adev);
2772 
	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2776 
2777 	dce_v6_0_pageflip_interrupt_fini(adev);
2778 
2779 	flush_work(&adev->hotplug_work);
2780 
2781 	return 0;
2782 }
2783 
2784 static int dce_v6_0_suspend(void *handle)
2785 {
2786 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2787 	int r;
2788 
2789 	r = amdgpu_display_suspend_helper(adev);
2790 	if (r)
2791 		return r;
2792 	adev->mode_info.bl_level =
2793 		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2794 
2795 	return dce_v6_0_hw_fini(handle);
2796 }
2797 
2798 static int dce_v6_0_resume(void *handle)
2799 {
2800 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2801 	int ret;
2802 
2803 	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2804 							   adev->mode_info.bl_level);
2805 
2806 	ret = dce_v6_0_hw_init(handle);
2807 
2808 	/* turn on the BL */
2809 	if (adev->mode_info.bl_encoder) {
2810 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2811 								  adev->mode_info.bl_encoder);
2812 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2813 						    bl_level);
2814 	}
2815 	if (ret)
2816 		return ret;
2817 
2818 	return amdgpu_display_resume_helper(adev);
2819 }
2820 
2821 static bool dce_v6_0_is_idle(void *handle)
2822 {
2823 	return true;
2824 }
2825 
2826 static int dce_v6_0_wait_for_idle(void *handle)
2827 {
2828 	return 0;
2829 }
2830 
2831 static int dce_v6_0_soft_reset(void *handle)
	DRM_INFO("dce_v6_0_soft_reset: not implemented\n");
2833 	DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
2834 	return 0;
2835 }
2836 
2837 static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2838 						     int crtc,
2839 						     enum amdgpu_interrupt_state state)
2840 {
2841 	u32 reg_block, interrupt_mask;
2842 
2843 	if (crtc >= adev->mode_info.num_crtc) {
2844 		DRM_DEBUG("invalid crtc %d\n", crtc);
2845 		return;
2846 	}
2847 
2848 	switch (crtc) {
2849 	case 0:
2850 		reg_block = SI_CRTC0_REGISTER_OFFSET;
2851 		break;
2852 	case 1:
2853 		reg_block = SI_CRTC1_REGISTER_OFFSET;
2854 		break;
2855 	case 2:
2856 		reg_block = SI_CRTC2_REGISTER_OFFSET;
2857 		break;
2858 	case 3:
2859 		reg_block = SI_CRTC3_REGISTER_OFFSET;
2860 		break;
2861 	case 4:
2862 		reg_block = SI_CRTC4_REGISTER_OFFSET;
2863 		break;
2864 	case 5:
2865 		reg_block = SI_CRTC5_REGISTER_OFFSET;
2866 		break;
2867 	default:
2868 		DRM_DEBUG("invalid crtc %d\n", crtc);
2869 		return;
2870 	}
2871 
2872 	switch (state) {
2873 	case AMDGPU_IRQ_STATE_DISABLE:
2874 		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2875 		interrupt_mask &= ~VBLANK_INT_MASK;
2876 		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2877 		break;
2878 	case AMDGPU_IRQ_STATE_ENABLE:
2879 		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2880 		interrupt_mask |= VBLANK_INT_MASK;
2881 		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2882 		break;
2883 	default:
2884 		break;
2885 	}
2886 }
2887 
2888 static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2889 						    int crtc,
2890 						    enum amdgpu_interrupt_state state)
2891 {
2892 
2893 }
2894 
2895 static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2896 					    struct amdgpu_irq_src *src,
2897 					    unsigned type,
2898 					    enum amdgpu_interrupt_state state)
2899 {
2900 	u32 dc_hpd_int_cntl;
2901 
2902 	if (type >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", type);
2904 		return 0;
2905 	}
2906 
2907 	switch (state) {
2908 	case AMDGPU_IRQ_STATE_DISABLE:
2909 		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2910 		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
2911 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2912 		break;
2913 	case AMDGPU_IRQ_STATE_ENABLE:
2914 		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2915 		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
2916 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2917 		break;
2918 	default:
2919 		break;
2920 	}
2921 
2922 	return 0;
2923 }
2924 
2925 static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2926 					     struct amdgpu_irq_src *src,
2927 					     unsigned type,
2928 					     enum amdgpu_interrupt_state state)
2929 {
2930 	switch (type) {
2931 	case AMDGPU_CRTC_IRQ_VBLANK1:
2932 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2933 		break;
2934 	case AMDGPU_CRTC_IRQ_VBLANK2:
2935 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2936 		break;
2937 	case AMDGPU_CRTC_IRQ_VBLANK3:
2938 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2939 		break;
2940 	case AMDGPU_CRTC_IRQ_VBLANK4:
2941 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2942 		break;
2943 	case AMDGPU_CRTC_IRQ_VBLANK5:
2944 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2945 		break;
2946 	case AMDGPU_CRTC_IRQ_VBLANK6:
2947 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2948 		break;
2949 	case AMDGPU_CRTC_IRQ_VLINE1:
2950 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
2951 		break;
2952 	case AMDGPU_CRTC_IRQ_VLINE2:
2953 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
2954 		break;
2955 	case AMDGPU_CRTC_IRQ_VLINE3:
2956 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
2957 		break;
2958 	case AMDGPU_CRTC_IRQ_VLINE4:
2959 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
2960 		break;
2961 	case AMDGPU_CRTC_IRQ_VLINE5:
2962 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
2963 		break;
2964 	case AMDGPU_CRTC_IRQ_VLINE6:
2965 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
2966 		break;
2967 	default:
2968 		break;
2969 	}
2970 	return 0;
2971 }
2972 
2973 static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2974 			     struct amdgpu_irq_src *source,
2975 			     struct amdgpu_iv_entry *entry)
2976 {
2977 	unsigned crtc = entry->src_id - 1;
2978 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2979 	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
2980 								    crtc);
2981 
2982 	switch (entry->src_data[0]) {
2983 	case 0: /* vblank */
2984 		if (disp_int & interrupt_status_offsets[crtc].vblank)
2985 			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
2986 		else
2987 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2988 
2989 		if (amdgpu_irq_enabled(adev, source, irq_type)) {
2990 			drm_handle_vblank(adev_to_drm(adev), crtc);
2991 		}
2992 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
2993 		break;
2994 	case 1: /* vline */
2995 		if (disp_int & interrupt_status_offsets[crtc].vline)
2996 			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
2997 		else
2998 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2999 
3000 		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3001 		break;
3002 	default:
3003 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3004 		break;
3005 	}
3006 
3007 	return 0;
3008 }
3009 
3010 static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3011 						 struct amdgpu_irq_src *src,
3012 						 unsigned type,
3013 						 enum amdgpu_interrupt_state state)
3014 {
3015 	u32 reg;
3016 
3017 	if (type >= adev->mode_info.num_crtc) {
3018 		DRM_ERROR("invalid pageflip crtc %d\n", type);
3019 		return -EINVAL;
3020 	}
3021 
3022 	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3023 	if (state == AMDGPU_IRQ_STATE_DISABLE)
3024 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3025 		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3026 	else
3027 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3028 		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3029 
3030 	return 0;
3031 }
3032 
3033 static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
3034 				 struct amdgpu_irq_src *source,
3035 				 struct amdgpu_iv_entry *entry)
3036 {
3037 	unsigned long flags;
3038 	unsigned crtc_id;
3039 	struct amdgpu_crtc *amdgpu_crtc;
3040 	struct amdgpu_flip_work *works;
3041 
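	/* even src_ids 8..18 map to crtcs 0..5; validate before indexing */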
	crtc_id = (entry->src_id - 8) >> 1;

	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3049 
3050 	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3051 	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3052 		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3053 		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3054 
	/* the IRQ can fire before the crtc has been fully initialized */
3056 	if (amdgpu_crtc == NULL)
3057 		return 0;
3058 
3059 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3060 	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3062 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3063 						"AMDGPU_FLIP_SUBMITTED(%d)\n",
3064 						amdgpu_crtc->pflip_status,
3065 						AMDGPU_FLIP_SUBMITTED);
3066 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3067 		return 0;
3068 	}
3069 
3070 	/* page flip completed. clean up */
3071 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3072 	amdgpu_crtc->pflip_works = NULL;
3073 
	/* wake up userspace */
3075 	if (works->event)
3076 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3077 
3078 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3079 
3080 	drm_crtc_vblank_put(&amdgpu_crtc->base);
3081 	schedule_work(&works->unpin_work);
3082 
3083 	return 0;
3084 }
3085 
3086 static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
3087 			    struct amdgpu_irq_src *source,
3088 			    struct amdgpu_iv_entry *entry)
3089 {
3090 	uint32_t disp_int, mask, tmp;
3091 	unsigned hpd;
3092 
3093 	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3094 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3095 		return 0;
3096 	}
3097 
3098 	hpd = entry->src_data[0];
3099 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3100 	mask = interrupt_status_offsets[hpd].hpd;
3101 
3102 	if (disp_int & mask) {
3103 		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3104 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3105 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3106 		schedule_work(&adev->hotplug_work);
3107 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3108 	}
3109 
3110 	return 0;
3111 
3113 
3114 static int dce_v6_0_set_clockgating_state(void *handle,
3115 					  enum amd_clockgating_state state)
3116 {
3117 	return 0;
3118 }
3119 
3120 static int dce_v6_0_set_powergating_state(void *handle,
3121 					  enum amd_powergating_state state)
3122 {
3123 	return 0;
3124 }
3125 
3126 static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
3127 	.name = "dce_v6_0",
3128 	.early_init = dce_v6_0_early_init,
3129 	.late_init = NULL,
3130 	.sw_init = dce_v6_0_sw_init,
3131 	.sw_fini = dce_v6_0_sw_fini,
3132 	.hw_init = dce_v6_0_hw_init,
3133 	.hw_fini = dce_v6_0_hw_fini,
3134 	.suspend = dce_v6_0_suspend,
3135 	.resume = dce_v6_0_resume,
3136 	.is_idle = dce_v6_0_is_idle,
3137 	.wait_for_idle = dce_v6_0_wait_for_idle,
3138 	.soft_reset = dce_v6_0_soft_reset,
3139 	.set_clockgating_state = dce_v6_0_set_clockgating_state,
3140 	.set_powergating_state = dce_v6_0_set_powergating_state,
3141 };
3142 
3143 static void
3144 dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
3145 			  struct drm_display_mode *mode,
3146 			  struct drm_display_mode *adjusted_mode)
3147 {
3149 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3150 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3151 
3152 	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3153 
3154 	/* need to call this here rather than in prepare() since we need some crtc info */
3155 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3156 
	/* the scaler setup clears this on some chips; reprogram it */
3158 	dce_v6_0_set_interleave(encoder->crtc, mode);
3159 
3160 	if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
3161 		dce_v6_0_afmt_enable(encoder, true);
3162 		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
3163 	}
3164 }
3165 
3166 static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
3167 {
3169 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
3170 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3171 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3172 
3173 	if ((amdgpu_encoder->active_device &
3174 	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3175 	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3176 	     ENCODER_OBJECT_ID_NONE)) {
3177 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3178 		if (dig) {
3179 			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
3180 			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3181 				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3182 		}
3183 	}
3184 
3185 	amdgpu_atombios_scratch_regs_lock(adev, true);
3186 
3187 	if (connector) {
3188 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3189 
3190 		/* select the clock/data port if it uses a router */
3191 		if (amdgpu_connector->router.cd_valid)
3192 			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3193 
3194 		/* turn eDP panel on for mode set */
3195 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3196 			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3197 							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3198 	}
3199 
3200 	/* this is needed for the pll/ss setup to work correctly in some cases */
3201 	amdgpu_atombios_encoder_set_crtc_source(encoder);
3202 	/* set up the FMT blocks */
3203 	dce_v6_0_program_fmt(encoder);
3204 }
3205 
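/**
 * dce_v6_0_encoder_commit - post-modeset encoder enable
 *
 * @encoder: drm encoder
 *
 * Powers the encoder back on now that the crtc is configured and
 * drops the atombios scratch register lock taken in prepare().
 */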
3206 static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
3207 {
3209 	struct drm_device *dev = encoder->dev;
3210 	struct amdgpu_device *adev = drm_to_adev(dev);
3211 
3212 	/* need to call this here as we need the crtc set up */
3213 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3214 	amdgpu_atombios_scratch_regs_lock(adev, false);
3215 }
3216 
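/**
 * dce_v6_0_encoder_disable - shut an encoder down
 *
 * @encoder: drm encoder
 *
 * Powers the encoder off, disables the AFMT block for HDMI and DP
 * modes and releases the DIG encoder assignment so the next modeset
 * can reuse it.
 */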
3217 static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
3218 {
3220 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3221 	struct amdgpu_encoder_atom_dig *dig;
3222 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3223 
3224 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3225 
3226 	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3227 		if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
3228 			dce_v6_0_afmt_enable(encoder, false);
3229 		dig = amdgpu_encoder->enc_priv;
		/* enc_priv may be NULL if its allocation failed in encoder_add,
		 * mirroring the guard used in encoder_prepare
		 */
		if (dig)
3230 			dig->dig_encoder = -1;
3231 	}
3232 	amdgpu_encoder->active_device = 0;
3233 }
3234 
3235 /*
 * External encoders are programmed by their primary DIG encoder,
 * so these helper callbacks are intentionally empty.
 */
3236 static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
3237 {
3239 }
3240 
3241 static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
3242 {
3244 }
3245 
3246 static void
3247 dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
3248 		      struct drm_display_mode *mode,
3249 		      struct drm_display_mode *adjusted_mode)
3250 {
3252 }
3253 
3254 static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
3255 {
3257 }
3258 
3259 static void
3260 dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
3261 {
3263 }
3264 
3265 static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
3266 				    const struct drm_display_mode *mode,
3267 				    struct drm_display_mode *adjusted_mode)
3268 {
3269 	return true;
3270 }
3271 
3272 static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
3273 	.dpms = dce_v6_0_ext_dpms,
3274 	.mode_fixup = dce_v6_0_ext_mode_fixup,
3275 	.prepare = dce_v6_0_ext_prepare,
3276 	.mode_set = dce_v6_0_ext_mode_set,
3277 	.commit = dce_v6_0_ext_commit,
3278 	.disable = dce_v6_0_ext_disable,
3279 	/* no detect for TMDS/LVDS yet */
3280 };
3281 
3282 static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
3283 	.dpms = amdgpu_atombios_encoder_dpms,
3284 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3285 	.prepare = dce_v6_0_encoder_prepare,
3286 	.mode_set = dce_v6_0_encoder_mode_set,
3287 	.commit = dce_v6_0_encoder_commit,
3288 	.disable = dce_v6_0_encoder_disable,
3289 	.detect = amdgpu_atombios_encoder_dig_detect,
3290 };
3291 
3292 static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
3293 	.dpms = amdgpu_atombios_encoder_dpms,
3294 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3295 	.prepare = dce_v6_0_encoder_prepare,
3296 	.mode_set = dce_v6_0_encoder_mode_set,
3297 	.commit = dce_v6_0_encoder_commit,
3298 	.detect = amdgpu_atombios_encoder_dac_detect,
3299 };
3300 
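/**
 * dce_v6_0_encoder_destroy - tear down an encoder
 *
 * @encoder: drm encoder
 *
 * Removes the backlight interface for LCD encoders, then frees the
 * encoder private data and the encoder itself.
 */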
3301 static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
3302 {
3303 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3304 	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3305 		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3306 	kfree(amdgpu_encoder->enc_priv);
3307 	drm_encoder_cleanup(encoder);
3308 	kfree(amdgpu_encoder);
3309 }
3310 
3311 static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
3312 	.destroy = dce_v6_0_encoder_destroy,
3313 };
3314 
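/**
 * dce_v6_0_encoder_add - register an encoder with the DRM core
 *
 * @adev: amdgpu_device pointer
 * @encoder_enum: encoder object enum from the bios tables
 * @supported_device: bitmask of the devices this encoder can drive
 * @caps: encoder capability flags
 *
 * If an encoder with this enum already exists, only the supported
 * device mask is extended.  Otherwise a new encoder is allocated,
 * possible_crtcs is derived from the crtc count, and the DAC, DIG or
 * external helper vtable is bound based on the encoder object id.
 */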
3315 static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
3316 				 uint32_t encoder_enum,
3317 				 uint32_t supported_device,
3318 				 u16 caps)
3319 {
3320 	struct drm_device *dev = adev_to_drm(adev);
3321 	struct drm_encoder *encoder;
3322 	struct amdgpu_encoder *amdgpu_encoder;
3323 
3324 	/* see if we already added it */
3325 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3326 		amdgpu_encoder = to_amdgpu_encoder(encoder);
3327 		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3328 			amdgpu_encoder->devices |= supported_device;
3329 			return;
3330 		}
3332 	}
3333 
3334 	/* add a new one */
3335 	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3336 	if (!amdgpu_encoder)
3337 		return;
3338 
3339 	encoder = &amdgpu_encoder->base;
3340 	switch (adev->mode_info.num_crtc) {
3341 	case 1:
3342 		encoder->possible_crtcs = 0x1;
3343 		break;
3344 	case 2:
3345 	default:
3346 		encoder->possible_crtcs = 0x3;
3347 		break;
3348 	case 4:
3349 		encoder->possible_crtcs = 0xf;
3350 		break;
3351 	case 6:
3352 		encoder->possible_crtcs = 0x3f;
3353 		break;
3354 	}
3355 
3356 	amdgpu_encoder->enc_priv = NULL;
3357 	amdgpu_encoder->encoder_enum = encoder_enum;
3358 	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3359 	amdgpu_encoder->devices = supported_device;
3360 	amdgpu_encoder->rmx_type = RMX_OFF;
3361 	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3362 	amdgpu_encoder->is_ext_encoder = false;
3363 	amdgpu_encoder->caps = caps;
3364 
3365 	switch (amdgpu_encoder->encoder_id) {
3366 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3367 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3368 		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3369 				 DRM_MODE_ENCODER_DAC, NULL);
3370 		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
3371 		break;
3372 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3373 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3374 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3375 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3376 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3377 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3378 			amdgpu_encoder->rmx_type = RMX_FULL;
3379 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3380 					 DRM_MODE_ENCODER_LVDS, NULL);
3381 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3382 		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3383 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3384 					 DRM_MODE_ENCODER_DAC, NULL);
3385 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3386 		} else {
3387 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3388 					 DRM_MODE_ENCODER_TMDS, NULL);
3389 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3390 		}
3391 		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
3392 		break;
3393 	case ENCODER_OBJECT_ID_SI170B:
3394 	case ENCODER_OBJECT_ID_CH7303:
3395 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3396 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3397 	case ENCODER_OBJECT_ID_TITFP513:
3398 	case ENCODER_OBJECT_ID_VT1623:
3399 	case ENCODER_OBJECT_ID_HDMI_SI1930:
3400 	case ENCODER_OBJECT_ID_TRAVIS:
3401 	case ENCODER_OBJECT_ID_NUTMEG:
3402 		/* these are handled by the primary encoders */
3403 		amdgpu_encoder->is_ext_encoder = true;
3404 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3405 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3406 					 DRM_MODE_ENCODER_LVDS, NULL);
3407 		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3408 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3409 					 DRM_MODE_ENCODER_DAC, NULL);
3410 		else
3411 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3412 					 DRM_MODE_ENCODER_TMDS, NULL);
3413 		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3414 		break;
3415 	}
3416 }
3417 
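/* asic-specific display callbacks used by the shared amdgpu display code */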
3418 static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3419 	.bandwidth_update = &dce_v6_0_bandwidth_update,
3420 	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
3421 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3422 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3423 	.hpd_sense = &dce_v6_0_hpd_sense,
3424 	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3425 	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3426 	.page_flip = &dce_v6_0_page_flip,
3427 	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3428 	.add_encoder = &dce_v6_0_encoder_add,
3429 	.add_connector = &amdgpu_connector_add,
3430 };
3431 
3432 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3433 {
3434 	adev->mode_info.funcs = &dce_v6_0_display_funcs;
3435 }
3436 
3437 static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3438 	.set = dce_v6_0_set_crtc_interrupt_state,
3439 	.process = dce_v6_0_crtc_irq,
3440 };
3441 
3442 static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3443 	.set = dce_v6_0_set_pageflip_interrupt_state,
3444 	.process = dce_v6_0_pageflip_irq,
3445 };
3446 
3447 static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3448 	.set = dce_v6_0_set_hpd_interrupt_state,
3449 	.process = dce_v6_0_hpd_irq,
3450 };
3451 
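/**
 * dce_v6_0_set_irq_funcs - wire up the display interrupt sources
 *
 * @adev: amdgpu_device pointer
 *
 * Registers the crtc (vblank/vline), pageflip and hotplug interrupt
 * sources and sizes each one from the crtc and hpd pad counts of the
 * asic.
 */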
3452 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3453 {
3454 	if (adev->mode_info.num_crtc > 0)
3455 		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3456 	else
3457 		adev->crtc_irq.num_types = 0;
3458 	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3459 
3460 	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3461 	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3462 
3463 	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3464 	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3465 }
3466 
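/*
 * IP block descriptors for the two DCE revisions handled by this file.
 * Both share the same amd_ip_funcs; only the reported minor revision
 * differs (DCE 6.4 presumably being the Oland variant).
 */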
3467 const struct amdgpu_ip_block_version dce_v6_0_ip_block =
3468 {
3469 	.type = AMD_IP_BLOCK_TYPE_DCE,
3470 	.major = 6,
3471 	.minor = 0,
3472 	.rev = 0,
3473 	.funcs = &dce_v6_0_ip_funcs,
3474 };
3475 
3476 const struct amdgpu_ip_block_version dce_v6_4_ip_block =
3477 {
3478 	.type = AMD_IP_BLOCK_TYPE_DCE,
3479 	.major = 6,
3480 	.minor = 4,
3481 	.rev = 0,
3482 	.funcs = &dce_v6_0_ip_funcs,
3483 };
3484