1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <drm/drm_fourcc.h>
25 #include <drm/drm_modeset_helper.h>
26 #include <drm/drm_modeset_helper_vtables.h>
27 #include <drm/drm_vblank.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_pm.h"
31 #include "amdgpu_i2c.h"
32 #include "vid.h"
33 #include "atom.h"
34 #include "amdgpu_atombios.h"
35 #include "atombios_crtc.h"
36 #include "atombios_encoders.h"
37 #include "amdgpu_pll.h"
38 #include "amdgpu_connectors.h"
39 #include "amdgpu_display.h"
40 #include "dce_v10_0.h"
41 
42 #include "dce/dce_10_0_d.h"
43 #include "dce/dce_10_0_sh_mask.h"
44 #include "dce/dce_10_0_enum.h"
45 #include "oss/oss_3_0_d.h"
46 #include "oss/oss_3_0_sh_mask.h"
47 #include "gmc/gmc_8_1_d.h"
48 #include "gmc/gmc_8_1_sh_mask.h"
49 
50 #include "ivsrcid/ivsrcid_vislands30.h"
51 
52 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
53 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
54 
55 static const u32 crtc_offsets[] =
56 {
57 	CRTC0_REGISTER_OFFSET,
58 	CRTC1_REGISTER_OFFSET,
59 	CRTC2_REGISTER_OFFSET,
60 	CRTC3_REGISTER_OFFSET,
61 	CRTC4_REGISTER_OFFSET,
62 	CRTC5_REGISTER_OFFSET,
63 	CRTC6_REGISTER_OFFSET
64 };
65 
66 static const u32 hpd_offsets[] =
67 {
68 	HPD0_REGISTER_OFFSET,
69 	HPD1_REGISTER_OFFSET,
70 	HPD2_REGISTER_OFFSET,
71 	HPD3_REGISTER_OFFSET,
72 	HPD4_REGISTER_OFFSET,
73 	HPD5_REGISTER_OFFSET
74 };
75 
76 static const uint32_t dig_offsets[] = {
77 	DIG0_REGISTER_OFFSET,
78 	DIG1_REGISTER_OFFSET,
79 	DIG2_REGISTER_OFFSET,
80 	DIG3_REGISTER_OFFSET,
81 	DIG4_REGISTER_OFFSET,
82 	DIG5_REGISTER_OFFSET,
83 	DIG6_REGISTER_OFFSET
84 };
85 
86 static const struct {
87 	uint32_t        reg;
88 	uint32_t        vblank;
89 	uint32_t        vline;
90 	uint32_t        hpd;
91 
92 } interrupt_status_offsets[] = { {
93 	.reg = mmDISP_INTERRUPT_STATUS,
94 	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
95 	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
96 	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
97 }, {
98 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
99 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
100 	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
101 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
102 }, {
103 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
104 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
105 	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
106 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
107 }, {
108 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
109 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
110 	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
111 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
112 }, {
113 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
114 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
115 	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
116 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
117 }, {
118 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
119 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
120 	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
121 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
122 } };
123 
124 static const u32 golden_settings_tonga_a11[] =
125 {
126 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
127 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
128 	mmFBC_MISC, 0x1f311fff, 0x12300000,
129 	mmHDMI_CONTROL, 0x31000111, 0x00000011,
130 };
131 
132 static const u32 tonga_mgcg_cgcg_init[] =
133 {
134 	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
135 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
136 };
137 
138 static const u32 golden_settings_fiji_a10[] =
139 {
140 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
141 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
142 	mmFBC_MISC, 0x1f311fff, 0x12300000,
143 	mmHDMI_CONTROL, 0x31000111, 0x00000011,
144 };
145 
146 static const u32 fiji_mgcg_cgcg_init[] =
147 {
148 	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
149 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
150 };
151 
152 static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
153 {
154 	switch (adev->asic_type) {
155 	case CHIP_FIJI:
156 		amdgpu_device_program_register_sequence(adev,
157 							fiji_mgcg_cgcg_init,
158 							ARRAY_SIZE(fiji_mgcg_cgcg_init));
159 		amdgpu_device_program_register_sequence(adev,
160 							golden_settings_fiji_a10,
161 							ARRAY_SIZE(golden_settings_fiji_a10));
162 		break;
163 	case CHIP_TONGA:
164 		amdgpu_device_program_register_sequence(adev,
165 							tonga_mgcg_cgcg_init,
166 							ARRAY_SIZE(tonga_mgcg_cgcg_init));
167 		amdgpu_device_program_register_sequence(adev,
168 							golden_settings_tonga_a11,
169 							ARRAY_SIZE(golden_settings_tonga_a11));
170 		break;
171 	default:
172 		break;
173 	}
174 }
175 
176 static u32 dce_v10_0_audio_endpt_rreg(struct amdgpu_device *adev,
177 				     u32 block_offset, u32 reg)
178 {
179 	unsigned long flags;
180 	u32 r;
181 
182 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
183 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
184 	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
185 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
186 
187 	return r;
188 }
189 
190 static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev,
191 				      u32 block_offset, u32 reg, u32 v)
192 {
193 	unsigned long flags;
194 
195 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
196 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
197 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
198 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
199 }
200 
201 static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
202 {
203 	if (crtc >= adev->mode_info.num_crtc)
204 		return 0;
205 	else
206 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
207 }
208 
209 static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
210 {
211 	unsigned i;
212 
213 	/* Enable pflip interrupts */
214 	for (i = 0; i < adev->mode_info.num_crtc; i++)
215 		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
216 }
217 
218 static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
219 {
220 	unsigned i;
221 
222 	/* Disable pflip interrupts */
223 	for (i = 0; i < adev->mode_info.num_crtc; i++)
224 		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
225 }
226 
227 /**
228  * dce_v10_0_page_flip - pageflip callback.
229  *
230  * @adev: amdgpu_device pointer
231  * @crtc_id: crtc to cleanup pageflip on
232  * @crtc_base: new address of the crtc (GPU MC address)
233  * @async: asynchronous flip
234  *
235  * Triggers the actual pageflip by updating the primary
236  * surface base address.
237  */
238 static void dce_v10_0_page_flip(struct amdgpu_device *adev,
239 				int crtc_id, u64 crtc_base, bool async)
240 {
241 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
242 	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
243 	u32 tmp;
244 
245 	/* flip at hsync for async, default is vsync */
246 	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
247 	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
248 			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
249 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
250 	/* update pitch */
251 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
252 	       fb->pitches[0] / fb->format->cpp[0]);
253 	/* update the primary scanout address */
254 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
255 	       upper_32_bits(crtc_base));
256 	/* writing to the low address triggers the update */
257 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
258 	       lower_32_bits(crtc_base));
259 	/* post the write */
260 	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
261 }
262 
263 static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
264 					u32 *vbl, u32 *position)
265 {
266 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
267 		return -EINVAL;
268 
269 	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
270 	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
271 
272 	return 0;
273 }
274 
275 /**
276  * dce_v10_0_hpd_sense - hpd sense callback.
277  *
278  * @adev: amdgpu_device pointer
279  * @hpd: hpd (hotplug detect) pin
280  *
281  * Checks if a digital monitor is connected (evergreen+).
282  * Returns true if connected, false if not connected.
283  */
284 static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
285 			       enum amdgpu_hpd_id hpd)
286 {
287 	bool connected = false;
288 
289 	if (hpd >= adev->mode_info.num_hpd)
290 		return connected;
291 
292 	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
293 	    DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
294 		connected = true;
295 
296 	return connected;
297 }
298 
299 /**
300  * dce_v10_0_hpd_set_polarity - hpd set polarity callback.
301  *
302  * @adev: amdgpu_device pointer
303  * @hpd: hpd (hotplug detect) pin
304  *
305  * Set the polarity of the hpd pin (evergreen+).
306  */
307 static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
308 				      enum amdgpu_hpd_id hpd)
309 {
310 	u32 tmp;
311 	bool connected = dce_v10_0_hpd_sense(adev, hpd);
312 
313 	if (hpd >= adev->mode_info.num_hpd)
314 		return;
315 
316 	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
317 	if (connected)
318 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
319 	else
320 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
321 	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
322 }
323 
324 /**
325  * dce_v10_0_hpd_init - hpd setup callback.
326  *
327  * @adev: amdgpu_device pointer
328  *
329  * Setup the hpd pins used by the card (evergreen+).
330  * Enable the pin, set the polarity, and enable the hpd interrupts.
331  */
332 static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
333 {
334 	struct drm_device *dev = adev_to_drm(adev);
335 	struct drm_connector *connector;
336 	struct drm_connector_list_iter iter;
337 	u32 tmp;
338 
339 	drm_connector_list_iter_begin(dev, &iter);
340 	drm_for_each_connector_iter(connector, &iter) {
341 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
342 
343 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
344 			continue;
345 
346 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
347 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
348 			/* don't try to enable hpd on eDP or LVDS avoid breaking the
349 			 * aux dp channel on imac and help (but not completely fix)
350 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
351 			 * also avoid interrupt storms during dpms.
352 			 */
353 			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
354 			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
355 			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
356 			continue;
357 		}
358 
359 		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
360 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
361 		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
362 
363 		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
364 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
365 				    DC_HPD_CONNECT_INT_DELAY,
366 				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
367 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
368 				    DC_HPD_DISCONNECT_INT_DELAY,
369 				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
370 		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
371 
372 		dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
373 		amdgpu_irq_get(adev, &adev->hpd_irq,
374 			       amdgpu_connector->hpd.hpd);
375 	}
376 	drm_connector_list_iter_end(&iter);
377 }
378 
379 /**
380  * dce_v10_0_hpd_fini - hpd tear down callback.
381  *
382  * @adev: amdgpu_device pointer
383  *
384  * Tear down the hpd pins used by the card (evergreen+).
385  * Disable the hpd interrupts.
386  */
387 static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
388 {
389 	struct drm_device *dev = adev_to_drm(adev);
390 	struct drm_connector *connector;
391 	struct drm_connector_list_iter iter;
392 	u32 tmp;
393 
394 	drm_connector_list_iter_begin(dev, &iter);
395 	drm_for_each_connector_iter(connector, &iter) {
396 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
397 
398 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
399 			continue;
400 
401 		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
402 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
403 		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
404 
405 		amdgpu_irq_put(adev, &adev->hpd_irq,
406 			       amdgpu_connector->hpd.hpd);
407 	}
408 	drm_connector_list_iter_end(&iter);
409 }
410 
411 static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
412 {
413 	return mmDC_GPIO_HPD_A;
414 }
415 
416 static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
417 {
418 	u32 crtc_hung = 0;
419 	u32 crtc_status[6];
420 	u32 i, j, tmp;
421 
422 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
423 		tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
424 		if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
425 			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
426 			crtc_hung |= (1 << i);
427 		}
428 	}
429 
430 	for (j = 0; j < 10; j++) {
431 		for (i = 0; i < adev->mode_info.num_crtc; i++) {
432 			if (crtc_hung & (1 << i)) {
433 				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
434 				if (tmp != crtc_status[i])
435 					crtc_hung &= ~(1 << i);
436 			}
437 		}
438 		if (crtc_hung == 0)
439 			return false;
440 		udelay(100);
441 	}
442 
443 	return true;
444 }
445 
446 static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
447 					   bool render)
448 {
449 	u32 tmp;
450 
451 	/* Lockout access through VGA aperture*/
452 	tmp = RREG32(mmVGA_HDP_CONTROL);
453 	if (render)
454 		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
455 	else
456 		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
457 	WREG32(mmVGA_HDP_CONTROL, tmp);
458 
459 	/* disable VGA render */
460 	tmp = RREG32(mmVGA_RENDER_CONTROL);
461 	if (render)
462 		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
463 	else
464 		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
465 	WREG32(mmVGA_RENDER_CONTROL, tmp);
466 }
467 
468 static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev)
469 {
470 	int num_crtc = 0;
471 
472 	switch (adev->asic_type) {
473 	case CHIP_FIJI:
474 	case CHIP_TONGA:
475 		num_crtc = 6;
476 		break;
477 	default:
478 		num_crtc = 0;
479 	}
480 	return num_crtc;
481 }
482 
483 void dce_v10_0_disable_dce(struct amdgpu_device *adev)
484 {
485 	/*Disable VGA render and enabled crtc, if has DCE engine*/
486 	if (amdgpu_atombios_has_dce_engine_info(adev)) {
487 		u32 tmp;
488 		int crtc_enabled, i;
489 
490 		dce_v10_0_set_vga_render_state(adev, false);
491 
492 		/*Disable crtc*/
493 		for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) {
494 			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
495 									 CRTC_CONTROL, CRTC_MASTER_EN);
496 			if (crtc_enabled) {
497 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
498 				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
499 				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
500 				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
501 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
502 			}
503 		}
504 	}
505 }
506 
507 static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
508 {
509 	struct drm_device *dev = encoder->dev;
510 	struct amdgpu_device *adev = drm_to_adev(dev);
511 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
512 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
513 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
514 	int bpc = 0;
515 	u32 tmp = 0;
516 	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
517 
518 	if (connector) {
519 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
520 		bpc = amdgpu_connector_get_monitor_bpc(connector);
521 		dither = amdgpu_connector->dither;
522 	}
523 
524 	/* LVDS/eDP FMT is set up by atom */
525 	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
526 		return;
527 
528 	/* not needed for analog */
529 	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
530 	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
531 		return;
532 
533 	if (bpc == 0)
534 		return;
535 
536 	switch (bpc) {
537 	case 6:
538 		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
539 			/* XXX sort out optimal dither settings */
540 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
541 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
542 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
543 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
544 		} else {
545 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
546 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
547 		}
548 		break;
549 	case 8:
550 		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
551 			/* XXX sort out optimal dither settings */
552 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
553 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
554 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
555 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
556 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
557 		} else {
558 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
559 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
560 		}
561 		break;
562 	case 10:
563 		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
564 			/* XXX sort out optimal dither settings */
565 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
566 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
567 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
568 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
569 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
570 		} else {
571 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
572 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
573 		}
574 		break;
575 	default:
576 		/* not needed */
577 		break;
578 	}
579 
580 	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
581 }
582 
583 
584 /* display watermark setup */
585 /**
586  * dce_v10_0_line_buffer_adjust - Set up the line buffer
587  *
588  * @adev: amdgpu_device pointer
589  * @amdgpu_crtc: the selected display controller
590  * @mode: the current display mode on the selected display
591  * controller
592  *
593  * Setup up the line buffer allocation for
594  * the selected display controller (CIK).
595  * Returns the line buffer size in pixels.
596  */
597 static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev,
598 				       struct amdgpu_crtc *amdgpu_crtc,
599 				       struct drm_display_mode *mode)
600 {
601 	u32 tmp, buffer_alloc, i, mem_cfg;
602 	u32 pipe_offset = amdgpu_crtc->crtc_id;
603 	/*
604 	 * Line Buffer Setup
605 	 * There are 6 line buffers, one for each display controllers.
606 	 * There are 3 partitions per LB. Select the number of partitions
607 	 * to enable based on the display width.  For display widths larger
608 	 * than 4096, you need use to use 2 display controllers and combine
609 	 * them using the stereo blender.
610 	 */
611 	if (amdgpu_crtc->base.enabled && mode) {
612 		if (mode->crtc_hdisplay < 1920) {
613 			mem_cfg = 1;
614 			buffer_alloc = 2;
615 		} else if (mode->crtc_hdisplay < 2560) {
616 			mem_cfg = 2;
617 			buffer_alloc = 2;
618 		} else if (mode->crtc_hdisplay < 4096) {
619 			mem_cfg = 0;
620 			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
621 		} else {
622 			DRM_DEBUG_KMS("Mode too big for LB!\n");
623 			mem_cfg = 0;
624 			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
625 		}
626 	} else {
627 		mem_cfg = 1;
628 		buffer_alloc = 0;
629 	}
630 
631 	tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
632 	tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
633 	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
634 
635 	tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
636 	tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
637 	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
638 
639 	for (i = 0; i < adev->usec_timeout; i++) {
640 		tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
641 		if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
642 			break;
643 		udelay(1);
644 	}
645 
646 	if (amdgpu_crtc->base.enabled && mode) {
647 		switch (mem_cfg) {
648 		case 0:
649 		default:
650 			return 4096 * 2;
651 		case 1:
652 			return 1920 * 2;
653 		case 2:
654 			return 2560 * 2;
655 		}
656 	}
657 
658 	/* controller not enabled, so no lb used */
659 	return 0;
660 }
661 
662 /**
663  * cik_get_number_of_dram_channels - get the number of dram channels
664  *
665  * @adev: amdgpu_device pointer
666  *
667  * Look up the number of video ram channels (CIK).
668  * Used for display watermark bandwidth calculations
669  * Returns the number of dram channels
670  */
671 static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
672 {
673 	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
674 
675 	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
676 	case 0:
677 	default:
678 		return 1;
679 	case 1:
680 		return 2;
681 	case 2:
682 		return 4;
683 	case 3:
684 		return 8;
685 	case 4:
686 		return 3;
687 	case 5:
688 		return 6;
689 	case 6:
690 		return 10;
691 	case 7:
692 		return 12;
693 	case 8:
694 		return 16;
695 	}
696 }
697 
698 struct dce10_wm_params {
699 	u32 dram_channels; /* number of dram channels */
700 	u32 yclk;          /* bandwidth per dram data pin in kHz */
701 	u32 sclk;          /* engine clock in kHz */
702 	u32 disp_clk;      /* display clock in kHz */
703 	u32 src_width;     /* viewport width */
704 	u32 active_time;   /* active display time in ns */
705 	u32 blank_time;    /* blank time in ns */
706 	bool interlaced;    /* mode is interlaced */
707 	fixed20_12 vsc;    /* vertical scale ratio */
708 	u32 num_heads;     /* number of active crtcs */
709 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
710 	u32 lb_size;       /* line buffer allocated to pipe */
711 	u32 vtaps;         /* vertical scaler taps */
712 };
713 
714 /**
715  * dce_v10_0_dram_bandwidth - get the dram bandwidth
716  *
717  * @wm: watermark calculation data
718  *
719  * Calculate the raw dram bandwidth (CIK).
720  * Used for display watermark bandwidth calculations
721  * Returns the dram bandwidth in MBytes/s
722  */
723 static u32 dce_v10_0_dram_bandwidth(struct dce10_wm_params *wm)
724 {
725 	/* Calculate raw DRAM Bandwidth */
726 	fixed20_12 dram_efficiency; /* 0.7 */
727 	fixed20_12 yclk, dram_channels, bandwidth;
728 	fixed20_12 a;
729 
730 	a.full = dfixed_const(1000);
731 	yclk.full = dfixed_const(wm->yclk);
732 	yclk.full = dfixed_div(yclk, a);
733 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
734 	a.full = dfixed_const(10);
735 	dram_efficiency.full = dfixed_const(7);
736 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
737 	bandwidth.full = dfixed_mul(dram_channels, yclk);
738 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
739 
740 	return dfixed_trunc(bandwidth);
741 }
742 
743 /**
744  * dce_v10_0_dram_bandwidth_for_display - get the dram bandwidth for display
745  *
746  * @wm: watermark calculation data
747  *
748  * Calculate the dram bandwidth used for display (CIK).
749  * Used for display watermark bandwidth calculations
750  * Returns the dram bandwidth for display in MBytes/s
751  */
752 static u32 dce_v10_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
753 {
754 	/* Calculate DRAM Bandwidth and the part allocated to display. */
755 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
756 	fixed20_12 yclk, dram_channels, bandwidth;
757 	fixed20_12 a;
758 
759 	a.full = dfixed_const(1000);
760 	yclk.full = dfixed_const(wm->yclk);
761 	yclk.full = dfixed_div(yclk, a);
762 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
763 	a.full = dfixed_const(10);
764 	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
765 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
766 	bandwidth.full = dfixed_mul(dram_channels, yclk);
767 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
768 
769 	return dfixed_trunc(bandwidth);
770 }
771 
772 /**
773  * dce_v10_0_data_return_bandwidth - get the data return bandwidth
774  *
775  * @wm: watermark calculation data
776  *
777  * Calculate the data return bandwidth used for display (CIK).
778  * Used for display watermark bandwidth calculations
779  * Returns the data return bandwidth in MBytes/s
780  */
781 static u32 dce_v10_0_data_return_bandwidth(struct dce10_wm_params *wm)
782 {
783 	/* Calculate the display Data return Bandwidth */
784 	fixed20_12 return_efficiency; /* 0.8 */
785 	fixed20_12 sclk, bandwidth;
786 	fixed20_12 a;
787 
788 	a.full = dfixed_const(1000);
789 	sclk.full = dfixed_const(wm->sclk);
790 	sclk.full = dfixed_div(sclk, a);
791 	a.full = dfixed_const(10);
792 	return_efficiency.full = dfixed_const(8);
793 	return_efficiency.full = dfixed_div(return_efficiency, a);
794 	a.full = dfixed_const(32);
795 	bandwidth.full = dfixed_mul(a, sclk);
796 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
797 
798 	return dfixed_trunc(bandwidth);
799 }
800 
801 /**
802  * dce_v10_0_dmif_request_bandwidth - get the dmif bandwidth
803  *
804  * @wm: watermark calculation data
805  *
806  * Calculate the dmif bandwidth used for display (CIK).
807  * Used for display watermark bandwidth calculations
808  * Returns the dmif bandwidth in MBytes/s
809  */
810 static u32 dce_v10_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
811 {
812 	/* Calculate the DMIF Request Bandwidth */
813 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
814 	fixed20_12 disp_clk, bandwidth;
815 	fixed20_12 a, b;
816 
817 	a.full = dfixed_const(1000);
818 	disp_clk.full = dfixed_const(wm->disp_clk);
819 	disp_clk.full = dfixed_div(disp_clk, a);
820 	a.full = dfixed_const(32);
821 	b.full = dfixed_mul(a, disp_clk);
822 
823 	a.full = dfixed_const(10);
824 	disp_clk_request_efficiency.full = dfixed_const(8);
825 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
826 
827 	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
828 
829 	return dfixed_trunc(bandwidth);
830 }
831 
832 /**
833  * dce_v10_0_available_bandwidth - get the min available bandwidth
834  *
835  * @wm: watermark calculation data
836  *
837  * Calculate the min available bandwidth used for display (CIK).
838  * Used for display watermark bandwidth calculations
839  * Returns the min available bandwidth in MBytes/s
840  */
841 static u32 dce_v10_0_available_bandwidth(struct dce10_wm_params *wm)
842 {
843 	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
844 	u32 dram_bandwidth = dce_v10_0_dram_bandwidth(wm);
845 	u32 data_return_bandwidth = dce_v10_0_data_return_bandwidth(wm);
846 	u32 dmif_req_bandwidth = dce_v10_0_dmif_request_bandwidth(wm);
847 
848 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
849 }
850 
851 /**
852  * dce_v10_0_average_bandwidth - get the average available bandwidth
853  *
854  * @wm: watermark calculation data
855  *
856  * Calculate the average available bandwidth used for display (CIK).
857  * Used for display watermark bandwidth calculations
858  * Returns the average available bandwidth in MBytes/s
859  */
860 static u32 dce_v10_0_average_bandwidth(struct dce10_wm_params *wm)
861 {
862 	/* Calculate the display mode Average Bandwidth
863 	 * DisplayMode should contain the source and destination dimensions,
864 	 * timing, etc.
865 	 */
866 	fixed20_12 bpp;
867 	fixed20_12 line_time;
868 	fixed20_12 src_width;
869 	fixed20_12 bandwidth;
870 	fixed20_12 a;
871 
872 	a.full = dfixed_const(1000);
873 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
874 	line_time.full = dfixed_div(line_time, a);
875 	bpp.full = dfixed_const(wm->bytes_per_pixel);
876 	src_width.full = dfixed_const(wm->src_width);
877 	bandwidth.full = dfixed_mul(src_width, bpp);
878 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
879 	bandwidth.full = dfixed_div(bandwidth, line_time);
880 
881 	return dfixed_trunc(bandwidth);
882 }
883 
884 /**
885  * dce_v10_0_latency_watermark - get the latency watermark
886  *
887  * @wm: watermark calculation data
888  *
889  * Calculate the latency watermark (CIK).
890  * Used for display watermark bandwidth calculations
891  * Returns the latency watermark in ns
892  */
893 static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
894 {
895 	/* First calculate the latency in ns */
896 	u32 mc_latency = 2000; /* 2000 ns. */
897 	u32 available_bandwidth = dce_v10_0_available_bandwidth(wm);
898 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
899 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
900 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
901 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
902 		(wm->num_heads * cursor_line_pair_return_time);
903 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
904 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
905 	u32 tmp, dmif_size = 12288;
906 	fixed20_12 a, b, c;
907 
908 	if (wm->num_heads == 0)
909 		return 0;
910 
911 	a.full = dfixed_const(2);
912 	b.full = dfixed_const(1);
913 	if ((wm->vsc.full > a.full) ||
914 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
915 	    (wm->vtaps >= 5) ||
916 	    ((wm->vsc.full >= a.full) && wm->interlaced))
917 		max_src_lines_per_dst_line = 4;
918 	else
919 		max_src_lines_per_dst_line = 2;
920 
921 	a.full = dfixed_const(available_bandwidth);
922 	b.full = dfixed_const(wm->num_heads);
923 	a.full = dfixed_div(a, b);
924 	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
925 	tmp = min(dfixed_trunc(a), tmp);
926 
927 	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
928 
929 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
930 	b.full = dfixed_const(1000);
931 	c.full = dfixed_const(lb_fill_bw);
932 	b.full = dfixed_div(c, b);
933 	a.full = dfixed_div(a, b);
934 	line_fill_time = dfixed_trunc(a);
935 
936 	if (line_fill_time < wm->active_time)
937 		return latency;
938 	else
939 		return latency + (line_fill_time - wm->active_time);
940 
941 }
942 
943 /**
944  * dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display - check
945  * average and available dram bandwidth
946  *
947  * @wm: watermark calculation data
948  *
949  * Check if the display average bandwidth fits in the display
950  * dram bandwidth (CIK).
951  * Used for display watermark bandwidth calculations
952  * Returns true if the display fits, false if not.
953  */
954 static bool dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
955 {
956 	if (dce_v10_0_average_bandwidth(wm) <=
957 	    (dce_v10_0_dram_bandwidth_for_display(wm) / wm->num_heads))
958 		return true;
959 	else
960 		return false;
961 }
962 
963 /**
964  * dce_v10_0_average_bandwidth_vs_available_bandwidth - check
965  * average and available bandwidth
966  *
967  * @wm: watermark calculation data
968  *
969  * Check if the display average bandwidth fits in the display
970  * available bandwidth (CIK).
971  * Used for display watermark bandwidth calculations
972  * Returns true if the display fits, false if not.
973  */
974 static bool dce_v10_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
975 {
976 	if (dce_v10_0_average_bandwidth(wm) <=
977 	    (dce_v10_0_available_bandwidth(wm) / wm->num_heads))
978 		return true;
979 	else
980 		return false;
981 }
982 
983 /**
984  * dce_v10_0_check_latency_hiding - check latency hiding
985  *
986  * @wm: watermark calculation data
987  *
988  * Check latency hiding (CIK).
989  * Used for display watermark bandwidth calculations
990  * Returns true if the display fits, false if not.
991  */
992 static bool dce_v10_0_check_latency_hiding(struct dce10_wm_params *wm)
993 {
994 	u32 lb_partitions = wm->lb_size / wm->src_width;
995 	u32 line_time = wm->active_time + wm->blank_time;
996 	u32 latency_tolerant_lines;
997 	u32 latency_hiding;
998 	fixed20_12 a;
999 
1000 	a.full = dfixed_const(1);
1001 	if (wm->vsc.full > a.full)
1002 		latency_tolerant_lines = 1;
1003 	else {
1004 		if (lb_partitions <= (wm->vtaps + 1))
1005 			latency_tolerant_lines = 1;
1006 		else
1007 			latency_tolerant_lines = 2;
1008 	}
1009 
1010 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
1011 
1012 	if (dce_v10_0_latency_watermark(wm) <= latency_hiding)
1013 		return true;
1014 	else
1015 		return false;
1016 }
1017 
1018 /**
1019  * dce_v10_0_program_watermarks - program display watermarks
1020  *
1021  * @adev: amdgpu_device pointer
1022  * @amdgpu_crtc: the selected display controller
1023  * @lb_size: line buffer size
1024  * @num_heads: number of display controllers in use
1025  *
1026  * Calculate and program the display watermarks for the
1027  * selected display controller (CIK).
1028  */
1029 static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
1030 					struct amdgpu_crtc *amdgpu_crtc,
1031 					u32 lb_size, u32 num_heads)
1032 {
1033 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1034 	struct dce10_wm_params wm_low, wm_high;
1035 	u32 active_time;
1036 	u32 line_time = 0;
1037 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
1038 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1039 
1040 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
1041 		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
1042 					    (u32)mode->clock);
1043 		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
1044 					  (u32)mode->clock);
1045 		line_time = min(line_time, (u32)65535);
1046 
1047 		/* watermark for high clocks */
1048 		if (adev->pm.dpm_enabled) {
1049 			wm_high.yclk =
1050 				amdgpu_dpm_get_mclk(adev, false) * 10;
1051 			wm_high.sclk =
1052 				amdgpu_dpm_get_sclk(adev, false) * 10;
1053 		} else {
1054 			wm_high.yclk = adev->pm.current_mclk * 10;
1055 			wm_high.sclk = adev->pm.current_sclk * 10;
1056 		}
1057 
1058 		wm_high.disp_clk = mode->clock;
1059 		wm_high.src_width = mode->crtc_hdisplay;
1060 		wm_high.active_time = active_time;
1061 		wm_high.blank_time = line_time - wm_high.active_time;
1062 		wm_high.interlaced = false;
1063 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1064 			wm_high.interlaced = true;
1065 		wm_high.vsc = amdgpu_crtc->vsc;
1066 		wm_high.vtaps = 1;
1067 		if (amdgpu_crtc->rmx_type != RMX_OFF)
1068 			wm_high.vtaps = 2;
1069 		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1070 		wm_high.lb_size = lb_size;
1071 		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1072 		wm_high.num_heads = num_heads;
1073 
1074 		/* set for high clocks */
1075 		latency_watermark_a = min(dce_v10_0_latency_watermark(&wm_high), (u32)65535);
1076 
1077 		/* possibly force display priority to high */
1078 		/* should really do this at mode validation time... */
1079 		if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1080 		    !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1081 		    !dce_v10_0_check_latency_hiding(&wm_high) ||
1082 		    (adev->mode_info.disp_priority == 2)) {
1083 			DRM_DEBUG_KMS("force priority to high\n");
1084 		}
1085 
1086 		/* watermark for low clocks */
1087 		if (adev->pm.dpm_enabled) {
1088 			wm_low.yclk =
1089 				amdgpu_dpm_get_mclk(adev, true) * 10;
1090 			wm_low.sclk =
1091 				amdgpu_dpm_get_sclk(adev, true) * 10;
1092 		} else {
1093 			wm_low.yclk = adev->pm.current_mclk * 10;
1094 			wm_low.sclk = adev->pm.current_sclk * 10;
1095 		}
1096 
1097 		wm_low.disp_clk = mode->clock;
1098 		wm_low.src_width = mode->crtc_hdisplay;
1099 		wm_low.active_time = active_time;
1100 		wm_low.blank_time = line_time - wm_low.active_time;
1101 		wm_low.interlaced = false;
1102 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1103 			wm_low.interlaced = true;
1104 		wm_low.vsc = amdgpu_crtc->vsc;
1105 		wm_low.vtaps = 1;
1106 		if (amdgpu_crtc->rmx_type != RMX_OFF)
1107 			wm_low.vtaps = 2;
1108 		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1109 		wm_low.lb_size = lb_size;
1110 		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1111 		wm_low.num_heads = num_heads;
1112 
1113 		/* set for low clocks */
1114 		latency_watermark_b = min(dce_v10_0_latency_watermark(&wm_low), (u32)65535);
1115 
1116 		/* possibly force display priority to high */
1117 		/* should really do this at mode validation time... */
1118 		if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1119 		    !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1120 		    !dce_v10_0_check_latency_hiding(&wm_low) ||
1121 		    (adev->mode_info.disp_priority == 2)) {
1122 			DRM_DEBUG_KMS("force priority to high\n");
1123 		}
1124 		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1125 	}
1126 
1127 	/* select wm A */
1128 	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1129 	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
1130 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1131 	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
1132 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
1133 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
1134 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1135 	/* select wm B */
1136 	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
1137 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1138 	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
1139 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
1140 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
1141 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1142 	/* restore original selection */
1143 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1144 
1145 	/* save values for DPM */
1146 	amdgpu_crtc->line_time = line_time;
1147 	amdgpu_crtc->wm_high = latency_watermark_a;
1148 	amdgpu_crtc->wm_low = latency_watermark_b;
1149 	/* Save number of lines the linebuffer leads before the scanout */
1150 	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1151 }
1152 
1153 /**
1154  * dce_v10_0_bandwidth_update - program display watermarks
1155  *
1156  * @adev: amdgpu_device pointer
1157  *
1158  * Calculate and program the display watermarks and line
1159  * buffer allocation (CIK).
1160  */
1161 static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev)
1162 {
1163 	struct drm_display_mode *mode = NULL;
1164 	u32 num_heads = 0, lb_size;
1165 	int i;
1166 
1167 	amdgpu_display_update_priority(adev);
1168 
1169 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1170 		if (adev->mode_info.crtcs[i]->base.enabled)
1171 			num_heads++;
1172 	}
1173 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1174 		mode = &adev->mode_info.crtcs[i]->base.mode;
1175 		lb_size = dce_v10_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1176 		dce_v10_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1177 					    lb_size, num_heads);
1178 	}
1179 }
1180 
1181 static void dce_v10_0_audio_get_connected_pins(struct amdgpu_device *adev)
1182 {
1183 	int i;
1184 	u32 offset, tmp;
1185 
1186 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1187 		offset = adev->mode_info.audio.pin[i].offset;
1188 		tmp = RREG32_AUDIO_ENDPT(offset,
1189 					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1190 		if (((tmp &
1191 		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1192 		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1193 			adev->mode_info.audio.pin[i].connected = false;
1194 		else
1195 			adev->mode_info.audio.pin[i].connected = true;
1196 	}
1197 }
1198 
1199 static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *adev)
1200 {
1201 	int i;
1202 
1203 	dce_v10_0_audio_get_connected_pins(adev);
1204 
1205 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1206 		if (adev->mode_info.audio.pin[i].connected)
1207 			return &adev->mode_info.audio.pin[i];
1208 	}
1209 	DRM_ERROR("No connected audio pins found!\n");
1210 	return NULL;
1211 }
1212 
1213 static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1214 {
1215 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
1216 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1217 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1218 	u32 tmp;
1219 
1220 	if (!dig || !dig->afmt || !dig->afmt->pin)
1221 		return;
1222 
1223 	tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
1224 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
1225 	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
1226 }
1227 
1228 static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
1229 						struct drm_display_mode *mode)
1230 {
1231 	struct drm_device *dev = encoder->dev;
1232 	struct amdgpu_device *adev = drm_to_adev(dev);
1233 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1234 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1235 	struct drm_connector *connector;
1236 	struct drm_connector_list_iter iter;
1237 	struct amdgpu_connector *amdgpu_connector = NULL;
1238 	u32 tmp;
1239 	int interlace = 0;
1240 
1241 	if (!dig || !dig->afmt || !dig->afmt->pin)
1242 		return;
1243 
1244 	drm_connector_list_iter_begin(dev, &iter);
1245 	drm_for_each_connector_iter(connector, &iter) {
1246 		if (connector->encoder == encoder) {
1247 			amdgpu_connector = to_amdgpu_connector(connector);
1248 			break;
1249 		}
1250 	}
1251 	drm_connector_list_iter_end(&iter);
1252 
1253 	if (!amdgpu_connector) {
1254 		DRM_ERROR("Couldn't find encoder's connector\n");
1255 		return;
1256 	}
1257 
1258 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1259 		interlace = 1;
1260 	if (connector->latency_present[interlace]) {
1261 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1262 				    VIDEO_LIPSYNC, connector->video_latency[interlace]);
1263 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1264 				    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1265 	} else {
1266 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1267 				    VIDEO_LIPSYNC, 0);
1268 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1269 				    AUDIO_LIPSYNC, 0);
1270 	}
1271 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1272 			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1273 }
1274 
1275 static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1276 {
1277 	struct drm_device *dev = encoder->dev;
1278 	struct amdgpu_device *adev = drm_to_adev(dev);
1279 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1280 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1281 	struct drm_connector *connector;
1282 	struct drm_connector_list_iter iter;
1283 	struct amdgpu_connector *amdgpu_connector = NULL;
1284 	u32 tmp;
1285 	u8 *sadb = NULL;
1286 	int sad_count;
1287 
1288 	if (!dig || !dig->afmt || !dig->afmt->pin)
1289 		return;
1290 
1291 	drm_connector_list_iter_begin(dev, &iter);
1292 	drm_for_each_connector_iter(connector, &iter) {
1293 		if (connector->encoder == encoder) {
1294 			amdgpu_connector = to_amdgpu_connector(connector);
1295 			break;
1296 		}
1297 	}
1298 	drm_connector_list_iter_end(&iter);
1299 
1300 	if (!amdgpu_connector) {
1301 		DRM_ERROR("Couldn't find encoder's connector\n");
1302 		return;
1303 	}
1304 
1305 	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1306 	if (sad_count < 0) {
1307 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1308 		sad_count = 0;
1309 	}
1310 
1311 	/* program the speaker allocation */
1312 	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1313 				 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1314 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1315 			    DP_CONNECTION, 0);
1316 	/* set HDMI mode */
1317 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1318 			    HDMI_CONNECTION, 1);
1319 	if (sad_count)
1320 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1321 				    SPEAKER_ALLOCATION, sadb[0]);
1322 	else
1323 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1324 				    SPEAKER_ALLOCATION, 5); /* stereo */
1325 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1326 			   ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1327 
1328 	kfree(sadb);
1329 }
1330 
1331 static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
1332 {
1333 	struct drm_device *dev = encoder->dev;
1334 	struct amdgpu_device *adev = drm_to_adev(dev);
1335 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1336 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1337 	struct drm_connector *connector;
1338 	struct drm_connector_list_iter iter;
1339 	struct amdgpu_connector *amdgpu_connector = NULL;
1340 	struct cea_sad *sads;
1341 	int i, sad_count;
1342 
1343 	static const u16 eld_reg_to_type[][2] = {
1344 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1345 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1346 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1347 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1348 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1349 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1350 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1351 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1352 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1353 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1354 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1355 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1356 	};
1357 
1358 	if (!dig || !dig->afmt || !dig->afmt->pin)
1359 		return;
1360 
1361 	drm_connector_list_iter_begin(dev, &iter);
1362 	drm_for_each_connector_iter(connector, &iter) {
1363 		if (connector->encoder == encoder) {
1364 			amdgpu_connector = to_amdgpu_connector(connector);
1365 			break;
1366 		}
1367 	}
1368 	drm_connector_list_iter_end(&iter);
1369 
1370 	if (!amdgpu_connector) {
1371 		DRM_ERROR("Couldn't find encoder's connector\n");
1372 		return;
1373 	}
1374 
1375 	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1376 	if (sad_count < 0)
1377 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1378 	if (sad_count <= 0)
1379 		return;
1380 	BUG_ON(!sads);
1381 
1382 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1383 		u32 tmp = 0;
1384 		u8 stereo_freqs = 0;
1385 		int max_channels = -1;
1386 		int j;
1387 
1388 		for (j = 0; j < sad_count; j++) {
1389 			struct cea_sad *sad = &sads[j];
1390 
1391 			if (sad->format == eld_reg_to_type[i][1]) {
1392 				if (sad->channels > max_channels) {
1393 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1394 							    MAX_CHANNELS, sad->channels);
1395 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1396 							    DESCRIPTOR_BYTE_2, sad->byte2);
1397 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1398 							    SUPPORTED_FREQUENCIES, sad->freq);
1399 					max_channels = sad->channels;
1400 				}
1401 
1402 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1403 					stereo_freqs |= sad->freq;
1404 				else
1405 					break;
1406 			}
1407 		}
1408 
1409 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1410 				    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
1411 		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
1412 	}
1413 
1414 	kfree(sads);
1415 }
1416 
1417 static void dce_v10_0_audio_enable(struct amdgpu_device *adev,
1418 				  struct amdgpu_audio_pin *pin,
1419 				  bool enable)
1420 {
1421 	if (!pin)
1422 		return;
1423 
1424 	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1425 			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1426 }
1427 
1428 static const u32 pin_offsets[] =
1429 {
1430 	AUD0_REGISTER_OFFSET,
1431 	AUD1_REGISTER_OFFSET,
1432 	AUD2_REGISTER_OFFSET,
1433 	AUD3_REGISTER_OFFSET,
1434 	AUD4_REGISTER_OFFSET,
1435 	AUD5_REGISTER_OFFSET,
1436 	AUD6_REGISTER_OFFSET,
1437 };
1438 
1439 static int dce_v10_0_audio_init(struct amdgpu_device *adev)
1440 {
1441 	int i;
1442 
1443 	if (!amdgpu_audio)
1444 		return 0;
1445 
1446 	adev->mode_info.audio.enabled = true;
1447 
1448 	adev->mode_info.audio.num_pins = 7;
1449 
1450 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1451 		adev->mode_info.audio.pin[i].channels = -1;
1452 		adev->mode_info.audio.pin[i].rate = -1;
1453 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1454 		adev->mode_info.audio.pin[i].status_bits = 0;
1455 		adev->mode_info.audio.pin[i].category_code = 0;
1456 		adev->mode_info.audio.pin[i].connected = false;
1457 		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1458 		adev->mode_info.audio.pin[i].id = i;
1459 		/* disable audio.  it will be set up later */
1460 		/* XXX remove once we switch to ip funcs */
1461 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1462 	}
1463 
1464 	return 0;
1465 }
1466 
1467 static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
1468 {
1469 	int i;
1470 
1471 	if (!amdgpu_audio)
1472 		return;
1473 
1474 	if (!adev->mode_info.audio.enabled)
1475 		return;
1476 
1477 	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1478 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1479 
1480 	adev->mode_info.audio.enabled = false;
1481 }
1482 
1483 /*
1484  * update the N and CTS parameters for a given pixel clock rate
1485  */
1486 static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1487 {
1488 	struct drm_device *dev = encoder->dev;
1489 	struct amdgpu_device *adev = drm_to_adev(dev);
1490 	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1491 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1492 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1493 	u32 tmp;
1494 
1495 	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
1496 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
1497 	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
1498 	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
1499 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
1500 	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
1501 
1502 	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
1503 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
1504 	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
1505 	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
1506 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
1507 	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
1508 
1509 	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
1510 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
1511 	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
1512 	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
1513 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
1514 	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
1515 
1516 }
1517 
1518 /*
1519  * build a HDMI Video Info Frame
1520  */
1521 static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1522 					       void *buffer, size_t size)
1523 {
1524 	struct drm_device *dev = encoder->dev;
1525 	struct amdgpu_device *adev = drm_to_adev(dev);
1526 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1527 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1528 	uint8_t *frame = buffer + 3;
1529 	uint8_t *header = buffer;
1530 
1531 	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
1532 		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1533 	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
1534 		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1535 	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
1536 		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1537 	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
1538 		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1539 }
1540 
1541 static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1542 {
1543 	struct drm_device *dev = encoder->dev;
1544 	struct amdgpu_device *adev = drm_to_adev(dev);
1545 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1546 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1547 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1548 	u32 dto_phase = 24 * 1000;
1549 	u32 dto_modulo = clock;
1550 	u32 tmp;
1551 
1552 	if (!dig || !dig->afmt)
1553 		return;
1554 
1555 	/* XXX two dtos; generally use dto0 for hdmi */
1556 	/* Express [24MHz / target pixel clock] as an exact rational
1557 	 * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
1558 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
1559 	 */
1560 	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1561 	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
1562 			    amdgpu_crtc->crtc_id);
1563 	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1564 	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1565 	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1566 }
1567 
1568 /*
1569  * update the info frames with the data from the current display mode
1570  */
1571 static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
1572 				  struct drm_display_mode *mode)
1573 {
1574 	struct drm_device *dev = encoder->dev;
1575 	struct amdgpu_device *adev = drm_to_adev(dev);
1576 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1577 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1578 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1579 	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1580 	struct hdmi_avi_infoframe frame;
1581 	ssize_t err;
1582 	u32 tmp;
1583 	int bpc = 8;
1584 
1585 	if (!dig || !dig->afmt)
1586 		return;
1587 
1588 	/* Silent, r600_hdmi_enable will raise WARN for us */
1589 	if (!dig->afmt->enabled)
1590 		return;
1591 
1592 	/* hdmi deep color mode general control packets setup, if bpc > 8 */
1593 	if (encoder->crtc) {
1594 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1595 		bpc = amdgpu_crtc->bpc;
1596 	}
1597 
1598 	/* disable audio prior to setting up hw */
1599 	dig->afmt->pin = dce_v10_0_audio_get_pin(adev);
1600 	dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
1601 
1602 	dce_v10_0_audio_set_dto(encoder, mode->clock);
1603 
1604 	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1605 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
1606 	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
1607 
1608 	WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
1609 
1610 	tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
1611 	switch (bpc) {
1612 	case 0:
1613 	case 6:
1614 	case 8:
1615 	case 16:
1616 	default:
1617 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
1618 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
1619 		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1620 			  connector->name, bpc);
1621 		break;
1622 	case 10:
1623 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1624 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
1625 		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1626 			  connector->name);
1627 		break;
1628 	case 12:
1629 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1630 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
1631 		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1632 			  connector->name);
1633 		break;
1634 	}
1635 	WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
1636 
1637 	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1638 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
1639 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
1640 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
1641 	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
1642 
1643 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1644 	/* enable audio info frames (frames won't be set until audio is enabled) */
1645 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1646 	/* required for audio info values to be updated */
1647 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1648 	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1649 
1650 	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1651 	/* required for audio info values to be updated */
1652 	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1653 	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1654 
1655 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1656 	/* anything other than 0 */
1657 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
1658 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1659 
1660 	WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
1661 
1662 	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1663 	/* set the default audio delay */
1664 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1665 	/* should be suffient for all audio modes and small enough for all hblanks */
1666 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1667 	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1668 
1669 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1670 	/* allow 60958 channel status fields to be updated */
1671 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1672 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1673 
1674 	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
1675 	if (bpc > 8)
1676 		/* clear SW CTS value */
1677 		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
1678 	else
1679 		/* select SW CTS value */
1680 		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
1681 	/* allow hw to sent ACR packets when required */
1682 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
1683 	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
1684 
1685 	dce_v10_0_afmt_update_ACR(encoder, mode->clock);
1686 
1687 	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1688 	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1689 	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1690 
1691 	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1692 	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1693 	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1694 
1695 	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1696 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1697 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1698 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1699 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1700 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1701 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1702 	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1703 
1704 	dce_v10_0_audio_write_speaker_allocation(encoder);
1705 
1706 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
1707 	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1708 
1709 	dce_v10_0_afmt_audio_select_pin(encoder);
1710 	dce_v10_0_audio_write_sad_regs(encoder);
1711 	dce_v10_0_audio_write_latency_fields(encoder, mode);
1712 
1713 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1714 	if (err < 0) {
1715 		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1716 		return;
1717 	}
1718 
1719 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1720 	if (err < 0) {
1721 		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1722 		return;
1723 	}
1724 
1725 	dce_v10_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1726 
1727 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1728 	/* enable AVI info frames */
1729 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1730 	/* required for audio info values to be updated */
1731 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1732 	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1733 
1734 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1735 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1736 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1737 
1738 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1739 	/* send audio packets */
1740 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1741 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1742 
1743 	WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
1744 	WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
1745 	WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
1746 	WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
1747 
1748 	/* enable audio after to setting up hw */
1749 	dce_v10_0_audio_enable(adev, dig->afmt->pin, true);
1750 }
1751 
1752 static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1753 {
1754 	struct drm_device *dev = encoder->dev;
1755 	struct amdgpu_device *adev = drm_to_adev(dev);
1756 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1757 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1758 
1759 	if (!dig || !dig->afmt)
1760 		return;
1761 
1762 	/* Silent, r600_hdmi_enable will raise WARN for us */
1763 	if (enable && dig->afmt->enabled)
1764 		return;
1765 	if (!enable && !dig->afmt->enabled)
1766 		return;
1767 
1768 	if (!enable && dig->afmt->pin) {
1769 		dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
1770 		dig->afmt->pin = NULL;
1771 	}
1772 
1773 	dig->afmt->enabled = enable;
1774 
1775 	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1776 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1777 }
1778 
1779 static int dce_v10_0_afmt_init(struct amdgpu_device *adev)
1780 {
1781 	int i;
1782 
1783 	for (i = 0; i < adev->mode_info.num_dig; i++)
1784 		adev->mode_info.afmt[i] = NULL;
1785 
1786 	/* DCE10 has audio blocks tied to DIG encoders */
1787 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1788 		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1789 		if (adev->mode_info.afmt[i]) {
1790 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1791 			adev->mode_info.afmt[i]->id = i;
1792 		} else {
1793 			int j;
1794 			for (j = 0; j < i; j++) {
1795 				kfree(adev->mode_info.afmt[j]);
1796 				adev->mode_info.afmt[j] = NULL;
1797 			}
1798 			return -ENOMEM;
1799 		}
1800 	}
1801 	return 0;
1802 }
1803 
1804 static void dce_v10_0_afmt_fini(struct amdgpu_device *adev)
1805 {
1806 	int i;
1807 
1808 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1809 		kfree(adev->mode_info.afmt[i]);
1810 		adev->mode_info.afmt[i] = NULL;
1811 	}
1812 }
1813 
1814 static const u32 vga_control_regs[6] =
1815 {
1816 	mmD1VGA_CONTROL,
1817 	mmD2VGA_CONTROL,
1818 	mmD3VGA_CONTROL,
1819 	mmD4VGA_CONTROL,
1820 	mmD5VGA_CONTROL,
1821 	mmD6VGA_CONTROL,
1822 };
1823 
1824 static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable)
1825 {
1826 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1827 	struct drm_device *dev = crtc->dev;
1828 	struct amdgpu_device *adev = drm_to_adev(dev);
1829 	u32 vga_control;
1830 
1831 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1832 	if (enable)
1833 		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1834 	else
1835 		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1836 }
1837 
1838 static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable)
1839 {
1840 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1841 	struct drm_device *dev = crtc->dev;
1842 	struct amdgpu_device *adev = drm_to_adev(dev);
1843 
1844 	if (enable)
1845 		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1846 	else
1847 		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1848 }
1849 
1850 static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
1851 				     struct drm_framebuffer *fb,
1852 				     int x, int y, int atomic)
1853 {
1854 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1855 	struct drm_device *dev = crtc->dev;
1856 	struct amdgpu_device *adev = drm_to_adev(dev);
1857 	struct drm_framebuffer *target_fb;
1858 	struct drm_gem_object *obj;
1859 	struct amdgpu_bo *abo;
1860 	uint64_t fb_location, tiling_flags;
1861 	uint32_t fb_format, fb_pitch_pixels;
1862 	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
1863 	u32 pipe_config;
1864 	u32 tmp, viewport_w, viewport_h;
1865 	int r;
1866 	bool bypass_lut = false;
1867 
1868 	/* no fb bound */
1869 	if (!atomic && !crtc->primary->fb) {
1870 		DRM_DEBUG_KMS("No FB bound\n");
1871 		return 0;
1872 	}
1873 
1874 	if (atomic)
1875 		target_fb = fb;
1876 	else
1877 		target_fb = crtc->primary->fb;
1878 
1879 	/* If atomic, assume fb object is pinned & idle & fenced and
1880 	 * just update base pointers
1881 	 */
1882 	obj = target_fb->obj[0];
1883 	abo = gem_to_amdgpu_bo(obj);
1884 	r = amdgpu_bo_reserve(abo, false);
1885 	if (unlikely(r != 0))
1886 		return r;
1887 
1888 	if (!atomic) {
1889 		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1890 		if (unlikely(r != 0)) {
1891 			amdgpu_bo_unreserve(abo);
1892 			return -EINVAL;
1893 		}
1894 	}
1895 	fb_location = amdgpu_bo_gpu_offset(abo);
1896 
1897 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1898 	amdgpu_bo_unreserve(abo);
1899 
1900 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1901 
1902 	switch (target_fb->format->format) {
1903 	case DRM_FORMAT_C8:
1904 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
1905 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1906 		break;
1907 	case DRM_FORMAT_XRGB4444:
1908 	case DRM_FORMAT_ARGB4444:
1909 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1910 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
1911 #ifdef __BIG_ENDIAN
1912 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1913 					ENDIAN_8IN16);
1914 #endif
1915 		break;
1916 	case DRM_FORMAT_XRGB1555:
1917 	case DRM_FORMAT_ARGB1555:
1918 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1919 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1920 #ifdef __BIG_ENDIAN
1921 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1922 					ENDIAN_8IN16);
1923 #endif
1924 		break;
1925 	case DRM_FORMAT_BGRX5551:
1926 	case DRM_FORMAT_BGRA5551:
1927 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1928 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
1929 #ifdef __BIG_ENDIAN
1930 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1931 					ENDIAN_8IN16);
1932 #endif
1933 		break;
1934 	case DRM_FORMAT_RGB565:
1935 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1936 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
1937 #ifdef __BIG_ENDIAN
1938 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1939 					ENDIAN_8IN16);
1940 #endif
1941 		break;
1942 	case DRM_FORMAT_XRGB8888:
1943 	case DRM_FORMAT_ARGB8888:
1944 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1945 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1946 #ifdef __BIG_ENDIAN
1947 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1948 					ENDIAN_8IN32);
1949 #endif
1950 		break;
1951 	case DRM_FORMAT_XRGB2101010:
1952 	case DRM_FORMAT_ARGB2101010:
1953 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1954 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
1955 #ifdef __BIG_ENDIAN
1956 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1957 					ENDIAN_8IN32);
1958 #endif
1959 		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1960 		bypass_lut = true;
1961 		break;
1962 	case DRM_FORMAT_BGRX1010102:
1963 	case DRM_FORMAT_BGRA1010102:
1964 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1965 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
1966 #ifdef __BIG_ENDIAN
1967 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1968 					ENDIAN_8IN32);
1969 #endif
1970 		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1971 		bypass_lut = true;
1972 		break;
1973 	case DRM_FORMAT_XBGR8888:
1974 	case DRM_FORMAT_ABGR8888:
1975 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1976 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1977 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
1978 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
1979 #ifdef __BIG_ENDIAN
1980 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1981 					ENDIAN_8IN32);
1982 #endif
1983 		break;
1984 	default:
1985 		DRM_ERROR("Unsupported screen format %p4cc\n",
1986 			  &target_fb->format->format);
1987 		return -EINVAL;
1988 	}
1989 
1990 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1991 		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1992 
1993 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1994 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1995 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1996 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1997 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1998 
1999 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
2000 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
2001 					  ARRAY_2D_TILED_THIN1);
2002 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
2003 					  tile_split);
2004 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
2005 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
2006 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
2007 					  mtaspect);
2008 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
2009 					  ADDR_SURF_MICRO_TILING_DISPLAY);
2010 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2011 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
2012 					  ARRAY_1D_TILED_THIN1);
2013 	}
2014 
2015 	fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
2016 				  pipe_config);
2017 
2018 	dce_v10_0_vga_enable(crtc, false);
2019 
2020 	/* Make sure surface address is updated at vertical blank rather than
2021 	 * horizontal blank
2022 	 */
2023 	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
2024 	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
2025 			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
2026 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2027 
2028 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2029 	       upper_32_bits(fb_location));
2030 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2031 	       upper_32_bits(fb_location));
2032 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2033 	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2034 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2035 	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2036 	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2037 	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2038 
2039 	/*
2040 	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
2041 	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2042 	 * retain the full precision throughout the pipeline.
2043 	 */
2044 	tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
2045 	if (bypass_lut)
2046 		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
2047 	else
2048 		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
2049 	WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
2050 
2051 	if (bypass_lut)
2052 		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2053 
2054 	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2055 	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2056 	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2057 	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2058 	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2059 	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2060 
2061 	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
2062 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2063 
2064 	dce_v10_0_grph_enable(crtc, true);
2065 
2066 	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2067 	       target_fb->height);
2068 
2069 	x &= ~3;
2070 	y &= ~1;
2071 	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2072 	       (x << 16) | y);
2073 	viewport_w = crtc->mode.hdisplay;
2074 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2075 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2076 	       (viewport_w << 16) | viewport_h);
2077 
2078 	/* set pageflip to happen anywhere in vblank interval */
2079 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2080 
2081 	if (!atomic && fb && fb != crtc->primary->fb) {
2082 		abo = gem_to_amdgpu_bo(fb->obj[0]);
2083 		r = amdgpu_bo_reserve(abo, true);
2084 		if (unlikely(r != 0))
2085 			return r;
2086 		amdgpu_bo_unpin(abo);
2087 		amdgpu_bo_unreserve(abo);
2088 	}
2089 
2090 	/* Bytes per pixel may have changed */
2091 	dce_v10_0_bandwidth_update(adev);
2092 
2093 	return 0;
2094 }
2095 
2096 static void dce_v10_0_set_interleave(struct drm_crtc *crtc,
2097 				     struct drm_display_mode *mode)
2098 {
2099 	struct drm_device *dev = crtc->dev;
2100 	struct amdgpu_device *adev = drm_to_adev(dev);
2101 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2102 	u32 tmp;
2103 
2104 	tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
2105 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2106 		tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
2107 	else
2108 		tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
2109 	WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
2110 }
2111 
2112 static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
2113 {
2114 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2115 	struct drm_device *dev = crtc->dev;
2116 	struct amdgpu_device *adev = drm_to_adev(dev);
2117 	u16 *r, *g, *b;
2118 	int i;
2119 	u32 tmp;
2120 
2121 	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2122 
2123 	tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
2124 	tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
2125 	tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_OVL_MODE, 0);
2126 	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2127 
2128 	tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
2129 	tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
2130 	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2131 
2132 	tmp = RREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset);
2133 	tmp = REG_SET_FIELD(tmp, PRESCALE_OVL_CONTROL, OVL_PRESCALE_BYPASS, 1);
2134 	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2135 
2136 	tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2137 	tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
2138 	tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, OVL_INPUT_GAMMA_MODE, 0);
2139 	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2140 
2141 	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2142 
2143 	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2144 	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2145 	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2146 
2147 	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2148 	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2149 	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2150 
2151 	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2152 	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2153 
2154 	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2155 	r = crtc->gamma_store;
2156 	g = r + crtc->gamma_size;
2157 	b = g + crtc->gamma_size;
2158 	for (i = 0; i < 256; i++) {
2159 		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2160 		       ((*r++ & 0xffc0) << 14) |
2161 		       ((*g++ & 0xffc0) << 4) |
2162 		       (*b++ >> 6));
2163 	}
2164 
2165 	tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2166 	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
2167 	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, OVL_DEGAMMA_MODE, 0);
2168 	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
2169 	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2170 
2171 	tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
2172 	tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
2173 	tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, OVL_GAMUT_REMAP_MODE, 0);
2174 	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2175 
2176 	tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2177 	tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
2178 	tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, OVL_REGAMMA_MODE, 0);
2179 	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2180 
2181 	tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
2182 	tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
2183 	tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_OVL_MODE, 0);
2184 	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2185 
2186 	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2187 	WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
2188 	/* XXX this only needs to be programmed once per crtc at startup,
2189 	 * not sure where the best place for it is
2190 	 */
2191 	tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
2192 	tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
2193 	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2194 }
2195 
2196 static int dce_v10_0_pick_dig_encoder(struct drm_encoder *encoder)
2197 {
2198 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2199 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2200 
2201 	switch (amdgpu_encoder->encoder_id) {
2202 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2203 		if (dig->linkb)
2204 			return 1;
2205 		else
2206 			return 0;
2207 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2208 		if (dig->linkb)
2209 			return 3;
2210 		else
2211 			return 2;
2212 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2213 		if (dig->linkb)
2214 			return 5;
2215 		else
2216 			return 4;
2217 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2218 		return 6;
2219 	default:
2220 		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2221 		return 0;
2222 	}
2223 }
2224 
2225 /**
2226  * dce_v10_0_pick_pll - Allocate a PPLL for use by the crtc.
2227  *
2228  * @crtc: drm crtc
2229  *
2230  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2231  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2232  * monitors a dedicated PPLL must be used.  If a particular board has
2233  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2234  * as there is no need to program the PLL itself.  If we are not able to
2235  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2236  * avoid messing up an existing monitor.
2237  *
2238  * Asic specific PLL information
2239  *
2240  * DCE 10.x
2241  * Tonga
2242  * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2243  * CI
2244  * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2245  *
2246  */
2247 static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
2248 {
2249 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2250 	struct drm_device *dev = crtc->dev;
2251 	struct amdgpu_device *adev = drm_to_adev(dev);
2252 	u32 pll_in_use;
2253 	int pll;
2254 
2255 	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2256 		if (adev->clock.dp_extclk)
2257 			/* skip PPLL programming if using ext clock */
2258 			return ATOM_PPLL_INVALID;
2259 		else {
2260 			/* use the same PPLL for all DP monitors */
2261 			pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2262 			if (pll != ATOM_PPLL_INVALID)
2263 				return pll;
2264 		}
2265 	} else {
2266 		/* use the same PPLL for all monitors with the same clock */
2267 		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2268 		if (pll != ATOM_PPLL_INVALID)
2269 			return pll;
2270 	}
2271 
2272 	/* DCE10 has PPLL0, PPLL1, and PPLL2 */
2273 	pll_in_use = amdgpu_pll_get_use_mask(crtc);
2274 	if (!(pll_in_use & (1 << ATOM_PPLL2)))
2275 		return ATOM_PPLL2;
2276 	if (!(pll_in_use & (1 << ATOM_PPLL1)))
2277 		return ATOM_PPLL1;
2278 	if (!(pll_in_use & (1 << ATOM_PPLL0)))
2279 		return ATOM_PPLL0;
2280 	DRM_ERROR("unable to allocate a PPLL\n");
2281 	return ATOM_PPLL_INVALID;
2282 }
2283 
2284 static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2285 {
2286 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2287 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2288 	uint32_t cur_lock;
2289 
2290 	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2291 	if (lock)
2292 		cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
2293 	else
2294 		cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
2295 	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2296 }
2297 
2298 static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
2299 {
2300 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2301 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2302 	u32 tmp;
2303 
2304 	tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2305 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
2306 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2307 }
2308 
2309 static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
2310 {
2311 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2312 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2313 	u32 tmp;
2314 
2315 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2316 	       upper_32_bits(amdgpu_crtc->cursor_addr));
2317 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2318 	       lower_32_bits(amdgpu_crtc->cursor_addr));
2319 
2320 	tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2321 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
2322 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
2323 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2324 }
2325 
2326 static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
2327 					int x, int y)
2328 {
2329 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2330 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2331 	int xorigin = 0, yorigin = 0;
2332 
2333 	amdgpu_crtc->cursor_x = x;
2334 	amdgpu_crtc->cursor_y = y;
2335 
2336 	/* avivo cursor are offset into the total surface */
2337 	x += crtc->x;
2338 	y += crtc->y;
2339 	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2340 
2341 	if (x < 0) {
2342 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2343 		x = 0;
2344 	}
2345 	if (y < 0) {
2346 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2347 		y = 0;
2348 	}
2349 
2350 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2351 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2352 	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2353 	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2354 
2355 	return 0;
2356 }
2357 
2358 static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
2359 				      int x, int y)
2360 {
2361 	int ret;
2362 
2363 	dce_v10_0_lock_cursor(crtc, true);
2364 	ret = dce_v10_0_cursor_move_locked(crtc, x, y);
2365 	dce_v10_0_lock_cursor(crtc, false);
2366 
2367 	return ret;
2368 }
2369 
2370 static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
2371 				      struct drm_file *file_priv,
2372 				      uint32_t handle,
2373 				      uint32_t width,
2374 				      uint32_t height,
2375 				      int32_t hot_x,
2376 				      int32_t hot_y)
2377 {
2378 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2379 	struct drm_gem_object *obj;
2380 	struct amdgpu_bo *aobj;
2381 	int ret;
2382 
2383 	if (!handle) {
2384 		/* turn off cursor */
2385 		dce_v10_0_hide_cursor(crtc);
2386 		obj = NULL;
2387 		goto unpin;
2388 	}
2389 
2390 	if ((width > amdgpu_crtc->max_cursor_width) ||
2391 	    (height > amdgpu_crtc->max_cursor_height)) {
2392 		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2393 		return -EINVAL;
2394 	}
2395 
2396 	obj = drm_gem_object_lookup(file_priv, handle);
2397 	if (!obj) {
2398 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2399 		return -ENOENT;
2400 	}
2401 
2402 	aobj = gem_to_amdgpu_bo(obj);
2403 	ret = amdgpu_bo_reserve(aobj, false);
2404 	if (ret != 0) {
2405 		drm_gem_object_put(obj);
2406 		return ret;
2407 	}
2408 
2409 	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2410 	amdgpu_bo_unreserve(aobj);
2411 	if (ret) {
2412 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2413 		drm_gem_object_put(obj);
2414 		return ret;
2415 	}
2416 	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2417 
2418 	dce_v10_0_lock_cursor(crtc, true);
2419 
2420 	if (width != amdgpu_crtc->cursor_width ||
2421 	    height != amdgpu_crtc->cursor_height ||
2422 	    hot_x != amdgpu_crtc->cursor_hot_x ||
2423 	    hot_y != amdgpu_crtc->cursor_hot_y) {
2424 		int x, y;
2425 
2426 		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2427 		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2428 
2429 		dce_v10_0_cursor_move_locked(crtc, x, y);
2430 
2431 		amdgpu_crtc->cursor_width = width;
2432 		amdgpu_crtc->cursor_height = height;
2433 		amdgpu_crtc->cursor_hot_x = hot_x;
2434 		amdgpu_crtc->cursor_hot_y = hot_y;
2435 	}
2436 
2437 	dce_v10_0_show_cursor(crtc);
2438 	dce_v10_0_lock_cursor(crtc, false);
2439 
2440 unpin:
2441 	if (amdgpu_crtc->cursor_bo) {
2442 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2443 		ret = amdgpu_bo_reserve(aobj, true);
2444 		if (likely(ret == 0)) {
2445 			amdgpu_bo_unpin(aobj);
2446 			amdgpu_bo_unreserve(aobj);
2447 		}
2448 		drm_gem_object_put(amdgpu_crtc->cursor_bo);
2449 	}
2450 
2451 	amdgpu_crtc->cursor_bo = obj;
2452 	return 0;
2453 }
2454 
2455 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
2456 {
2457 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2458 
2459 	if (amdgpu_crtc->cursor_bo) {
2460 		dce_v10_0_lock_cursor(crtc, true);
2461 
2462 		dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2463 					     amdgpu_crtc->cursor_y);
2464 
2465 		dce_v10_0_show_cursor(crtc);
2466 
2467 		dce_v10_0_lock_cursor(crtc, false);
2468 	}
2469 }
2470 
2471 static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2472 				    u16 *blue, uint32_t size,
2473 				    struct drm_modeset_acquire_ctx *ctx)
2474 {
2475 	dce_v10_0_crtc_load_lut(crtc);
2476 
2477 	return 0;
2478 }
2479 
2480 static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
2481 {
2482 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2483 
2484 	drm_crtc_cleanup(crtc);
2485 	kfree(amdgpu_crtc);
2486 }
2487 
2488 static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
2489 	.cursor_set2 = dce_v10_0_crtc_cursor_set2,
2490 	.cursor_move = dce_v10_0_crtc_cursor_move,
2491 	.gamma_set = dce_v10_0_crtc_gamma_set,
2492 	.set_config = amdgpu_display_crtc_set_config,
2493 	.destroy = dce_v10_0_crtc_destroy,
2494 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2495 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
2496 	.enable_vblank = amdgpu_enable_vblank_kms,
2497 	.disable_vblank = amdgpu_disable_vblank_kms,
2498 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2499 };
2500 
2501 static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2502 {
2503 	struct drm_device *dev = crtc->dev;
2504 	struct amdgpu_device *adev = drm_to_adev(dev);
2505 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2506 	unsigned type;
2507 
2508 	switch (mode) {
2509 	case DRM_MODE_DPMS_ON:
2510 		amdgpu_crtc->enabled = true;
2511 		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2512 		dce_v10_0_vga_enable(crtc, true);
2513 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2514 		dce_v10_0_vga_enable(crtc, false);
2515 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2516 		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2517 						amdgpu_crtc->crtc_id);
2518 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2519 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2520 		drm_crtc_vblank_on(crtc);
2521 		dce_v10_0_crtc_load_lut(crtc);
2522 		break;
2523 	case DRM_MODE_DPMS_STANDBY:
2524 	case DRM_MODE_DPMS_SUSPEND:
2525 	case DRM_MODE_DPMS_OFF:
2526 		drm_crtc_vblank_off(crtc);
2527 		if (amdgpu_crtc->enabled) {
2528 			dce_v10_0_vga_enable(crtc, true);
2529 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2530 			dce_v10_0_vga_enable(crtc, false);
2531 		}
2532 		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2533 		amdgpu_crtc->enabled = false;
2534 		break;
2535 	}
2536 	/* adjust pm to dpms */
2537 	amdgpu_dpm_compute_clocks(adev);
2538 }
2539 
2540 static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc)
2541 {
2542 	/* disable crtc pair power gating before programming */
2543 	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2544 	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2545 	dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2546 }
2547 
2548 static void dce_v10_0_crtc_commit(struct drm_crtc *crtc)
2549 {
2550 	dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2551 	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2552 }
2553 
2554 static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
2555 {
2556 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2557 	struct drm_device *dev = crtc->dev;
2558 	struct amdgpu_device *adev = drm_to_adev(dev);
2559 	struct amdgpu_atom_ss ss;
2560 	int i;
2561 
2562 	dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2563 	if (crtc->primary->fb) {
2564 		int r;
2565 		struct amdgpu_bo *abo;
2566 
2567 		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2568 		r = amdgpu_bo_reserve(abo, true);
2569 		if (unlikely(r))
2570 			DRM_ERROR("failed to reserve abo before unpin\n");
2571 		else {
2572 			amdgpu_bo_unpin(abo);
2573 			amdgpu_bo_unreserve(abo);
2574 		}
2575 	}
2576 	/* disable the GRPH */
2577 	dce_v10_0_grph_enable(crtc, false);
2578 
2579 	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2580 
2581 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2582 		if (adev->mode_info.crtcs[i] &&
2583 		    adev->mode_info.crtcs[i]->enabled &&
2584 		    i != amdgpu_crtc->crtc_id &&
2585 		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2586 			/* one other crtc is using this pll don't turn
2587 			 * off the pll
2588 			 */
2589 			goto done;
2590 		}
2591 	}
2592 
2593 	switch (amdgpu_crtc->pll_id) {
2594 	case ATOM_PPLL0:
2595 	case ATOM_PPLL1:
2596 	case ATOM_PPLL2:
2597 		/* disable the ppll */
2598 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2599 					  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2600 		break;
2601 	default:
2602 		break;
2603 	}
2604 done:
2605 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2606 	amdgpu_crtc->adjusted_clock = 0;
2607 	amdgpu_crtc->encoder = NULL;
2608 	amdgpu_crtc->connector = NULL;
2609 }
2610 
2611 static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc,
2612 				  struct drm_display_mode *mode,
2613 				  struct drm_display_mode *adjusted_mode,
2614 				  int x, int y, struct drm_framebuffer *old_fb)
2615 {
2616 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2617 
2618 	if (!amdgpu_crtc->adjusted_clock)
2619 		return -EINVAL;
2620 
2621 	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2622 	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2623 	dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2624 	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2625 	amdgpu_atombios_crtc_scaler_setup(crtc);
2626 	dce_v10_0_cursor_reset(crtc);
2627 	/* update the hw version fpr dpm */
2628 	amdgpu_crtc->hw_mode = *adjusted_mode;
2629 
2630 	return 0;
2631 }
2632 
2633 static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc,
2634 				     const struct drm_display_mode *mode,
2635 				     struct drm_display_mode *adjusted_mode)
2636 {
2637 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2638 	struct drm_device *dev = crtc->dev;
2639 	struct drm_encoder *encoder;
2640 
2641 	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2642 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2643 		if (encoder->crtc == crtc) {
2644 			amdgpu_crtc->encoder = encoder;
2645 			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2646 			break;
2647 		}
2648 	}
2649 	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2650 		amdgpu_crtc->encoder = NULL;
2651 		amdgpu_crtc->connector = NULL;
2652 		return false;
2653 	}
2654 	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2655 		return false;
2656 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2657 		return false;
2658 	/* pick pll */
2659 	amdgpu_crtc->pll_id = dce_v10_0_pick_pll(crtc);
2660 	/* if we can't get a PPLL for a non-DP encoder, fail */
2661 	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2662 	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2663 		return false;
2664 
2665 	return true;
2666 }
2667 
2668 static int dce_v10_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2669 				  struct drm_framebuffer *old_fb)
2670 {
2671 	return dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2672 }
2673 
2674 static int dce_v10_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2675 					 struct drm_framebuffer *fb,
2676 					 int x, int y, enum mode_set_atomic state)
2677 {
2678 	return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1);
2679 }
2680 
2681 static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
2682 	.dpms = dce_v10_0_crtc_dpms,
2683 	.mode_fixup = dce_v10_0_crtc_mode_fixup,
2684 	.mode_set = dce_v10_0_crtc_mode_set,
2685 	.mode_set_base = dce_v10_0_crtc_set_base,
2686 	.mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic,
2687 	.prepare = dce_v10_0_crtc_prepare,
2688 	.commit = dce_v10_0_crtc_commit,
2689 	.disable = dce_v10_0_crtc_disable,
2690 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
2691 };
2692 
2693 static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
2694 {
2695 	struct amdgpu_crtc *amdgpu_crtc;
2696 
2697 	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2698 			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2699 	if (amdgpu_crtc == NULL)
2700 		return -ENOMEM;
2701 
2702 	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
2703 
2704 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2705 	amdgpu_crtc->crtc_id = index;
2706 	adev->mode_info.crtcs[index] = amdgpu_crtc;
2707 
2708 	amdgpu_crtc->max_cursor_width = 128;
2709 	amdgpu_crtc->max_cursor_height = 128;
2710 	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2711 	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2712 
2713 	switch (amdgpu_crtc->crtc_id) {
2714 	case 0:
2715 	default:
2716 		amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
2717 		break;
2718 	case 1:
2719 		amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
2720 		break;
2721 	case 2:
2722 		amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
2723 		break;
2724 	case 3:
2725 		amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
2726 		break;
2727 	case 4:
2728 		amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
2729 		break;
2730 	case 5:
2731 		amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
2732 		break;
2733 	}
2734 
2735 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2736 	amdgpu_crtc->adjusted_clock = 0;
2737 	amdgpu_crtc->encoder = NULL;
2738 	amdgpu_crtc->connector = NULL;
2739 	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs);
2740 
2741 	return 0;
2742 }
2743 
2744 static int dce_v10_0_early_init(void *handle)
2745 {
2746 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2747 
2748 	adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg;
2749 	adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg;
2750 
2751 	dce_v10_0_set_display_funcs(adev);
2752 
2753 	adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);
2754 
2755 	switch (adev->asic_type) {
2756 	case CHIP_FIJI:
2757 	case CHIP_TONGA:
2758 		adev->mode_info.num_hpd = 6;
2759 		adev->mode_info.num_dig = 7;
2760 		break;
2761 	default:
2762 		/* FIXME: not supported yet */
2763 		return -EINVAL;
2764 	}
2765 
2766 	dce_v10_0_set_irq_funcs(adev);
2767 
2768 	return 0;
2769 }
2770 
2771 static int dce_v10_0_sw_init(void *handle)
2772 {
2773 	int r, i;
2774 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2775 
2776 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2777 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2778 		if (r)
2779 			return r;
2780 	}
2781 
2782 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
2783 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2784 		if (r)
2785 			return r;
2786 	}
2787 
2788 	/* HPD hotplug */
2789 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2790 	if (r)
2791 		return r;
2792 
2793 	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2794 
2795 	adev_to_drm(adev)->mode_config.async_page_flip = true;
2796 
2797 	adev_to_drm(adev)->mode_config.max_width = 16384;
2798 	adev_to_drm(adev)->mode_config.max_height = 16384;
2799 
2800 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2801 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2802 
2803 	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2804 
2805 	r = amdgpu_display_modeset_create_props(adev);
2806 	if (r)
2807 		return r;
2808 
2809 	adev_to_drm(adev)->mode_config.max_width = 16384;
2810 	adev_to_drm(adev)->mode_config.max_height = 16384;
2811 
2812 	/* allocate crtcs */
2813 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2814 		r = dce_v10_0_crtc_init(adev, i);
2815 		if (r)
2816 			return r;
2817 	}
2818 
2819 	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2820 		amdgpu_display_print_display_setup(adev_to_drm(adev));
2821 	else
2822 		return -EINVAL;
2823 
2824 	/* setup afmt */
2825 	r = dce_v10_0_afmt_init(adev);
2826 	if (r)
2827 		return r;
2828 
2829 	r = dce_v10_0_audio_init(adev);
2830 	if (r)
2831 		return r;
2832 
2833 	/* Disable vblank IRQs aggressively for power-saving */
2834 	/* XXX: can this be enabled for DC? */
2835 	adev_to_drm(adev)->vblank_disable_immediate = true;
2836 
2837 	r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
2838 	if (r)
2839 		return r;
2840 
2841 	INIT_DELAYED_WORK(&adev->hotplug_work,
2842 		  amdgpu_display_hotplug_work_func);
2843 
2844 	drm_kms_helper_poll_init(adev_to_drm(adev));
2845 
2846 	adev->mode_info.mode_config_initialized = true;
2847 	return 0;
2848 }
2849 
2850 static int dce_v10_0_sw_fini(void *handle)
2851 {
2852 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2853 
2854 	kfree(adev->mode_info.bios_hardcoded_edid);
2855 
2856 	drm_kms_helper_poll_fini(adev_to_drm(adev));
2857 
2858 	dce_v10_0_audio_fini(adev);
2859 
2860 	dce_v10_0_afmt_fini(adev);
2861 
2862 	drm_mode_config_cleanup(adev_to_drm(adev));
2863 	adev->mode_info.mode_config_initialized = false;
2864 
2865 	return 0;
2866 }
2867 
2868 static int dce_v10_0_hw_init(void *handle)
2869 {
2870 	int i;
2871 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2872 
2873 	dce_v10_0_init_golden_registers(adev);
2874 
2875 	/* disable vga render */
2876 	dce_v10_0_set_vga_render_state(adev, false);
2877 	/* init dig PHYs, disp eng pll */
2878 	amdgpu_atombios_encoder_init_dig(adev);
2879 	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2880 
2881 	/* initialize hpd */
2882 	dce_v10_0_hpd_init(adev);
2883 
2884 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2885 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2886 	}
2887 
2888 	dce_v10_0_pageflip_interrupt_init(adev);
2889 
2890 	return 0;
2891 }
2892 
2893 static int dce_v10_0_hw_fini(void *handle)
2894 {
2895 	int i;
2896 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2897 
2898 	dce_v10_0_hpd_fini(adev);
2899 
2900 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2901 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2902 	}
2903 
2904 	dce_v10_0_pageflip_interrupt_fini(adev);
2905 
2906 	flush_delayed_work(&adev->hotplug_work);
2907 
2908 	return 0;
2909 }
2910 
2911 static int dce_v10_0_suspend(void *handle)
2912 {
2913 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2914 	int r;
2915 
2916 	r = amdgpu_display_suspend_helper(adev);
2917 	if (r)
2918 		return r;
2919 
2920 	adev->mode_info.bl_level =
2921 		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2922 
2923 	return dce_v10_0_hw_fini(handle);
2924 }
2925 
2926 static int dce_v10_0_resume(void *handle)
2927 {
2928 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2929 	int ret;
2930 
2931 	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2932 							   adev->mode_info.bl_level);
2933 
2934 	ret = dce_v10_0_hw_init(handle);
2935 
2936 	/* turn on the BL */
2937 	if (adev->mode_info.bl_encoder) {
2938 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2939 								  adev->mode_info.bl_encoder);
2940 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2941 						    bl_level);
2942 	}
2943 	if (ret)
2944 		return ret;
2945 
2946 	return amdgpu_display_resume_helper(adev);
2947 }
2948 
2949 static bool dce_v10_0_is_idle(void *handle)
2950 {
2951 	return true;
2952 }
2953 
2954 static int dce_v10_0_wait_for_idle(void *handle)
2955 {
2956 	return 0;
2957 }
2958 
2959 static bool dce_v10_0_check_soft_reset(void *handle)
2960 {
2961 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2962 
2963 	return dce_v10_0_is_display_hung(adev);
2964 }
2965 
2966 static int dce_v10_0_soft_reset(void *handle)
2967 {
2968 	u32 srbm_soft_reset = 0, tmp;
2969 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2970 
2971 	if (dce_v10_0_is_display_hung(adev))
2972 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2973 
2974 	if (srbm_soft_reset) {
2975 		tmp = RREG32(mmSRBM_SOFT_RESET);
2976 		tmp |= srbm_soft_reset;
2977 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2978 		WREG32(mmSRBM_SOFT_RESET, tmp);
2979 		tmp = RREG32(mmSRBM_SOFT_RESET);
2980 
2981 		udelay(50);
2982 
2983 		tmp &= ~srbm_soft_reset;
2984 		WREG32(mmSRBM_SOFT_RESET, tmp);
2985 		tmp = RREG32(mmSRBM_SOFT_RESET);
2986 
2987 		/* Wait a little for things to settle down */
2988 		udelay(50);
2989 	}
2990 	return 0;
2991 }
2992 
2993 static void dce_v10_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2994 						     int crtc,
2995 						     enum amdgpu_interrupt_state state)
2996 {
2997 	u32 lb_interrupt_mask;
2998 
2999 	if (crtc >= adev->mode_info.num_crtc) {
3000 		DRM_DEBUG("invalid crtc %d\n", crtc);
3001 		return;
3002 	}
3003 
3004 	switch (state) {
3005 	case AMDGPU_IRQ_STATE_DISABLE:
3006 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3007 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3008 						  VBLANK_INTERRUPT_MASK, 0);
3009 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3010 		break;
3011 	case AMDGPU_IRQ_STATE_ENABLE:
3012 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3013 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3014 						  VBLANK_INTERRUPT_MASK, 1);
3015 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3016 		break;
3017 	default:
3018 		break;
3019 	}
3020 }
3021 
3022 static void dce_v10_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3023 						    int crtc,
3024 						    enum amdgpu_interrupt_state state)
3025 {
3026 	u32 lb_interrupt_mask;
3027 
3028 	if (crtc >= adev->mode_info.num_crtc) {
3029 		DRM_DEBUG("invalid crtc %d\n", crtc);
3030 		return;
3031 	}
3032 
3033 	switch (state) {
3034 	case AMDGPU_IRQ_STATE_DISABLE:
3035 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3036 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3037 						  VLINE_INTERRUPT_MASK, 0);
3038 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3039 		break;
3040 	case AMDGPU_IRQ_STATE_ENABLE:
3041 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3042 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3043 						  VLINE_INTERRUPT_MASK, 1);
3044 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3045 		break;
3046 	default:
3047 		break;
3048 	}
3049 }
3050 
3051 static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev,
3052 				       struct amdgpu_irq_src *source,
3053 				       unsigned hpd,
3054 				       enum amdgpu_interrupt_state state)
3055 {
3056 	u32 tmp;
3057 
3058 	if (hpd >= adev->mode_info.num_hpd) {
3059 		DRM_DEBUG("invalid hdp %d\n", hpd);
3060 		return 0;
3061 	}
3062 
3063 	switch (state) {
3064 	case AMDGPU_IRQ_STATE_DISABLE:
3065 		tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3066 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
3067 		WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3068 		break;
3069 	case AMDGPU_IRQ_STATE_ENABLE:
3070 		tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3071 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
3072 		WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3073 		break;
3074 	default:
3075 		break;
3076 	}
3077 
3078 	return 0;
3079 }
3080 
3081 static int dce_v10_0_set_crtc_irq_state(struct amdgpu_device *adev,
3082 					struct amdgpu_irq_src *source,
3083 					unsigned type,
3084 					enum amdgpu_interrupt_state state)
3085 {
3086 	switch (type) {
3087 	case AMDGPU_CRTC_IRQ_VBLANK1:
3088 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3089 		break;
3090 	case AMDGPU_CRTC_IRQ_VBLANK2:
3091 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3092 		break;
3093 	case AMDGPU_CRTC_IRQ_VBLANK3:
3094 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3095 		break;
3096 	case AMDGPU_CRTC_IRQ_VBLANK4:
3097 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3098 		break;
3099 	case AMDGPU_CRTC_IRQ_VBLANK5:
3100 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3101 		break;
3102 	case AMDGPU_CRTC_IRQ_VBLANK6:
3103 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3104 		break;
3105 	case AMDGPU_CRTC_IRQ_VLINE1:
3106 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 0, state);
3107 		break;
3108 	case AMDGPU_CRTC_IRQ_VLINE2:
3109 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 1, state);
3110 		break;
3111 	case AMDGPU_CRTC_IRQ_VLINE3:
3112 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 2, state);
3113 		break;
3114 	case AMDGPU_CRTC_IRQ_VLINE4:
3115 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 3, state);
3116 		break;
3117 	case AMDGPU_CRTC_IRQ_VLINE5:
3118 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 4, state);
3119 		break;
3120 	case AMDGPU_CRTC_IRQ_VLINE6:
3121 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 5, state);
3122 		break;
3123 	default:
3124 		break;
3125 	}
3126 	return 0;
3127 }
3128 
3129 static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3130 					    struct amdgpu_irq_src *src,
3131 					    unsigned type,
3132 					    enum amdgpu_interrupt_state state)
3133 {
3134 	u32 reg;
3135 
3136 	if (type >= adev->mode_info.num_crtc) {
3137 		DRM_ERROR("invalid pageflip crtc %d\n", type);
3138 		return -EINVAL;
3139 	}
3140 
3141 	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3142 	if (state == AMDGPU_IRQ_STATE_DISABLE)
3143 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3144 		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3145 	else
3146 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3147 		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3148 
3149 	return 0;
3150 }
3151 
3152 static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
3153 				  struct amdgpu_irq_src *source,
3154 				  struct amdgpu_iv_entry *entry)
3155 {
3156 	unsigned long flags;
3157 	unsigned crtc_id;
3158 	struct amdgpu_crtc *amdgpu_crtc;
3159 	struct amdgpu_flip_work *works;
3160 
3161 	crtc_id = (entry->src_id - 8) >> 1;
3162 	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3163 
3164 	if (crtc_id >= adev->mode_info.num_crtc) {
3165 		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3166 		return -EINVAL;
3167 	}
3168 
3169 	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3170 	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3171 		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3172 		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3173 
3174 	/* IRQ could occur when in initial stage */
3175 	if (amdgpu_crtc == NULL)
3176 		return 0;
3177 
3178 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3179 	works = amdgpu_crtc->pflip_works;
3180 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3181 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3182 						 "AMDGPU_FLIP_SUBMITTED(%d)\n",
3183 						 amdgpu_crtc->pflip_status,
3184 						 AMDGPU_FLIP_SUBMITTED);
3185 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3186 		return 0;
3187 	}
3188 
3189 	/* page flip completed. clean up */
3190 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3191 	amdgpu_crtc->pflip_works = NULL;
3192 
3193 	/* wakeup usersapce */
3194 	if (works->event)
3195 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3196 
3197 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3198 
3199 	drm_crtc_vblank_put(&amdgpu_crtc->base);
3200 	schedule_work(&works->unpin_work);
3201 
3202 	return 0;
3203 }
3204 
3205 static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev,
3206 				  int hpd)
3207 {
3208 	u32 tmp;
3209 
3210 	if (hpd >= adev->mode_info.num_hpd) {
3211 		DRM_DEBUG("invalid hdp %d\n", hpd);
3212 		return;
3213 	}
3214 
3215 	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3216 	tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
3217 	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3218 }
3219 
3220 static void dce_v10_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
3221 					  int crtc)
3222 {
3223 	u32 tmp;
3224 
3225 	if (crtc >= adev->mode_info.num_crtc) {
3226 		DRM_DEBUG("invalid crtc %d\n", crtc);
3227 		return;
3228 	}
3229 
3230 	tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
3231 	tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
3232 	WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
3233 }
3234 
3235 static void dce_v10_0_crtc_vline_int_ack(struct amdgpu_device *adev,
3236 					 int crtc)
3237 {
3238 	u32 tmp;
3239 
3240 	if (crtc >= adev->mode_info.num_crtc) {
3241 		DRM_DEBUG("invalid crtc %d\n", crtc);
3242 		return;
3243 	}
3244 
3245 	tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
3246 	tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
3247 	WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
3248 }
3249 
3250 static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
3251 			      struct amdgpu_irq_src *source,
3252 			      struct amdgpu_iv_entry *entry)
3253 {
3254 	unsigned crtc = entry->src_id - 1;
3255 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3256 	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, crtc);
3257 
3258 	switch (entry->src_data[0]) {
3259 	case 0: /* vblank */
3260 		if (disp_int & interrupt_status_offsets[crtc].vblank)
3261 			dce_v10_0_crtc_vblank_int_ack(adev, crtc);
3262 		else
3263 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3264 
3265 		if (amdgpu_irq_enabled(adev, source, irq_type)) {
3266 			drm_handle_vblank(adev_to_drm(adev), crtc);
3267 		}
3268 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3269 
3270 		break;
3271 	case 1: /* vline */
3272 		if (disp_int & interrupt_status_offsets[crtc].vline)
3273 			dce_v10_0_crtc_vline_int_ack(adev, crtc);
3274 		else
3275 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3276 
3277 		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3278 
3279 		break;
3280 	default:
3281 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3282 		break;
3283 	}
3284 
3285 	return 0;
3286 }
3287 
3288 static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
3289 			     struct amdgpu_irq_src *source,
3290 			     struct amdgpu_iv_entry *entry)
3291 {
3292 	uint32_t disp_int, mask;
3293 	unsigned hpd;
3294 
3295 	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3296 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3297 		return 0;
3298 	}
3299 
3300 	hpd = entry->src_data[0];
3301 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3302 	mask = interrupt_status_offsets[hpd].hpd;
3303 
3304 	if (disp_int & mask) {
3305 		dce_v10_0_hpd_int_ack(adev, hpd);
3306 		schedule_delayed_work(&adev->hotplug_work, 0);
3307 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3308 	}
3309 
3310 	return 0;
3311 }
3312 
3313 static int dce_v10_0_set_clockgating_state(void *handle,
3314 					  enum amd_clockgating_state state)
3315 {
3316 	return 0;
3317 }
3318 
3319 static int dce_v10_0_set_powergating_state(void *handle,
3320 					  enum amd_powergating_state state)
3321 {
3322 	return 0;
3323 }
3324 
3325 static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
3326 	.name = "dce_v10_0",
3327 	.early_init = dce_v10_0_early_init,
3328 	.late_init = NULL,
3329 	.sw_init = dce_v10_0_sw_init,
3330 	.sw_fini = dce_v10_0_sw_fini,
3331 	.hw_init = dce_v10_0_hw_init,
3332 	.hw_fini = dce_v10_0_hw_fini,
3333 	.suspend = dce_v10_0_suspend,
3334 	.resume = dce_v10_0_resume,
3335 	.is_idle = dce_v10_0_is_idle,
3336 	.wait_for_idle = dce_v10_0_wait_for_idle,
3337 	.check_soft_reset = dce_v10_0_check_soft_reset,
3338 	.soft_reset = dce_v10_0_soft_reset,
3339 	.set_clockgating_state = dce_v10_0_set_clockgating_state,
3340 	.set_powergating_state = dce_v10_0_set_powergating_state,
3341 };
3342 
3343 static void
3344 dce_v10_0_encoder_mode_set(struct drm_encoder *encoder,
3345 			  struct drm_display_mode *mode,
3346 			  struct drm_display_mode *adjusted_mode)
3347 {
3348 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3349 
3350 	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3351 
3352 	/* need to call this here rather than in prepare() since we need some crtc info */
3353 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3354 
3355 	/* set scaler clears this on some chips */
3356 	dce_v10_0_set_interleave(encoder->crtc, mode);
3357 
3358 	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3359 		dce_v10_0_afmt_enable(encoder, true);
3360 		dce_v10_0_afmt_setmode(encoder, adjusted_mode);
3361 	}
3362 }
3363 
3364 static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
3365 {
3366 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
3367 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3368 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3369 
3370 	if ((amdgpu_encoder->active_device &
3371 	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3372 	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3373 	     ENCODER_OBJECT_ID_NONE)) {
3374 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3375 		if (dig) {
3376 			dig->dig_encoder = dce_v10_0_pick_dig_encoder(encoder);
3377 			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3378 				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3379 		}
3380 	}
3381 
3382 	amdgpu_atombios_scratch_regs_lock(adev, true);
3383 
3384 	if (connector) {
3385 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3386 
3387 		/* select the clock/data port if it uses a router */
3388 		if (amdgpu_connector->router.cd_valid)
3389 			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3390 
3391 		/* turn eDP panel on for mode set */
3392 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3393 			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3394 							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3395 	}
3396 
3397 	/* this is needed for the pll/ss setup to work correctly in some cases */
3398 	amdgpu_atombios_encoder_set_crtc_source(encoder);
3399 	/* set up the FMT blocks */
3400 	dce_v10_0_program_fmt(encoder);
3401 }
3402 
3403 static void dce_v10_0_encoder_commit(struct drm_encoder *encoder)
3404 {
3405 	struct drm_device *dev = encoder->dev;
3406 	struct amdgpu_device *adev = drm_to_adev(dev);
3407 
3408 	/* need to call this here as we need the crtc set up */
3409 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3410 	amdgpu_atombios_scratch_regs_lock(adev, false);
3411 }
3412 
3413 static void dce_v10_0_encoder_disable(struct drm_encoder *encoder)
3414 {
3415 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3416 	struct amdgpu_encoder_atom_dig *dig;
3417 
3418 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3419 
3420 	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3421 		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3422 			dce_v10_0_afmt_enable(encoder, false);
3423 		dig = amdgpu_encoder->enc_priv;
3424 		dig->dig_encoder = -1;
3425 	}
3426 	amdgpu_encoder->active_device = 0;
3427 }
3428 
3429 /* these are handled by the primary encoders */
3430 static void dce_v10_0_ext_prepare(struct drm_encoder *encoder)
3431 {
3432 
3433 }
3434 
3435 static void dce_v10_0_ext_commit(struct drm_encoder *encoder)
3436 {
3437 
3438 }
3439 
3440 static void
3441 dce_v10_0_ext_mode_set(struct drm_encoder *encoder,
3442 		      struct drm_display_mode *mode,
3443 		      struct drm_display_mode *adjusted_mode)
3444 {
3445 
3446 }
3447 
3448 static void dce_v10_0_ext_disable(struct drm_encoder *encoder)
3449 {
3450 
3451 }
3452 
3453 static void
3454 dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode)
3455 {
3456 
3457 }
3458 
3459 static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = {
3460 	.dpms = dce_v10_0_ext_dpms,
3461 	.prepare = dce_v10_0_ext_prepare,
3462 	.mode_set = dce_v10_0_ext_mode_set,
3463 	.commit = dce_v10_0_ext_commit,
3464 	.disable = dce_v10_0_ext_disable,
3465 	/* no detect for TMDS/LVDS yet */
3466 };
3467 
3468 static const struct drm_encoder_helper_funcs dce_v10_0_dig_helper_funcs = {
3469 	.dpms = amdgpu_atombios_encoder_dpms,
3470 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3471 	.prepare = dce_v10_0_encoder_prepare,
3472 	.mode_set = dce_v10_0_encoder_mode_set,
3473 	.commit = dce_v10_0_encoder_commit,
3474 	.disable = dce_v10_0_encoder_disable,
3475 	.detect = amdgpu_atombios_encoder_dig_detect,
3476 };
3477 
3478 static const struct drm_encoder_helper_funcs dce_v10_0_dac_helper_funcs = {
3479 	.dpms = amdgpu_atombios_encoder_dpms,
3480 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3481 	.prepare = dce_v10_0_encoder_prepare,
3482 	.mode_set = dce_v10_0_encoder_mode_set,
3483 	.commit = dce_v10_0_encoder_commit,
3484 	.detect = amdgpu_atombios_encoder_dac_detect,
3485 };
3486 
3487 static void dce_v10_0_encoder_destroy(struct drm_encoder *encoder)
3488 {
3489 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3490 	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3491 		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3492 	kfree(amdgpu_encoder->enc_priv);
3493 	drm_encoder_cleanup(encoder);
3494 	kfree(amdgpu_encoder);
3495 }
3496 
3497 static const struct drm_encoder_funcs dce_v10_0_encoder_funcs = {
3498 	.destroy = dce_v10_0_encoder_destroy,
3499 };
3500 
3501 static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
3502 				 uint32_t encoder_enum,
3503 				 uint32_t supported_device,
3504 				 u16 caps)
3505 {
3506 	struct drm_device *dev = adev_to_drm(adev);
3507 	struct drm_encoder *encoder;
3508 	struct amdgpu_encoder *amdgpu_encoder;
3509 
3510 	/* see if we already added it */
3511 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3512 		amdgpu_encoder = to_amdgpu_encoder(encoder);
3513 		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3514 			amdgpu_encoder->devices |= supported_device;
3515 			return;
3516 		}
3517 
3518 	}
3519 
3520 	/* add a new one */
3521 	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3522 	if (!amdgpu_encoder)
3523 		return;
3524 
3525 	encoder = &amdgpu_encoder->base;
3526 	switch (adev->mode_info.num_crtc) {
3527 	case 1:
3528 		encoder->possible_crtcs = 0x1;
3529 		break;
3530 	case 2:
3531 	default:
3532 		encoder->possible_crtcs = 0x3;
3533 		break;
3534 	case 4:
3535 		encoder->possible_crtcs = 0xf;
3536 		break;
3537 	case 6:
3538 		encoder->possible_crtcs = 0x3f;
3539 		break;
3540 	}
3541 
3542 	amdgpu_encoder->enc_priv = NULL;
3543 
3544 	amdgpu_encoder->encoder_enum = encoder_enum;
3545 	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3546 	amdgpu_encoder->devices = supported_device;
3547 	amdgpu_encoder->rmx_type = RMX_OFF;
3548 	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3549 	amdgpu_encoder->is_ext_encoder = false;
3550 	amdgpu_encoder->caps = caps;
3551 
3552 	switch (amdgpu_encoder->encoder_id) {
3553 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3554 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3555 		drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3556 				 DRM_MODE_ENCODER_DAC, NULL);
3557 		drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs);
3558 		break;
3559 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3560 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3561 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3562 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3563 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3564 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3565 			amdgpu_encoder->rmx_type = RMX_FULL;
3566 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3567 					 DRM_MODE_ENCODER_LVDS, NULL);
3568 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3569 		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3570 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3571 					 DRM_MODE_ENCODER_DAC, NULL);
3572 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3573 		} else {
3574 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3575 					 DRM_MODE_ENCODER_TMDS, NULL);
3576 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3577 		}
3578 		drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs);
3579 		break;
3580 	case ENCODER_OBJECT_ID_SI170B:
3581 	case ENCODER_OBJECT_ID_CH7303:
3582 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3583 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3584 	case ENCODER_OBJECT_ID_TITFP513:
3585 	case ENCODER_OBJECT_ID_VT1623:
3586 	case ENCODER_OBJECT_ID_HDMI_SI1930:
3587 	case ENCODER_OBJECT_ID_TRAVIS:
3588 	case ENCODER_OBJECT_ID_NUTMEG:
3589 		/* these are handled by the primary encoders */
3590 		amdgpu_encoder->is_ext_encoder = true;
3591 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3592 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3593 					 DRM_MODE_ENCODER_LVDS, NULL);
3594 		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3595 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3596 					 DRM_MODE_ENCODER_DAC, NULL);
3597 		else
3598 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3599 					 DRM_MODE_ENCODER_TMDS, NULL);
3600 		drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs);
3601 		break;
3602 	}
3603 }
3604 
3605 static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
3606 	.bandwidth_update = &dce_v10_0_bandwidth_update,
3607 	.vblank_get_counter = &dce_v10_0_vblank_get_counter,
3608 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3609 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3610 	.hpd_sense = &dce_v10_0_hpd_sense,
3611 	.hpd_set_polarity = &dce_v10_0_hpd_set_polarity,
3612 	.hpd_get_gpio_reg = &dce_v10_0_hpd_get_gpio_reg,
3613 	.page_flip = &dce_v10_0_page_flip,
3614 	.page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
3615 	.add_encoder = &dce_v10_0_encoder_add,
3616 	.add_connector = &amdgpu_connector_add,
3617 };
3618 
3619 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
3620 {
3621 	adev->mode_info.funcs = &dce_v10_0_display_funcs;
3622 }
3623 
3624 static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
3625 	.set = dce_v10_0_set_crtc_irq_state,
3626 	.process = dce_v10_0_crtc_irq,
3627 };
3628 
3629 static const struct amdgpu_irq_src_funcs dce_v10_0_pageflip_irq_funcs = {
3630 	.set = dce_v10_0_set_pageflip_irq_state,
3631 	.process = dce_v10_0_pageflip_irq,
3632 };
3633 
3634 static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = {
3635 	.set = dce_v10_0_set_hpd_irq_state,
3636 	.process = dce_v10_0_hpd_irq,
3637 };
3638 
3639 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
3640 {
3641 	if (adev->mode_info.num_crtc > 0)
3642 		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3643 	else
3644 		adev->crtc_irq.num_types = 0;
3645 	adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs;
3646 
3647 	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3648 	adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs;
3649 
3650 	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3651 	adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
3652 }
3653 
3654 const struct amdgpu_ip_block_version dce_v10_0_ip_block =
3655 {
3656 	.type = AMD_IP_BLOCK_TYPE_DCE,
3657 	.major = 10,
3658 	.minor = 0,
3659 	.rev = 0,
3660 	.funcs = &dce_v10_0_ip_funcs,
3661 };
3662 
3663 const struct amdgpu_ip_block_version dce_v10_1_ip_block =
3664 {
3665 	.type = AMD_IP_BLOCK_TYPE_DCE,
3666 	.major = 10,
3667 	.minor = 1,
3668 	.rev = 0,
3669 	.funcs = &dce_v10_0_ip_funcs,
3670 };
3671