1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "drmP.h"
24 #include "amdgpu.h"
25 #include "amdgpu_pm.h"
26 #include "amdgpu_i2c.h"
27 #include "vid.h"
28 #include "atom.h"
29 #include "amdgpu_atombios.h"
30 #include "atombios_crtc.h"
31 #include "atombios_encoders.h"
32 #include "amdgpu_pll.h"
33 #include "amdgpu_connectors.h"
34 
35 #include "dce/dce_11_0_d.h"
36 #include "dce/dce_11_0_sh_mask.h"
37 #include "dce/dce_11_0_enum.h"
38 #include "oss/oss_3_0_d.h"
39 #include "oss/oss_3_0_sh_mask.h"
40 #include "gmc/gmc_8_1_d.h"
41 #include "gmc/gmc_8_1_sh_mask.h"
42 
43 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
44 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
45 
46 static const u32 crtc_offsets[] =
47 {
48 	CRTC0_REGISTER_OFFSET,
49 	CRTC1_REGISTER_OFFSET,
50 	CRTC2_REGISTER_OFFSET,
51 	CRTC3_REGISTER_OFFSET,
52 	CRTC4_REGISTER_OFFSET,
53 	CRTC5_REGISTER_OFFSET,
54 	CRTC6_REGISTER_OFFSET
55 };
56 
57 static const u32 hpd_offsets[] =
58 {
59 	HPD0_REGISTER_OFFSET,
60 	HPD1_REGISTER_OFFSET,
61 	HPD2_REGISTER_OFFSET,
62 	HPD3_REGISTER_OFFSET,
63 	HPD4_REGISTER_OFFSET,
64 	HPD5_REGISTER_OFFSET
65 };
66 
67 static const uint32_t dig_offsets[] = {
68 	DIG0_REGISTER_OFFSET,
69 	DIG1_REGISTER_OFFSET,
70 	DIG2_REGISTER_OFFSET,
71 	DIG3_REGISTER_OFFSET,
72 	DIG4_REGISTER_OFFSET,
73 	DIG5_REGISTER_OFFSET,
74 	DIG6_REGISTER_OFFSET,
75 	DIG7_REGISTER_OFFSET,
76 	DIG8_REGISTER_OFFSET
77 };
78 
79 static const struct {
80 	uint32_t        reg;
81 	uint32_t        vblank;
82 	uint32_t        vline;
83 	uint32_t        hpd;
84 
85 } interrupt_status_offsets[] = { {
86 	.reg = mmDISP_INTERRUPT_STATUS,
87 	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
88 	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
89 	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
90 }, {
91 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
92 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
93 	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
94 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
95 }, {
96 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
97 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
98 	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
99 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
100 }, {
101 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
102 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
103 	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
104 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
105 }, {
106 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
107 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
108 	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
109 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
110 }, {
111 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
112 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
113 	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
114 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
115 } };
116 
117 static const u32 cz_golden_settings_a11[] =
118 {
119 	mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
120 	mmFBC_MISC, 0x1f311fff, 0x14300000,
121 };
122 
123 static const u32 cz_mgcg_cgcg_init[] =
124 {
125 	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
126 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
127 };
128 
129 static const u32 stoney_golden_settings_a11[] =
130 {
131 	mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
132 	mmFBC_MISC, 0x1f311fff, 0x14302000,
133 };
134 
135 static const u32 polaris11_golden_settings_a11[] =
136 {
137 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
138 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
139 	mmFBC_DEBUG1, 0xffffffff, 0x00000008,
140 	mmFBC_MISC, 0x9f313fff, 0x14300008,
141 	mmHDMI_CONTROL, 0x313f031f, 0x00000011,
142 };
143 
144 static const u32 polaris10_golden_settings_a11[] =
145 {
146 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
147 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
148 	mmFBC_MISC, 0x9f313fff, 0x14300008,
149 	mmHDMI_CONTROL, 0x313f031f, 0x00000011,
150 };
151 
152 static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
153 {
154 	switch (adev->asic_type) {
155 	case CHIP_CARRIZO:
156 		amdgpu_program_register_sequence(adev,
157 						 cz_mgcg_cgcg_init,
158 						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
159 		amdgpu_program_register_sequence(adev,
160 						 cz_golden_settings_a11,
161 						 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
162 		break;
163 	case CHIP_STONEY:
164 		amdgpu_program_register_sequence(adev,
165 						 stoney_golden_settings_a11,
166 						 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
167 		break;
168 	case CHIP_POLARIS11:
169 		amdgpu_program_register_sequence(adev,
170 						 polaris11_golden_settings_a11,
171 						 (const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
172 		break;
173 	case CHIP_POLARIS10:
174 		amdgpu_program_register_sequence(adev,
175 						 polaris10_golden_settings_a11,
176 						 (const u32)ARRAY_SIZE(polaris10_golden_settings_a11));
177 		break;
178 	default:
179 		break;
180 	}
181 }
182 
183 static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev,
184 				     u32 block_offset, u32 reg)
185 {
186 	unsigned long flags;
187 	u32 r;
188 
189 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
190 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
191 	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
192 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
193 
194 	return r;
195 }
196 
197 static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
198 				      u32 block_offset, u32 reg, u32 v)
199 {
200 	unsigned long flags;
201 
202 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
203 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
204 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
205 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
206 }
207 
208 static bool dce_v11_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
209 {
210 	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
211 			CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
212 		return true;
213 	else
214 		return false;
215 }
216 
217 static bool dce_v11_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
218 {
219 	u32 pos1, pos2;
220 
221 	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
222 	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
223 
224 	if (pos1 != pos2)
225 		return true;
226 	else
227 		return false;
228 }
229 
230 /**
231  * dce_v11_0_vblank_wait - vblank wait asic callback.
232  *
233  * @adev: amdgpu_device pointer
234  * @crtc: crtc to wait for vblank on
235  *
236  * Wait for vblank on the requested crtc (evergreen+).
237  */
238 static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc)
239 {
240 	unsigned i = 100;
241 
242 	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
243 		return;
244 
245 	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
246 		return;
247 
248 	/* depending on when we hit vblank, we may be close to active; if so,
249 	 * wait for another frame.
250 	 */
251 	while (dce_v11_0_is_in_vblank(adev, crtc)) {
252 		if (i++ == 100) {
253 			i = 0;
254 			if (!dce_v11_0_is_counter_moving(adev, crtc))
255 				break;
256 		}
257 	}
258 
259 	while (!dce_v11_0_is_in_vblank(adev, crtc)) {
260 		if (i++ == 100) {
261 			i = 0;
262 			if (!dce_v11_0_is_counter_moving(adev, crtc))
263 				break;
264 		}
265 	}
266 }
267 
268 static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
269 {
270 	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
271 		return 0;
272 	else
273 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
274 }
275 
276 static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
277 {
278 	unsigned i;
279 
280 	/* Enable pflip interrupts */
281 	for (i = 0; i < adev->mode_info.num_crtc; i++)
282 		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
283 }
284 
285 static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
286 {
287 	unsigned i;
288 
289 	/* Disable pflip interrupts */
290 	for (i = 0; i < adev->mode_info.num_crtc; i++)
291 		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
292 }
293 
294 /**
295  * dce_v11_0_page_flip - pageflip callback.
296  *
297  * @adev: amdgpu_device pointer
298  * @crtc_id: crtc to cleanup pageflip on
299  * @crtc_base: new address of the crtc (GPU MC address)
300  *
301  * Triggers the actual pageflip by updating the primary
302  * surface base address.
303  */
304 static void dce_v11_0_page_flip(struct amdgpu_device *adev,
305 			      int crtc_id, u64 crtc_base)
306 {
307 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
308 
309 	/* update the scanout addresses */
310 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
311 	       upper_32_bits(crtc_base));
312 	/* writing to the low address triggers the update */
313 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
314 	       lower_32_bits(crtc_base));
315 	/* post the write */
316 	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
317 }
318 
319 static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
320 					u32 *vbl, u32 *position)
321 {
322 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
323 		return -EINVAL;
324 
325 	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
326 	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
327 
328 	return 0;
329 }
330 
331 /**
332  * dce_v11_0_hpd_sense - hpd sense callback.
333  *
334  * @adev: amdgpu_device pointer
335  * @hpd: hpd (hotplug detect) pin
336  *
337  * Checks if a digital monitor is connected (evergreen+).
338  * Returns true if connected, false if not connected.
339  */
340 static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
341 			       enum amdgpu_hpd_id hpd)
342 {
343 	int idx;
344 	bool connected = false;
345 
346 	switch (hpd) {
347 	case AMDGPU_HPD_1:
348 		idx = 0;
349 		break;
350 	case AMDGPU_HPD_2:
351 		idx = 1;
352 		break;
353 	case AMDGPU_HPD_3:
354 		idx = 2;
355 		break;
356 	case AMDGPU_HPD_4:
357 		idx = 3;
358 		break;
359 	case AMDGPU_HPD_5:
360 		idx = 4;
361 		break;
362 	case AMDGPU_HPD_6:
363 		idx = 5;
364 		break;
365 	default:
366 		return connected;
367 	}
368 
369 	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
370 	    DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
371 		connected = true;
372 
373 	return connected;
374 }
375 
376 /**
377  * dce_v11_0_hpd_set_polarity - hpd set polarity callback.
378  *
379  * @adev: amdgpu_device pointer
380  * @hpd: hpd (hotplug detect) pin
381  *
382  * Set the polarity of the hpd pin (evergreen+).
383  */
384 static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
385 				      enum amdgpu_hpd_id hpd)
386 {
387 	u32 tmp;
388 	bool connected = dce_v11_0_hpd_sense(adev, hpd);
389 	int idx;
390 
391 	switch (hpd) {
392 	case AMDGPU_HPD_1:
393 		idx = 0;
394 		break;
395 	case AMDGPU_HPD_2:
396 		idx = 1;
397 		break;
398 	case AMDGPU_HPD_3:
399 		idx = 2;
400 		break;
401 	case AMDGPU_HPD_4:
402 		idx = 3;
403 		break;
404 	case AMDGPU_HPD_5:
405 		idx = 4;
406 		break;
407 	case AMDGPU_HPD_6:
408 		idx = 5;
409 		break;
410 	default:
411 		return;
412 	}
413 
414 	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
415 	if (connected)
416 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
417 	else
418 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
419 	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
420 }
421 
422 /**
423  * dce_v11_0_hpd_init - hpd setup callback.
424  *
425  * @adev: amdgpu_device pointer
426  *
427  * Setup the hpd pins used by the card (evergreen+).
428  * Enable the pin, set the polarity, and enable the hpd interrupts.
429  */
430 static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
431 {
432 	struct drm_device *dev = adev->ddev;
433 	struct drm_connector *connector;
434 	u32 tmp;
435 	int idx;
436 
437 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
438 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
439 
440 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
441 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
442 			/* don't try to enable hpd on eDP or LVDS avoid breaking the
443 			 * aux dp channel on imac and help (but not completely fix)
444 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
445 			 * also avoid interrupt storms during dpms.
446 			 */
447 			continue;
448 		}
449 
450 		switch (amdgpu_connector->hpd.hpd) {
451 		case AMDGPU_HPD_1:
452 			idx = 0;
453 			break;
454 		case AMDGPU_HPD_2:
455 			idx = 1;
456 			break;
457 		case AMDGPU_HPD_3:
458 			idx = 2;
459 			break;
460 		case AMDGPU_HPD_4:
461 			idx = 3;
462 			break;
463 		case AMDGPU_HPD_5:
464 			idx = 4;
465 			break;
466 		case AMDGPU_HPD_6:
467 			idx = 5;
468 			break;
469 		default:
470 			continue;
471 		}
472 
473 		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
474 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
475 		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
476 
477 		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
478 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
479 				    DC_HPD_CONNECT_INT_DELAY,
480 				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
481 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
482 				    DC_HPD_DISCONNECT_INT_DELAY,
483 				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
484 		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
485 
486 		dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
487 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
488 	}
489 }
490 
491 /**
492  * dce_v11_0_hpd_fini - hpd tear down callback.
493  *
494  * @adev: amdgpu_device pointer
495  *
496  * Tear down the hpd pins used by the card (evergreen+).
497  * Disable the hpd interrupts.
498  */
499 static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
500 {
501 	struct drm_device *dev = adev->ddev;
502 	struct drm_connector *connector;
503 	u32 tmp;
504 	int idx;
505 
506 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
507 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
508 
509 		switch (amdgpu_connector->hpd.hpd) {
510 		case AMDGPU_HPD_1:
511 			idx = 0;
512 			break;
513 		case AMDGPU_HPD_2:
514 			idx = 1;
515 			break;
516 		case AMDGPU_HPD_3:
517 			idx = 2;
518 			break;
519 		case AMDGPU_HPD_4:
520 			idx = 3;
521 			break;
522 		case AMDGPU_HPD_5:
523 			idx = 4;
524 			break;
525 		case AMDGPU_HPD_6:
526 			idx = 5;
527 			break;
528 		default:
529 			continue;
530 		}
531 
532 		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
533 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
534 		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
535 
536 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
537 	}
538 }
539 
540 static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
541 {
542 	return mmDC_GPIO_HPD_A;
543 }
544 
545 static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
546 {
547 	u32 crtc_hung = 0;
548 	u32 crtc_status[6];
549 	u32 i, j, tmp;
550 
551 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
552 		tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
553 		if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
554 			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
555 			crtc_hung |= (1 << i);
556 		}
557 	}
558 
559 	for (j = 0; j < 10; j++) {
560 		for (i = 0; i < adev->mode_info.num_crtc; i++) {
561 			if (crtc_hung & (1 << i)) {
562 				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
563 				if (tmp != crtc_status[i])
564 					crtc_hung &= ~(1 << i);
565 			}
566 		}
567 		if (crtc_hung == 0)
568 			return false;
569 		udelay(100);
570 	}
571 
572 	return true;
573 }
574 
575 static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
576 				     struct amdgpu_mode_mc_save *save)
577 {
578 	u32 crtc_enabled, tmp;
579 	int i;
580 
581 	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
582 	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
583 
584 	/* disable VGA render */
585 	tmp = RREG32(mmVGA_RENDER_CONTROL);
586 	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
587 	WREG32(mmVGA_RENDER_CONTROL, tmp);
588 
589 	/* blank the display controllers */
590 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
591 		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
592 					     CRTC_CONTROL, CRTC_MASTER_EN);
593 		if (crtc_enabled) {
594 #if 1
595 			save->crtc_enabled[i] = true;
596 			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
597 			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
598 				/*it is correct only for RGB ; black is 0*/
599 				WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
600 				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
601 				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
602 			}
603 #else
604 			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
605 			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
606 			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
607 			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
608 			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
609 			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
610 			save->crtc_enabled[i] = false;
611 			/* ***** */
612 #endif
613 		} else {
614 			save->crtc_enabled[i] = false;
615 		}
616 	}
617 }
618 
619 static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev,
620 				       struct amdgpu_mode_mc_save *save)
621 {
622 	u32 tmp;
623 	int i;
624 
625 	/* update crtc base addresses */
626 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
627 		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
628 		       upper_32_bits(adev->mc.vram_start));
629 		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
630 		       (u32)adev->mc.vram_start);
631 
632 		if (save->crtc_enabled[i]) {
633 			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
634 			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
635 			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
636 		}
637 	}
638 
639 	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
640 	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
641 
642 	/* Unlock vga access */
643 	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
644 	mdelay(1);
645 	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
646 }
647 
648 static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
649 					   bool render)
650 {
651 	u32 tmp;
652 
653 	/* Lockout access through VGA aperture*/
654 	tmp = RREG32(mmVGA_HDP_CONTROL);
655 	if (render)
656 		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
657 	else
658 		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
659 	WREG32(mmVGA_HDP_CONTROL, tmp);
660 
661 	/* disable VGA render */
662 	tmp = RREG32(mmVGA_RENDER_CONTROL);
663 	if (render)
664 		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
665 	else
666 		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
667 	WREG32(mmVGA_RENDER_CONTROL, tmp);
668 }
669 
670 static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
671 {
672 	struct drm_device *dev = encoder->dev;
673 	struct amdgpu_device *adev = dev->dev_private;
674 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
675 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
676 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
677 	int bpc = 0;
678 	u32 tmp = 0;
679 	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
680 
681 	if (connector) {
682 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
683 		bpc = amdgpu_connector_get_monitor_bpc(connector);
684 		dither = amdgpu_connector->dither;
685 	}
686 
687 	/* LVDS/eDP FMT is set up by atom */
688 	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
689 		return;
690 
691 	/* not needed for analog */
692 	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
693 	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
694 		return;
695 
696 	if (bpc == 0)
697 		return;
698 
699 	switch (bpc) {
700 	case 6:
701 		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
702 			/* XXX sort out optimal dither settings */
703 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
704 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
705 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
706 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
707 		} else {
708 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
709 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
710 		}
711 		break;
712 	case 8:
713 		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
714 			/* XXX sort out optimal dither settings */
715 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
716 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
717 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
718 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
719 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
720 		} else {
721 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
722 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
723 		}
724 		break;
725 	case 10:
726 		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
727 			/* XXX sort out optimal dither settings */
728 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
729 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
730 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
731 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
732 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
733 		} else {
734 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
735 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
736 		}
737 		break;
738 	default:
739 		/* not needed */
740 		break;
741 	}
742 
743 	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
744 }
745 
746 
747 /* display watermark setup */
748 /**
749  * dce_v11_0_line_buffer_adjust - Set up the line buffer
750  *
751  * @adev: amdgpu_device pointer
752  * @amdgpu_crtc: the selected display controller
753  * @mode: the current display mode on the selected display
754  * controller
755  *
756  * Setup up the line buffer allocation for
757  * the selected display controller (CIK).
758  * Returns the line buffer size in pixels.
759  */
760 static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
761 				       struct amdgpu_crtc *amdgpu_crtc,
762 				       struct drm_display_mode *mode)
763 {
764 	u32 tmp, buffer_alloc, i, mem_cfg;
765 	u32 pipe_offset = amdgpu_crtc->crtc_id;
766 	/*
767 	 * Line Buffer Setup
768 	 * There are 6 line buffers, one for each display controllers.
769 	 * There are 3 partitions per LB. Select the number of partitions
770 	 * to enable based on the display width.  For display widths larger
771 	 * than 4096, you need use to use 2 display controllers and combine
772 	 * them using the stereo blender.
773 	 */
774 	if (amdgpu_crtc->base.enabled && mode) {
775 		if (mode->crtc_hdisplay < 1920) {
776 			mem_cfg = 1;
777 			buffer_alloc = 2;
778 		} else if (mode->crtc_hdisplay < 2560) {
779 			mem_cfg = 2;
780 			buffer_alloc = 2;
781 		} else if (mode->crtc_hdisplay < 4096) {
782 			mem_cfg = 0;
783 			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
784 		} else {
785 			DRM_DEBUG_KMS("Mode too big for LB!\n");
786 			mem_cfg = 0;
787 			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
788 		}
789 	} else {
790 		mem_cfg = 1;
791 		buffer_alloc = 0;
792 	}
793 
794 	tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
795 	tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
796 	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
797 
798 	tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
799 	tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
800 	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
801 
802 	for (i = 0; i < adev->usec_timeout; i++) {
803 		tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
804 		if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
805 			break;
806 		udelay(1);
807 	}
808 
809 	if (amdgpu_crtc->base.enabled && mode) {
810 		switch (mem_cfg) {
811 		case 0:
812 		default:
813 			return 4096 * 2;
814 		case 1:
815 			return 1920 * 2;
816 		case 2:
817 			return 2560 * 2;
818 		}
819 	}
820 
821 	/* controller not enabled, so no lb used */
822 	return 0;
823 }
824 
825 /**
826  * cik_get_number_of_dram_channels - get the number of dram channels
827  *
828  * @adev: amdgpu_device pointer
829  *
830  * Look up the number of video ram channels (CIK).
831  * Used for display watermark bandwidth calculations
832  * Returns the number of dram channels
833  */
834 static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
835 {
836 	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
837 
838 	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
839 	case 0:
840 	default:
841 		return 1;
842 	case 1:
843 		return 2;
844 	case 2:
845 		return 4;
846 	case 3:
847 		return 8;
848 	case 4:
849 		return 3;
850 	case 5:
851 		return 6;
852 	case 6:
853 		return 10;
854 	case 7:
855 		return 12;
856 	case 8:
857 		return 16;
858 	}
859 }
860 
861 struct dce10_wm_params {
862 	u32 dram_channels; /* number of dram channels */
863 	u32 yclk;          /* bandwidth per dram data pin in kHz */
864 	u32 sclk;          /* engine clock in kHz */
865 	u32 disp_clk;      /* display clock in kHz */
866 	u32 src_width;     /* viewport width */
867 	u32 active_time;   /* active display time in ns */
868 	u32 blank_time;    /* blank time in ns */
869 	bool interlaced;    /* mode is interlaced */
870 	fixed20_12 vsc;    /* vertical scale ratio */
871 	u32 num_heads;     /* number of active crtcs */
872 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
873 	u32 lb_size;       /* line buffer allocated to pipe */
874 	u32 vtaps;         /* vertical scaler taps */
875 };
876 
877 /**
878  * dce_v11_0_dram_bandwidth - get the dram bandwidth
879  *
880  * @wm: watermark calculation data
881  *
882  * Calculate the raw dram bandwidth (CIK).
883  * Used for display watermark bandwidth calculations
884  * Returns the dram bandwidth in MBytes/s
885  */
886 static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm)
887 {
888 	/* Calculate raw DRAM Bandwidth */
889 	fixed20_12 dram_efficiency; /* 0.7 */
890 	fixed20_12 yclk, dram_channels, bandwidth;
891 	fixed20_12 a;
892 
893 	a.full = dfixed_const(1000);
894 	yclk.full = dfixed_const(wm->yclk);
895 	yclk.full = dfixed_div(yclk, a);
896 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
897 	a.full = dfixed_const(10);
898 	dram_efficiency.full = dfixed_const(7);
899 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
900 	bandwidth.full = dfixed_mul(dram_channels, yclk);
901 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
902 
903 	return dfixed_trunc(bandwidth);
904 }
905 
906 /**
907  * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display
908  *
909  * @wm: watermark calculation data
910  *
911  * Calculate the dram bandwidth used for display (CIK).
912  * Used for display watermark bandwidth calculations
913  * Returns the dram bandwidth for display in MBytes/s
914  */
915 static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
916 {
917 	/* Calculate DRAM Bandwidth and the part allocated to display. */
918 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
919 	fixed20_12 yclk, dram_channels, bandwidth;
920 	fixed20_12 a;
921 
922 	a.full = dfixed_const(1000);
923 	yclk.full = dfixed_const(wm->yclk);
924 	yclk.full = dfixed_div(yclk, a);
925 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
926 	a.full = dfixed_const(10);
927 	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
928 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
929 	bandwidth.full = dfixed_mul(dram_channels, yclk);
930 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
931 
932 	return dfixed_trunc(bandwidth);
933 }
934 
935 /**
936  * dce_v11_0_data_return_bandwidth - get the data return bandwidth
937  *
938  * @wm: watermark calculation data
939  *
940  * Calculate the data return bandwidth used for display (CIK).
941  * Used for display watermark bandwidth calculations
942  * Returns the data return bandwidth in MBytes/s
943  */
944 static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm)
945 {
946 	/* Calculate the display Data return Bandwidth */
947 	fixed20_12 return_efficiency; /* 0.8 */
948 	fixed20_12 sclk, bandwidth;
949 	fixed20_12 a;
950 
951 	a.full = dfixed_const(1000);
952 	sclk.full = dfixed_const(wm->sclk);
953 	sclk.full = dfixed_div(sclk, a);
954 	a.full = dfixed_const(10);
955 	return_efficiency.full = dfixed_const(8);
956 	return_efficiency.full = dfixed_div(return_efficiency, a);
957 	a.full = dfixed_const(32);
958 	bandwidth.full = dfixed_mul(a, sclk);
959 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
960 
961 	return dfixed_trunc(bandwidth);
962 }
963 
964 /**
965  * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth
966  *
967  * @wm: watermark calculation data
968  *
969  * Calculate the dmif bandwidth used for display (CIK).
970  * Used for display watermark bandwidth calculations
971  * Returns the dmif bandwidth in MBytes/s
972  */
973 static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
974 {
975 	/* Calculate the DMIF Request Bandwidth */
976 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
977 	fixed20_12 disp_clk, bandwidth;
978 	fixed20_12 a, b;
979 
980 	a.full = dfixed_const(1000);
981 	disp_clk.full = dfixed_const(wm->disp_clk);
982 	disp_clk.full = dfixed_div(disp_clk, a);
983 	a.full = dfixed_const(32);
984 	b.full = dfixed_mul(a, disp_clk);
985 
986 	a.full = dfixed_const(10);
987 	disp_clk_request_efficiency.full = dfixed_const(8);
988 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
989 
990 	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
991 
992 	return dfixed_trunc(bandwidth);
993 }
994 
995 /**
996  * dce_v11_0_available_bandwidth - get the min available bandwidth
997  *
998  * @wm: watermark calculation data
999  *
1000  * Calculate the min available bandwidth used for display (CIK).
1001  * Used for display watermark bandwidth calculations
1002  * Returns the min available bandwidth in MBytes/s
1003  */
1004 static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm)
1005 {
1006 	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
1007 	u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm);
1008 	u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm);
1009 	u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm);
1010 
1011 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
1012 }
1013 
1014 /**
1015  * dce_v11_0_average_bandwidth - get the average available bandwidth
1016  *
1017  * @wm: watermark calculation data
1018  *
1019  * Calculate the average available bandwidth used for display (CIK).
1020  * Used for display watermark bandwidth calculations
1021  * Returns the average available bandwidth in MBytes/s
1022  */
1023 static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm)
1024 {
1025 	/* Calculate the display mode Average Bandwidth
1026 	 * DisplayMode should contain the source and destination dimensions,
1027 	 * timing, etc.
1028 	 */
1029 	fixed20_12 bpp;
1030 	fixed20_12 line_time;
1031 	fixed20_12 src_width;
1032 	fixed20_12 bandwidth;
1033 	fixed20_12 a;
1034 
1035 	a.full = dfixed_const(1000);
1036 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
1037 	line_time.full = dfixed_div(line_time, a);
1038 	bpp.full = dfixed_const(wm->bytes_per_pixel);
1039 	src_width.full = dfixed_const(wm->src_width);
1040 	bandwidth.full = dfixed_mul(src_width, bpp);
1041 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
1042 	bandwidth.full = dfixed_div(bandwidth, line_time);
1043 
1044 	return dfixed_trunc(bandwidth);
1045 }
1046 
1047 /**
1048  * dce_v11_0_latency_watermark - get the latency watermark
1049  *
1050  * @wm: watermark calculation data
1051  *
1052  * Calculate the latency watermark (CIK).
1053  * Used for display watermark bandwidth calculations
1054  * Returns the latency watermark in ns
1055  */
1056 static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
1057 {
1058 	/* First calculate the latency in ns */
1059 	u32 mc_latency = 2000; /* 2000 ns. */
1060 	u32 available_bandwidth = dce_v11_0_available_bandwidth(wm);
1061 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
1062 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
1063 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
1064 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
1065 		(wm->num_heads * cursor_line_pair_return_time);
1066 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
1067 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
1068 	u32 tmp, dmif_size = 12288;
1069 	fixed20_12 a, b, c;
1070 
1071 	if (wm->num_heads == 0)
1072 		return 0;
1073 
1074 	a.full = dfixed_const(2);
1075 	b.full = dfixed_const(1);
1076 	if ((wm->vsc.full > a.full) ||
1077 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
1078 	    (wm->vtaps >= 5) ||
1079 	    ((wm->vsc.full >= a.full) && wm->interlaced))
1080 		max_src_lines_per_dst_line = 4;
1081 	else
1082 		max_src_lines_per_dst_line = 2;
1083 
1084 	a.full = dfixed_const(available_bandwidth);
1085 	b.full = dfixed_const(wm->num_heads);
1086 	a.full = dfixed_div(a, b);
1087 
1088 	b.full = dfixed_const(mc_latency + 512);
1089 	c.full = dfixed_const(wm->disp_clk);
1090 	b.full = dfixed_div(b, c);
1091 
1092 	c.full = dfixed_const(dmif_size);
1093 	b.full = dfixed_div(c, b);
1094 
1095 	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
1096 
1097 	b.full = dfixed_const(1000);
1098 	c.full = dfixed_const(wm->disp_clk);
1099 	b.full = dfixed_div(c, b);
1100 	c.full = dfixed_const(wm->bytes_per_pixel);
1101 	b.full = dfixed_mul(b, c);
1102 
1103 	lb_fill_bw = min(tmp, dfixed_trunc(b));
1104 
1105 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
1106 	b.full = dfixed_const(1000);
1107 	c.full = dfixed_const(lb_fill_bw);
1108 	b.full = dfixed_div(c, b);
1109 	a.full = dfixed_div(a, b);
1110 	line_fill_time = dfixed_trunc(a);
1111 
1112 	if (line_fill_time < wm->active_time)
1113 		return latency;
1114 	else
1115 		return latency + (line_fill_time - wm->active_time);
1116 
1117 }
1118 
1119 /**
1120  * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check
1121  * average and available dram bandwidth
1122  *
1123  * @wm: watermark calculation data
1124  *
1125  * Check if the display average bandwidth fits in the display
1126  * dram bandwidth (CIK).
1127  * Used for display watermark bandwidth calculations
1128  * Returns true if the display fits, false if not.
1129  */
1130 static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
1131 {
1132 	if (dce_v11_0_average_bandwidth(wm) <=
1133 	    (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads))
1134 		return true;
1135 	else
1136 		return false;
1137 }
1138 
1139 /**
1140  * dce_v11_0_average_bandwidth_vs_available_bandwidth - check
1141  * average and available bandwidth
1142  *
1143  * @wm: watermark calculation data
1144  *
1145  * Check if the display average bandwidth fits in the display
1146  * available bandwidth (CIK).
1147  * Used for display watermark bandwidth calculations
1148  * Returns true if the display fits, false if not.
1149  */
1150 static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
1151 {
1152 	if (dce_v11_0_average_bandwidth(wm) <=
1153 	    (dce_v11_0_available_bandwidth(wm) / wm->num_heads))
1154 		return true;
1155 	else
1156 		return false;
1157 }
1158 
1159 /**
1160  * dce_v11_0_check_latency_hiding - check latency hiding
1161  *
1162  * @wm: watermark calculation data
1163  *
1164  * Check latency hiding (CIK).
1165  * Used for display watermark bandwidth calculations
1166  * Returns true if the display fits, false if not.
1167  */
1168 static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm)
1169 {
1170 	u32 lb_partitions = wm->lb_size / wm->src_width;
1171 	u32 line_time = wm->active_time + wm->blank_time;
1172 	u32 latency_tolerant_lines;
1173 	u32 latency_hiding;
1174 	fixed20_12 a;
1175 
1176 	a.full = dfixed_const(1);
1177 	if (wm->vsc.full > a.full)
1178 		latency_tolerant_lines = 1;
1179 	else {
1180 		if (lb_partitions <= (wm->vtaps + 1))
1181 			latency_tolerant_lines = 1;
1182 		else
1183 			latency_tolerant_lines = 2;
1184 	}
1185 
1186 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
1187 
1188 	if (dce_v11_0_latency_watermark(wm) <= latency_hiding)
1189 		return true;
1190 	else
1191 		return false;
1192 }
1193 
1194 /**
1195  * dce_v11_0_program_watermarks - program display watermarks
1196  *
1197  * @adev: amdgpu_device pointer
1198  * @amdgpu_crtc: the selected display controller
1199  * @lb_size: line buffer size
1200  * @num_heads: number of display controllers in use
1201  *
1202  * Calculate and program the display watermarks for the
1203  * selected display controller (CIK).
1204  */
1205 static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
1206 					struct amdgpu_crtc *amdgpu_crtc,
1207 					u32 lb_size, u32 num_heads)
1208 {
1209 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1210 	struct dce10_wm_params wm_low, wm_high;
1211 	u32 pixel_period;
1212 	u32 line_time = 0;
1213 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
1214 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1215 
1216 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
1217 		pixel_period = 1000000 / (u32)mode->clock;
1218 		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
1219 
1220 		/* watermark for high clocks */
1221 		if (adev->pm.dpm_enabled) {
1222 			wm_high.yclk =
1223 				amdgpu_dpm_get_mclk(adev, false) * 10;
1224 			wm_high.sclk =
1225 				amdgpu_dpm_get_sclk(adev, false) * 10;
1226 		} else {
1227 			wm_high.yclk = adev->pm.current_mclk * 10;
1228 			wm_high.sclk = adev->pm.current_sclk * 10;
1229 		}
1230 
1231 		wm_high.disp_clk = mode->clock;
1232 		wm_high.src_width = mode->crtc_hdisplay;
1233 		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
1234 		wm_high.blank_time = line_time - wm_high.active_time;
1235 		wm_high.interlaced = false;
1236 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1237 			wm_high.interlaced = true;
1238 		wm_high.vsc = amdgpu_crtc->vsc;
1239 		wm_high.vtaps = 1;
1240 		if (amdgpu_crtc->rmx_type != RMX_OFF)
1241 			wm_high.vtaps = 2;
1242 		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1243 		wm_high.lb_size = lb_size;
1244 		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1245 		wm_high.num_heads = num_heads;
1246 
1247 		/* set for high clocks */
1248 		latency_watermark_a = min(dce_v11_0_latency_watermark(&wm_high), (u32)65535);
1249 
1250 		/* possibly force display priority to high */
1251 		/* should really do this at mode validation time... */
1252 		if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1253 		    !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1254 		    !dce_v11_0_check_latency_hiding(&wm_high) ||
1255 		    (adev->mode_info.disp_priority == 2)) {
1256 			DRM_DEBUG_KMS("force priority to high\n");
1257 		}
1258 
1259 		/* watermark for low clocks */
1260 		if (adev->pm.dpm_enabled) {
1261 			wm_low.yclk =
1262 				amdgpu_dpm_get_mclk(adev, true) * 10;
1263 			wm_low.sclk =
1264 				amdgpu_dpm_get_sclk(adev, true) * 10;
1265 		} else {
1266 			wm_low.yclk = adev->pm.current_mclk * 10;
1267 			wm_low.sclk = adev->pm.current_sclk * 10;
1268 		}
1269 
1270 		wm_low.disp_clk = mode->clock;
1271 		wm_low.src_width = mode->crtc_hdisplay;
1272 		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
1273 		wm_low.blank_time = line_time - wm_low.active_time;
1274 		wm_low.interlaced = false;
1275 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1276 			wm_low.interlaced = true;
1277 		wm_low.vsc = amdgpu_crtc->vsc;
1278 		wm_low.vtaps = 1;
1279 		if (amdgpu_crtc->rmx_type != RMX_OFF)
1280 			wm_low.vtaps = 2;
1281 		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1282 		wm_low.lb_size = lb_size;
1283 		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1284 		wm_low.num_heads = num_heads;
1285 
1286 		/* set for low clocks */
1287 		latency_watermark_b = min(dce_v11_0_latency_watermark(&wm_low), (u32)65535);
1288 
1289 		/* possibly force display priority to high */
1290 		/* should really do this at mode validation time... */
1291 		if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1292 		    !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1293 		    !dce_v11_0_check_latency_hiding(&wm_low) ||
1294 		    (adev->mode_info.disp_priority == 2)) {
1295 			DRM_DEBUG_KMS("force priority to high\n");
1296 		}
1297 		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1298 	}
1299 
1300 	/* select wm A */
1301 	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1302 	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
1303 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1304 	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
1305 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
1306 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
1307 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1308 	/* select wm B */
1309 	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
1310 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1311 	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
1312 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
1313 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
1314 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1315 	/* restore original selection */
1316 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1317 
1318 	/* save values for DPM */
1319 	amdgpu_crtc->line_time = line_time;
1320 	amdgpu_crtc->wm_high = latency_watermark_a;
1321 	amdgpu_crtc->wm_low = latency_watermark_b;
1322 	/* Save number of lines the linebuffer leads before the scanout */
1323 	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1324 }
1325 
1326 /**
1327  * dce_v11_0_bandwidth_update - program display watermarks
1328  *
1329  * @adev: amdgpu_device pointer
1330  *
1331  * Calculate and program the display watermarks and line
1332  * buffer allocation (CIK).
1333  */
1334 static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
1335 {
1336 	struct drm_display_mode *mode = NULL;
1337 	u32 num_heads = 0, lb_size;
1338 	int i;
1339 
1340 	amdgpu_update_display_priority(adev);
1341 
1342 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1343 		if (adev->mode_info.crtcs[i]->base.enabled)
1344 			num_heads++;
1345 	}
1346 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1347 		mode = &adev->mode_info.crtcs[i]->base.mode;
1348 		lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1349 		dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1350 					    lb_size, num_heads);
1351 	}
1352 }
1353 
1354 static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev)
1355 {
1356 	int i;
1357 	u32 offset, tmp;
1358 
1359 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1360 		offset = adev->mode_info.audio.pin[i].offset;
1361 		tmp = RREG32_AUDIO_ENDPT(offset,
1362 					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1363 		if (((tmp &
1364 		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1365 		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1366 			adev->mode_info.audio.pin[i].connected = false;
1367 		else
1368 			adev->mode_info.audio.pin[i].connected = true;
1369 	}
1370 }
1371 
1372 static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev)
1373 {
1374 	int i;
1375 
1376 	dce_v11_0_audio_get_connected_pins(adev);
1377 
1378 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1379 		if (adev->mode_info.audio.pin[i].connected)
1380 			return &adev->mode_info.audio.pin[i];
1381 	}
1382 	DRM_ERROR("No connected audio pins found!\n");
1383 	return NULL;
1384 }
1385 
1386 static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1387 {
1388 	struct amdgpu_device *adev = encoder->dev->dev_private;
1389 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1390 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1391 	u32 tmp;
1392 
1393 	if (!dig || !dig->afmt || !dig->afmt->pin)
1394 		return;
1395 
1396 	tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
1397 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
1398 	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
1399 }
1400 
1401 static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
1402 						struct drm_display_mode *mode)
1403 {
1404 	struct amdgpu_device *adev = encoder->dev->dev_private;
1405 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1406 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1407 	struct drm_connector *connector;
1408 	struct amdgpu_connector *amdgpu_connector = NULL;
1409 	u32 tmp;
1410 	int interlace = 0;
1411 
1412 	if (!dig || !dig->afmt || !dig->afmt->pin)
1413 		return;
1414 
1415 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1416 		if (connector->encoder == encoder) {
1417 			amdgpu_connector = to_amdgpu_connector(connector);
1418 			break;
1419 		}
1420 	}
1421 
1422 	if (!amdgpu_connector) {
1423 		DRM_ERROR("Couldn't find encoder's connector\n");
1424 		return;
1425 	}
1426 
1427 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1428 		interlace = 1;
1429 	if (connector->latency_present[interlace]) {
1430 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1431 				    VIDEO_LIPSYNC, connector->video_latency[interlace]);
1432 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1433 				    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1434 	} else {
1435 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1436 				    VIDEO_LIPSYNC, 0);
1437 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1438 				    AUDIO_LIPSYNC, 0);
1439 	}
1440 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1441 			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1442 }
1443 
1444 static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1445 {
1446 	struct amdgpu_device *adev = encoder->dev->dev_private;
1447 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1448 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1449 	struct drm_connector *connector;
1450 	struct amdgpu_connector *amdgpu_connector = NULL;
1451 	u32 tmp;
1452 	u8 *sadb = NULL;
1453 	int sad_count;
1454 
1455 	if (!dig || !dig->afmt || !dig->afmt->pin)
1456 		return;
1457 
1458 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1459 		if (connector->encoder == encoder) {
1460 			amdgpu_connector = to_amdgpu_connector(connector);
1461 			break;
1462 		}
1463 	}
1464 
1465 	if (!amdgpu_connector) {
1466 		DRM_ERROR("Couldn't find encoder's connector\n");
1467 		return;
1468 	}
1469 
1470 	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1471 	if (sad_count < 0) {
1472 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1473 		sad_count = 0;
1474 	}
1475 
1476 	/* program the speaker allocation */
1477 	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1478 				 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1479 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1480 			    DP_CONNECTION, 0);
1481 	/* set HDMI mode */
1482 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1483 			    HDMI_CONNECTION, 1);
1484 	if (sad_count)
1485 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1486 				    SPEAKER_ALLOCATION, sadb[0]);
1487 	else
1488 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1489 				    SPEAKER_ALLOCATION, 5); /* stereo */
1490 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1491 			   ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1492 
1493 	kfree(sadb);
1494 }
1495 
1496 static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
1497 {
1498 	struct amdgpu_device *adev = encoder->dev->dev_private;
1499 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1500 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1501 	struct drm_connector *connector;
1502 	struct amdgpu_connector *amdgpu_connector = NULL;
1503 	struct cea_sad *sads;
1504 	int i, sad_count;
1505 
1506 	static const u16 eld_reg_to_type[][2] = {
1507 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1508 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1509 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1510 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1511 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1512 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1513 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1514 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1515 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1516 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1517 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1518 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1519 	};
1520 
1521 	if (!dig || !dig->afmt || !dig->afmt->pin)
1522 		return;
1523 
1524 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1525 		if (connector->encoder == encoder) {
1526 			amdgpu_connector = to_amdgpu_connector(connector);
1527 			break;
1528 		}
1529 	}
1530 
1531 	if (!amdgpu_connector) {
1532 		DRM_ERROR("Couldn't find encoder's connector\n");
1533 		return;
1534 	}
1535 
1536 	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1537 	if (sad_count <= 0) {
1538 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1539 		return;
1540 	}
1541 	BUG_ON(!sads);
1542 
1543 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1544 		u32 tmp = 0;
1545 		u8 stereo_freqs = 0;
1546 		int max_channels = -1;
1547 		int j;
1548 
1549 		for (j = 0; j < sad_count; j++) {
1550 			struct cea_sad *sad = &sads[j];
1551 
1552 			if (sad->format == eld_reg_to_type[i][1]) {
1553 				if (sad->channels > max_channels) {
1554 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1555 							    MAX_CHANNELS, sad->channels);
1556 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1557 							    DESCRIPTOR_BYTE_2, sad->byte2);
1558 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1559 							    SUPPORTED_FREQUENCIES, sad->freq);
1560 					max_channels = sad->channels;
1561 				}
1562 
1563 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1564 					stereo_freqs |= sad->freq;
1565 				else
1566 					break;
1567 			}
1568 		}
1569 
1570 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1571 				    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
1572 		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
1573 	}
1574 
1575 	kfree(sads);
1576 }
1577 
1578 static void dce_v11_0_audio_enable(struct amdgpu_device *adev,
1579 				  struct amdgpu_audio_pin *pin,
1580 				  bool enable)
1581 {
1582 	if (!pin)
1583 		return;
1584 
1585 	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1586 			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1587 }
1588 
1589 static const u32 pin_offsets[] =
1590 {
1591 	AUD0_REGISTER_OFFSET,
1592 	AUD1_REGISTER_OFFSET,
1593 	AUD2_REGISTER_OFFSET,
1594 	AUD3_REGISTER_OFFSET,
1595 	AUD4_REGISTER_OFFSET,
1596 	AUD5_REGISTER_OFFSET,
1597 	AUD6_REGISTER_OFFSET,
1598 };
1599 
1600 static int dce_v11_0_audio_init(struct amdgpu_device *adev)
1601 {
1602 	int i;
1603 
1604 	if (!amdgpu_audio)
1605 		return 0;
1606 
1607 	adev->mode_info.audio.enabled = true;
1608 
1609 	switch (adev->asic_type) {
1610 	case CHIP_CARRIZO:
1611 	case CHIP_STONEY:
1612 		adev->mode_info.audio.num_pins = 7;
1613 		break;
1614 	case CHIP_POLARIS10:
1615 		adev->mode_info.audio.num_pins = 8;
1616 		break;
1617 	case CHIP_POLARIS11:
1618 		adev->mode_info.audio.num_pins = 6;
1619 		break;
1620 	default:
1621 		return -EINVAL;
1622 	}
1623 
1624 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1625 		adev->mode_info.audio.pin[i].channels = -1;
1626 		adev->mode_info.audio.pin[i].rate = -1;
1627 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1628 		adev->mode_info.audio.pin[i].status_bits = 0;
1629 		adev->mode_info.audio.pin[i].category_code = 0;
1630 		adev->mode_info.audio.pin[i].connected = false;
1631 		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1632 		adev->mode_info.audio.pin[i].id = i;
1633 		/* disable audio.  it will be set up later */
1634 		/* XXX remove once we switch to ip funcs */
1635 		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1636 	}
1637 
1638 	return 0;
1639 }
1640 
1641 static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
1642 {
1643 	int i;
1644 
1645 	if (!amdgpu_audio)
1646 		return;
1647 
1648 	if (!adev->mode_info.audio.enabled)
1649 		return;
1650 
1651 	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1652 		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1653 
1654 	adev->mode_info.audio.enabled = false;
1655 }
1656 
1657 /*
1658  * update the N and CTS parameters for a given pixel clock rate
1659  */
1660 static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1661 {
1662 	struct drm_device *dev = encoder->dev;
1663 	struct amdgpu_device *adev = dev->dev_private;
1664 	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1665 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1666 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1667 	u32 tmp;
1668 
1669 	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
1670 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
1671 	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
1672 	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
1673 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
1674 	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
1675 
1676 	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
1677 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
1678 	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
1679 	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
1680 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
1681 	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
1682 
1683 	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
1684 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
1685 	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
1686 	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
1687 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
1688 	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
1689 
1690 }
1691 
1692 /*
1693  * build a HDMI Video Info Frame
1694  */
1695 static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1696 					       void *buffer, size_t size)
1697 {
1698 	struct drm_device *dev = encoder->dev;
1699 	struct amdgpu_device *adev = dev->dev_private;
1700 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1701 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1702 	uint8_t *frame = buffer + 3;
1703 	uint8_t *header = buffer;
1704 
1705 	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
1706 		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1707 	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
1708 		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1709 	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
1710 		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1711 	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
1712 		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1713 }
1714 
1715 static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1716 {
1717 	struct drm_device *dev = encoder->dev;
1718 	struct amdgpu_device *adev = dev->dev_private;
1719 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1720 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1721 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1722 	u32 dto_phase = 24 * 1000;
1723 	u32 dto_modulo = clock;
1724 	u32 tmp;
1725 
1726 	if (!dig || !dig->afmt)
1727 		return;
1728 
1729 	/* XXX two dtos; generally use dto0 for hdmi */
1730 	/* Express [24MHz / target pixel clock] as an exact rational
1731 	 * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
1732 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
1733 	 */
1734 	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1735 	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
1736 			    amdgpu_crtc->crtc_id);
1737 	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1738 	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1739 	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1740 }
1741 
1742 /*
1743  * update the info frames with the data from the current display mode
1744  */
1745 static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
1746 				  struct drm_display_mode *mode)
1747 {
1748 	struct drm_device *dev = encoder->dev;
1749 	struct amdgpu_device *adev = dev->dev_private;
1750 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1751 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1752 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1753 	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1754 	struct hdmi_avi_infoframe frame;
1755 	ssize_t err;
1756 	u32 tmp;
1757 	int bpc = 8;
1758 
1759 	if (!dig || !dig->afmt)
1760 		return;
1761 
1762 	/* Silent, r600_hdmi_enable will raise WARN for us */
1763 	if (!dig->afmt->enabled)
1764 		return;
1765 
1766 	/* hdmi deep color mode general control packets setup, if bpc > 8 */
1767 	if (encoder->crtc) {
1768 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1769 		bpc = amdgpu_crtc->bpc;
1770 	}
1771 
1772 	/* disable audio prior to setting up hw */
1773 	dig->afmt->pin = dce_v11_0_audio_get_pin(adev);
1774 	dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
1775 
1776 	dce_v11_0_audio_set_dto(encoder, mode->clock);
1777 
1778 	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1779 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
1780 	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
1781 
1782 	WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
1783 
1784 	tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
1785 	switch (bpc) {
1786 	case 0:
1787 	case 6:
1788 	case 8:
1789 	case 16:
1790 	default:
1791 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
1792 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
1793 		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1794 			  connector->name, bpc);
1795 		break;
1796 	case 10:
1797 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1798 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
1799 		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1800 			  connector->name);
1801 		break;
1802 	case 12:
1803 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1804 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
1805 		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1806 			  connector->name);
1807 		break;
1808 	}
1809 	WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
1810 
1811 	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1812 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
1813 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
1814 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
1815 	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
1816 
1817 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1818 	/* enable audio info frames (frames won't be set until audio is enabled) */
1819 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1820 	/* required for audio info values to be updated */
1821 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1822 	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1823 
1824 	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1825 	/* required for audio info values to be updated */
1826 	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1827 	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1828 
1829 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1830 	/* anything other than 0 */
1831 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
1832 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1833 
1834 	WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
1835 
1836 	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1837 	/* set the default audio delay */
1838 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1839 	/* should be suffient for all audio modes and small enough for all hblanks */
1840 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1841 	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1842 
1843 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1844 	/* allow 60958 channel status fields to be updated */
1845 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1846 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1847 
1848 	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
1849 	if (bpc > 8)
1850 		/* clear SW CTS value */
1851 		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
1852 	else
1853 		/* select SW CTS value */
1854 		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
1855 	/* allow hw to sent ACR packets when required */
1856 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
1857 	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
1858 
1859 	dce_v11_0_afmt_update_ACR(encoder, mode->clock);
1860 
1861 	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1862 	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1863 	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1864 
1865 	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1866 	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1867 	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1868 
1869 	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1870 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1871 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1872 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1873 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1874 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1875 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1876 	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1877 
1878 	dce_v11_0_audio_write_speaker_allocation(encoder);
1879 
1880 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
1881 	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1882 
1883 	dce_v11_0_afmt_audio_select_pin(encoder);
1884 	dce_v11_0_audio_write_sad_regs(encoder);
1885 	dce_v11_0_audio_write_latency_fields(encoder, mode);
1886 
1887 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
1888 	if (err < 0) {
1889 		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1890 		return;
1891 	}
1892 
1893 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1894 	if (err < 0) {
1895 		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1896 		return;
1897 	}
1898 
1899 	dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1900 
1901 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1902 	/* enable AVI info frames */
1903 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1904 	/* required for audio info values to be updated */
1905 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1906 	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1907 
1908 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1909 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1910 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1911 
1912 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1913 	/* send audio packets */
1914 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1915 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1916 
1917 	WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
1918 	WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
1919 	WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
1920 	WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
1921 
1922 	/* enable audio after to setting up hw */
1923 	dce_v11_0_audio_enable(adev, dig->afmt->pin, true);
1924 }
1925 
1926 static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1927 {
1928 	struct drm_device *dev = encoder->dev;
1929 	struct amdgpu_device *adev = dev->dev_private;
1930 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1931 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1932 
1933 	if (!dig || !dig->afmt)
1934 		return;
1935 
1936 	/* Silent, r600_hdmi_enable will raise WARN for us */
1937 	if (enable && dig->afmt->enabled)
1938 		return;
1939 	if (!enable && !dig->afmt->enabled)
1940 		return;
1941 
1942 	if (!enable && dig->afmt->pin) {
1943 		dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
1944 		dig->afmt->pin = NULL;
1945 	}
1946 
1947 	dig->afmt->enabled = enable;
1948 
1949 	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1950 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1951 }
1952 
1953 static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
1954 {
1955 	int i;
1956 
1957 	for (i = 0; i < adev->mode_info.num_dig; i++)
1958 		adev->mode_info.afmt[i] = NULL;
1959 
1960 	/* DCE11 has audio blocks tied to DIG encoders */
1961 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1962 		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1963 		if (adev->mode_info.afmt[i]) {
1964 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1965 			adev->mode_info.afmt[i]->id = i;
1966 		} else {
1967 			int j;
1968 			for (j = 0; j < i; j++) {
1969 				kfree(adev->mode_info.afmt[j]);
1970 				adev->mode_info.afmt[j] = NULL;
1971 			}
1972 			return -ENOMEM;
1973 		}
1974 	}
1975 	return 0;
1976 }
1977 
1978 static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
1979 {
1980 	int i;
1981 
1982 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1983 		kfree(adev->mode_info.afmt[i]);
1984 		adev->mode_info.afmt[i] = NULL;
1985 	}
1986 }
1987 
1988 static const u32 vga_control_regs[6] =
1989 {
1990 	mmD1VGA_CONTROL,
1991 	mmD2VGA_CONTROL,
1992 	mmD3VGA_CONTROL,
1993 	mmD4VGA_CONTROL,
1994 	mmD5VGA_CONTROL,
1995 	mmD6VGA_CONTROL,
1996 };
1997 
1998 static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
1999 {
2000 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2001 	struct drm_device *dev = crtc->dev;
2002 	struct amdgpu_device *adev = dev->dev_private;
2003 	u32 vga_control;
2004 
2005 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
2006 	if (enable)
2007 		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
2008 	else
2009 		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
2010 }
2011 
2012 static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
2013 {
2014 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2015 	struct drm_device *dev = crtc->dev;
2016 	struct amdgpu_device *adev = dev->dev_private;
2017 
2018 	if (enable)
2019 		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
2020 	else
2021 		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
2022 }
2023 
2024 static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
2025 				     struct drm_framebuffer *fb,
2026 				     int x, int y, int atomic)
2027 {
2028 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2029 	struct drm_device *dev = crtc->dev;
2030 	struct amdgpu_device *adev = dev->dev_private;
2031 	struct amdgpu_framebuffer *amdgpu_fb;
2032 	struct drm_framebuffer *target_fb;
2033 	struct drm_gem_object *obj;
2034 	struct amdgpu_bo *rbo;
2035 	uint64_t fb_location, tiling_flags;
2036 	uint32_t fb_format, fb_pitch_pixels;
2037 	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
2038 	u32 pipe_config;
2039 	u32 tmp, viewport_w, viewport_h;
2040 	int r;
2041 	bool bypass_lut = false;
2042 
2043 	/* no fb bound */
2044 	if (!atomic && !crtc->primary->fb) {
2045 		DRM_DEBUG_KMS("No FB bound\n");
2046 		return 0;
2047 	}
2048 
2049 	if (atomic) {
2050 		amdgpu_fb = to_amdgpu_framebuffer(fb);
2051 		target_fb = fb;
2052 	} else {
2053 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2054 		target_fb = crtc->primary->fb;
2055 	}
2056 
2057 	/* If atomic, assume fb object is pinned & idle & fenced and
2058 	 * just update base pointers
2059 	 */
2060 	obj = amdgpu_fb->obj;
2061 	rbo = gem_to_amdgpu_bo(obj);
2062 	r = amdgpu_bo_reserve(rbo, false);
2063 	if (unlikely(r != 0))
2064 		return r;
2065 
2066 	if (atomic) {
2067 		fb_location = amdgpu_bo_gpu_offset(rbo);
2068 	} else {
2069 		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
2070 		if (unlikely(r != 0)) {
2071 			amdgpu_bo_unreserve(rbo);
2072 			return -EINVAL;
2073 		}
2074 	}
2075 
2076 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
2077 	amdgpu_bo_unreserve(rbo);
2078 
2079 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2080 
2081 	switch (target_fb->pixel_format) {
2082 	case DRM_FORMAT_C8:
2083 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
2084 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
2085 		break;
2086 	case DRM_FORMAT_XRGB4444:
2087 	case DRM_FORMAT_ARGB4444:
2088 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2089 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
2090 #ifdef __BIG_ENDIAN
2091 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2092 					ENDIAN_8IN16);
2093 #endif
2094 		break;
2095 	case DRM_FORMAT_XRGB1555:
2096 	case DRM_FORMAT_ARGB1555:
2097 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2098 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
2099 #ifdef __BIG_ENDIAN
2100 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2101 					ENDIAN_8IN16);
2102 #endif
2103 		break;
2104 	case DRM_FORMAT_BGRX5551:
2105 	case DRM_FORMAT_BGRA5551:
2106 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2107 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
2108 #ifdef __BIG_ENDIAN
2109 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2110 					ENDIAN_8IN16);
2111 #endif
2112 		break;
2113 	case DRM_FORMAT_RGB565:
2114 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2115 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
2116 #ifdef __BIG_ENDIAN
2117 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2118 					ENDIAN_8IN16);
2119 #endif
2120 		break;
2121 	case DRM_FORMAT_XRGB8888:
2122 	case DRM_FORMAT_ARGB8888:
2123 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
2124 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
2125 #ifdef __BIG_ENDIAN
2126 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2127 					ENDIAN_8IN32);
2128 #endif
2129 		break;
2130 	case DRM_FORMAT_XRGB2101010:
2131 	case DRM_FORMAT_ARGB2101010:
2132 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
2133 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
2134 #ifdef __BIG_ENDIAN
2135 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2136 					ENDIAN_8IN32);
2137 #endif
2138 		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
2139 		bypass_lut = true;
2140 		break;
2141 	case DRM_FORMAT_BGRX1010102:
2142 	case DRM_FORMAT_BGRA1010102:
2143 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
2144 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
2145 #ifdef __BIG_ENDIAN
2146 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2147 					ENDIAN_8IN32);
2148 #endif
2149 		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
2150 		bypass_lut = true;
2151 		break;
2152 	default:
2153 		DRM_ERROR("Unsupported screen format %s\n",
2154 			drm_get_format_name(target_fb->pixel_format));
2155 		return -EINVAL;
2156 	}
2157 
2158 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
2159 		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
2160 
2161 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2162 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2163 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2164 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2165 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2166 
2167 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
2168 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
2169 					  ARRAY_2D_TILED_THIN1);
2170 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
2171 					  tile_split);
2172 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
2173 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
2174 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
2175 					  mtaspect);
2176 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
2177 					  ADDR_SURF_MICRO_TILING_DISPLAY);
2178 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2179 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
2180 					  ARRAY_1D_TILED_THIN1);
2181 	}
2182 
2183 	fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
2184 				  pipe_config);
2185 
2186 	dce_v11_0_vga_enable(crtc, false);
2187 
2188 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2189 	       upper_32_bits(fb_location));
2190 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2191 	       upper_32_bits(fb_location));
2192 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2193 	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2194 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2195 	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2196 	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2197 	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2198 
2199 	/*
2200 	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
2201 	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2202 	 * retain the full precision throughout the pipeline.
2203 	 */
2204 	tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
2205 	if (bypass_lut)
2206 		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
2207 	else
2208 		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
2209 	WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
2210 
2211 	if (bypass_lut)
2212 		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2213 
2214 	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2215 	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2216 	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2217 	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2218 	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2219 	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2220 
2221 	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
2222 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2223 
2224 	dce_v11_0_grph_enable(crtc, true);
2225 
2226 	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2227 	       target_fb->height);
2228 
2229 	x &= ~3;
2230 	y &= ~1;
2231 	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2232 	       (x << 16) | y);
2233 	viewport_w = crtc->mode.hdisplay;
2234 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2235 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2236 	       (viewport_w << 16) | viewport_h);
2237 
2238 	/* pageflip setup */
2239 	/* make sure flip is at vb rather than hb */
2240 	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
2241 	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
2242 			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
2243 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2244 
2245 	/* set pageflip to happen only at start of vblank interval (front porch) */
2246 	WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
2247 
2248 	if (!atomic && fb && fb != crtc->primary->fb) {
2249 		amdgpu_fb = to_amdgpu_framebuffer(fb);
2250 		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2251 		r = amdgpu_bo_reserve(rbo, false);
2252 		if (unlikely(r != 0))
2253 			return r;
2254 		amdgpu_bo_unpin(rbo);
2255 		amdgpu_bo_unreserve(rbo);
2256 	}
2257 
2258 	/* Bytes per pixel may have changed */
2259 	dce_v11_0_bandwidth_update(adev);
2260 
2261 	return 0;
2262 }
2263 
2264 static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
2265 				     struct drm_display_mode *mode)
2266 {
2267 	struct drm_device *dev = crtc->dev;
2268 	struct amdgpu_device *adev = dev->dev_private;
2269 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2270 	u32 tmp;
2271 
2272 	tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
2273 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2274 		tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
2275 	else
2276 		tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
2277 	WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
2278 }
2279 
2280 static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
2281 {
2282 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2283 	struct drm_device *dev = crtc->dev;
2284 	struct amdgpu_device *adev = dev->dev_private;
2285 	int i;
2286 	u32 tmp;
2287 
2288 	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2289 
2290 	tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
2291 	tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
2292 	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2293 
2294 	tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
2295 	tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
2296 	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2297 
2298 	tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2299 	tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
2300 	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2301 
2302 	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2303 
2304 	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2305 	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2306 	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2307 
2308 	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2309 	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2310 	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2311 
2312 	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2313 	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2314 
2315 	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2316 	for (i = 0; i < 256; i++) {
2317 		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2318 		       (amdgpu_crtc->lut_r[i] << 20) |
2319 		       (amdgpu_crtc->lut_g[i] << 10) |
2320 		       (amdgpu_crtc->lut_b[i] << 0));
2321 	}
2322 
2323 	tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2324 	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
2325 	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
2326 	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0);
2327 	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2328 
2329 	tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
2330 	tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
2331 	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2332 
2333 	tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2334 	tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
2335 	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2336 
2337 	tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
2338 	tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
2339 	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2340 
2341 	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2342 	WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
2343 	/* XXX this only needs to be programmed once per crtc at startup,
2344 	 * not sure where the best place for it is
2345 	 */
2346 	tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
2347 	tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
2348 	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2349 }
2350 
2351 static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder)
2352 {
2353 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2354 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2355 
2356 	switch (amdgpu_encoder->encoder_id) {
2357 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2358 		if (dig->linkb)
2359 			return 1;
2360 		else
2361 			return 0;
2362 		break;
2363 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2364 		if (dig->linkb)
2365 			return 3;
2366 		else
2367 			return 2;
2368 		break;
2369 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2370 		if (dig->linkb)
2371 			return 5;
2372 		else
2373 			return 4;
2374 		break;
2375 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2376 		return 6;
2377 		break;
2378 	default:
2379 		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2380 		return 0;
2381 	}
2382 }
2383 
2384 /**
2385  * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc.
2386  *
2387  * @crtc: drm crtc
2388  *
2389  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2390  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2391  * monitors a dedicated PPLL must be used.  If a particular board has
2392  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2393  * as there is no need to program the PLL itself.  If we are not able to
2394  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2395  * avoid messing up an existing monitor.
2396  *
2397  * Asic specific PLL information
2398  *
2399  * DCE 10.x
2400  * Tonga
2401  * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2402  * CI
2403  * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2404  *
2405  */
2406 static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
2407 {
2408 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2409 	struct drm_device *dev = crtc->dev;
2410 	struct amdgpu_device *adev = dev->dev_private;
2411 	u32 pll_in_use;
2412 	int pll;
2413 
2414 	if ((adev->asic_type == CHIP_POLARIS10) ||
2415 	    (adev->asic_type == CHIP_POLARIS11)) {
2416 		struct amdgpu_encoder *amdgpu_encoder =
2417 			to_amdgpu_encoder(amdgpu_crtc->encoder);
2418 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2419 
2420 		if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2421 			return ATOM_DP_DTO;
2422 		/* use the same PPLL for all monitors with the same clock */
2423 		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2424 		if (pll != ATOM_PPLL_INVALID)
2425 			return pll;
2426 
2427 		switch (amdgpu_encoder->encoder_id) {
2428 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2429 			if (dig->linkb)
2430 				return ATOM_COMBOPHY_PLL1;
2431 			else
2432 				return ATOM_COMBOPHY_PLL0;
2433 			break;
2434 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2435 			if (dig->linkb)
2436 				return ATOM_COMBOPHY_PLL3;
2437 			else
2438 				return ATOM_COMBOPHY_PLL2;
2439 			break;
2440 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2441 			if (dig->linkb)
2442 				return ATOM_COMBOPHY_PLL5;
2443 			else
2444 				return ATOM_COMBOPHY_PLL4;
2445 			break;
2446 		default:
2447 			DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2448 			return ATOM_PPLL_INVALID;
2449 		}
2450 	}
2451 
2452 	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2453 		if (adev->clock.dp_extclk)
2454 			/* skip PPLL programming if using ext clock */
2455 			return ATOM_PPLL_INVALID;
2456 		else {
2457 			/* use the same PPLL for all DP monitors */
2458 			pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2459 			if (pll != ATOM_PPLL_INVALID)
2460 				return pll;
2461 		}
2462 	} else {
2463 		/* use the same PPLL for all monitors with the same clock */
2464 		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2465 		if (pll != ATOM_PPLL_INVALID)
2466 			return pll;
2467 	}
2468 
2469 	/* XXX need to determine what plls are available on each DCE11 part */
2470 	pll_in_use = amdgpu_pll_get_use_mask(crtc);
2471 	if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
2472 		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2473 			return ATOM_PPLL1;
2474 		if (!(pll_in_use & (1 << ATOM_PPLL0)))
2475 			return ATOM_PPLL0;
2476 		DRM_ERROR("unable to allocate a PPLL\n");
2477 		return ATOM_PPLL_INVALID;
2478 	} else {
2479 		if (!(pll_in_use & (1 << ATOM_PPLL2)))
2480 			return ATOM_PPLL2;
2481 		if (!(pll_in_use & (1 << ATOM_PPLL1)))
2482 			return ATOM_PPLL1;
2483 		if (!(pll_in_use & (1 << ATOM_PPLL0)))
2484 			return ATOM_PPLL0;
2485 		DRM_ERROR("unable to allocate a PPLL\n");
2486 		return ATOM_PPLL_INVALID;
2487 	}
2488 	return ATOM_PPLL_INVALID;
2489 }
2490 
2491 static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2492 {
2493 	struct amdgpu_device *adev = crtc->dev->dev_private;
2494 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2495 	uint32_t cur_lock;
2496 
2497 	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2498 	if (lock)
2499 		cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
2500 	else
2501 		cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
2502 	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2503 }
2504 
2505 static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
2506 {
2507 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2508 	struct amdgpu_device *adev = crtc->dev->dev_private;
2509 	u32 tmp;
2510 
2511 	tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2512 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
2513 	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2514 }
2515 
2516 static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
2517 {
2518 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2519 	struct amdgpu_device *adev = crtc->dev->dev_private;
2520 	u32 tmp;
2521 
2522 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2523 	       upper_32_bits(amdgpu_crtc->cursor_addr));
2524 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2525 	       lower_32_bits(amdgpu_crtc->cursor_addr));
2526 
2527 	tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2528 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
2529 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
2530 	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2531 }
2532 
2533 static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
2534 					int x, int y)
2535 {
2536 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2537 	struct amdgpu_device *adev = crtc->dev->dev_private;
2538 	int xorigin = 0, yorigin = 0;
2539 
2540 	/* avivo cursor are offset into the total surface */
2541 	x += crtc->x;
2542 	y += crtc->y;
2543 	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2544 
2545 	if (x < 0) {
2546 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2547 		x = 0;
2548 	}
2549 	if (y < 0) {
2550 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2551 		y = 0;
2552 	}
2553 
2554 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2555 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2556 	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2557 	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2558 
2559 	amdgpu_crtc->cursor_x = x;
2560 	amdgpu_crtc->cursor_y = y;
2561 
2562 	return 0;
2563 }
2564 
2565 static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
2566 				      int x, int y)
2567 {
2568 	int ret;
2569 
2570 	dce_v11_0_lock_cursor(crtc, true);
2571 	ret = dce_v11_0_cursor_move_locked(crtc, x, y);
2572 	dce_v11_0_lock_cursor(crtc, false);
2573 
2574 	return ret;
2575 }
2576 
2577 static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
2578 				      struct drm_file *file_priv,
2579 				      uint32_t handle,
2580 				      uint32_t width,
2581 				      uint32_t height,
2582 				      int32_t hot_x,
2583 				      int32_t hot_y)
2584 {
2585 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2586 	struct drm_gem_object *obj;
2587 	struct amdgpu_bo *aobj;
2588 	int ret;
2589 
2590 	if (!handle) {
2591 		/* turn off cursor */
2592 		dce_v11_0_hide_cursor(crtc);
2593 		obj = NULL;
2594 		goto unpin;
2595 	}
2596 
2597 	if ((width > amdgpu_crtc->max_cursor_width) ||
2598 	    (height > amdgpu_crtc->max_cursor_height)) {
2599 		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2600 		return -EINVAL;
2601 	}
2602 
2603 	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
2604 	if (!obj) {
2605 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2606 		return -ENOENT;
2607 	}
2608 
2609 	aobj = gem_to_amdgpu_bo(obj);
2610 	ret = amdgpu_bo_reserve(aobj, false);
2611 	if (ret != 0) {
2612 		drm_gem_object_unreference_unlocked(obj);
2613 		return ret;
2614 	}
2615 
2616 	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
2617 	amdgpu_bo_unreserve(aobj);
2618 	if (ret) {
2619 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2620 		drm_gem_object_unreference_unlocked(obj);
2621 		return ret;
2622 	}
2623 
2624 	amdgpu_crtc->cursor_width = width;
2625 	amdgpu_crtc->cursor_height = height;
2626 
2627 	dce_v11_0_lock_cursor(crtc, true);
2628 
2629 	if (hot_x != amdgpu_crtc->cursor_hot_x ||
2630 	    hot_y != amdgpu_crtc->cursor_hot_y) {
2631 		int x, y;
2632 
2633 		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2634 		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2635 
2636 		dce_v11_0_cursor_move_locked(crtc, x, y);
2637 
2638 		amdgpu_crtc->cursor_hot_x = hot_x;
2639 		amdgpu_crtc->cursor_hot_y = hot_y;
2640 	}
2641 
2642 	dce_v11_0_show_cursor(crtc);
2643 	dce_v11_0_lock_cursor(crtc, false);
2644 
2645 unpin:
2646 	if (amdgpu_crtc->cursor_bo) {
2647 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2648 		ret = amdgpu_bo_reserve(aobj, false);
2649 		if (likely(ret == 0)) {
2650 			amdgpu_bo_unpin(aobj);
2651 			amdgpu_bo_unreserve(aobj);
2652 		}
2653 		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
2654 	}
2655 
2656 	amdgpu_crtc->cursor_bo = obj;
2657 	return 0;
2658 }
2659 
2660 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
2661 {
2662 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2663 
2664 	if (amdgpu_crtc->cursor_bo) {
2665 		dce_v11_0_lock_cursor(crtc, true);
2666 
2667 		dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2668 					     amdgpu_crtc->cursor_y);
2669 
2670 		dce_v11_0_show_cursor(crtc);
2671 
2672 		dce_v11_0_lock_cursor(crtc, false);
2673 	}
2674 }
2675 
2676 static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2677 				    u16 *blue, uint32_t start, uint32_t size)
2678 {
2679 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2680 	int end = (start + size > 256) ? 256 : start + size, i;
2681 
2682 	/* userspace palettes are always correct as is */
2683 	for (i = start; i < end; i++) {
2684 		amdgpu_crtc->lut_r[i] = red[i] >> 6;
2685 		amdgpu_crtc->lut_g[i] = green[i] >> 6;
2686 		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2687 	}
2688 	dce_v11_0_crtc_load_lut(crtc);
2689 }
2690 
2691 static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
2692 {
2693 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2694 
2695 	drm_crtc_cleanup(crtc);
2696 	kfree(amdgpu_crtc);
2697 }
2698 
2699 static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
2700 	.cursor_set2 = dce_v11_0_crtc_cursor_set2,
2701 	.cursor_move = dce_v11_0_crtc_cursor_move,
2702 	.gamma_set = dce_v11_0_crtc_gamma_set,
2703 	.set_config = amdgpu_crtc_set_config,
2704 	.destroy = dce_v11_0_crtc_destroy,
2705 	.page_flip = amdgpu_crtc_page_flip,
2706 };
2707 
2708 static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2709 {
2710 	struct drm_device *dev = crtc->dev;
2711 	struct amdgpu_device *adev = dev->dev_private;
2712 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2713 	unsigned type;
2714 
2715 	switch (mode) {
2716 	case DRM_MODE_DPMS_ON:
2717 		amdgpu_crtc->enabled = true;
2718 		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2719 		dce_v11_0_vga_enable(crtc, true);
2720 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2721 		dce_v11_0_vga_enable(crtc, false);
2722 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2723 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2724 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2725 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2726 		drm_vblank_on(dev, amdgpu_crtc->crtc_id);
2727 		dce_v11_0_crtc_load_lut(crtc);
2728 		break;
2729 	case DRM_MODE_DPMS_STANDBY:
2730 	case DRM_MODE_DPMS_SUSPEND:
2731 	case DRM_MODE_DPMS_OFF:
2732 		drm_vblank_off(dev, amdgpu_crtc->crtc_id);
2733 		if (amdgpu_crtc->enabled) {
2734 			dce_v11_0_vga_enable(crtc, true);
2735 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2736 			dce_v11_0_vga_enable(crtc, false);
2737 		}
2738 		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2739 		amdgpu_crtc->enabled = false;
2740 		break;
2741 	}
2742 	/* adjust pm to dpms */
2743 	amdgpu_pm_compute_clocks(adev);
2744 }
2745 
2746 static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc)
2747 {
2748 	/* disable crtc pair power gating before programming */
2749 	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2750 	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2751 	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2752 }
2753 
2754 static void dce_v11_0_crtc_commit(struct drm_crtc *crtc)
2755 {
2756 	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2757 	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2758 }
2759 
2760 static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
2761 {
2762 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2763 	struct drm_device *dev = crtc->dev;
2764 	struct amdgpu_device *adev = dev->dev_private;
2765 	struct amdgpu_atom_ss ss;
2766 	int i;
2767 
2768 	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2769 	if (crtc->primary->fb) {
2770 		int r;
2771 		struct amdgpu_framebuffer *amdgpu_fb;
2772 		struct amdgpu_bo *rbo;
2773 
2774 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2775 		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2776 		r = amdgpu_bo_reserve(rbo, false);
2777 		if (unlikely(r))
2778 			DRM_ERROR("failed to reserve rbo before unpin\n");
2779 		else {
2780 			amdgpu_bo_unpin(rbo);
2781 			amdgpu_bo_unreserve(rbo);
2782 		}
2783 	}
2784 	/* disable the GRPH */
2785 	dce_v11_0_grph_enable(crtc, false);
2786 
2787 	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2788 
2789 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2790 		if (adev->mode_info.crtcs[i] &&
2791 		    adev->mode_info.crtcs[i]->enabled &&
2792 		    i != amdgpu_crtc->crtc_id &&
2793 		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2794 			/* one other crtc is using this pll don't turn
2795 			 * off the pll
2796 			 */
2797 			goto done;
2798 		}
2799 	}
2800 
2801 	switch (amdgpu_crtc->pll_id) {
2802 	case ATOM_PPLL0:
2803 	case ATOM_PPLL1:
2804 	case ATOM_PPLL2:
2805 		/* disable the ppll */
2806 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2807 						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2808 		break;
2809 	case ATOM_COMBOPHY_PLL0:
2810 	case ATOM_COMBOPHY_PLL1:
2811 	case ATOM_COMBOPHY_PLL2:
2812 	case ATOM_COMBOPHY_PLL3:
2813 	case ATOM_COMBOPHY_PLL4:
2814 	case ATOM_COMBOPHY_PLL5:
2815 		/* disable the ppll */
2816 		amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
2817 						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2818 		break;
2819 	default:
2820 		break;
2821 	}
2822 done:
2823 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2824 	amdgpu_crtc->adjusted_clock = 0;
2825 	amdgpu_crtc->encoder = NULL;
2826 	amdgpu_crtc->connector = NULL;
2827 }
2828 
2829 static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
2830 				  struct drm_display_mode *mode,
2831 				  struct drm_display_mode *adjusted_mode,
2832 				  int x, int y, struct drm_framebuffer *old_fb)
2833 {
2834 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2835 	struct drm_device *dev = crtc->dev;
2836 	struct amdgpu_device *adev = dev->dev_private;
2837 
2838 	if (!amdgpu_crtc->adjusted_clock)
2839 		return -EINVAL;
2840 
2841 	if ((adev->asic_type == CHIP_POLARIS10) ||
2842 	    (adev->asic_type == CHIP_POLARIS11)) {
2843 		struct amdgpu_encoder *amdgpu_encoder =
2844 			to_amdgpu_encoder(amdgpu_crtc->encoder);
2845 		int encoder_mode =
2846 			amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
2847 
2848 		/* SetPixelClock calculates the plls and ss values now */
2849 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
2850 						 amdgpu_crtc->pll_id,
2851 						 encoder_mode, amdgpu_encoder->encoder_id,
2852 						 adjusted_mode->clock, 0, 0, 0, 0,
2853 						 amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
2854 	} else {
2855 		amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2856 	}
2857 	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2858 	dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2859 	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2860 	amdgpu_atombios_crtc_scaler_setup(crtc);
2861 	dce_v11_0_cursor_reset(crtc);
2862 	/* update the hw version fpr dpm */
2863 	amdgpu_crtc->hw_mode = *adjusted_mode;
2864 
2865 	return 0;
2866 }
2867 
2868 static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
2869 				     const struct drm_display_mode *mode,
2870 				     struct drm_display_mode *adjusted_mode)
2871 {
2872 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2873 	struct drm_device *dev = crtc->dev;
2874 	struct drm_encoder *encoder;
2875 
2876 	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2877 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2878 		if (encoder->crtc == crtc) {
2879 			amdgpu_crtc->encoder = encoder;
2880 			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2881 			break;
2882 		}
2883 	}
2884 	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2885 		amdgpu_crtc->encoder = NULL;
2886 		amdgpu_crtc->connector = NULL;
2887 		return false;
2888 	}
2889 	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2890 		return false;
2891 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2892 		return false;
2893 	/* pick pll */
2894 	amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc);
2895 	/* if we can't get a PPLL for a non-DP encoder, fail */
2896 	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2897 	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2898 		return false;
2899 
2900 	return true;
2901 }
2902 
2903 static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2904 				  struct drm_framebuffer *old_fb)
2905 {
2906 	return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2907 }
2908 
2909 static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2910 					 struct drm_framebuffer *fb,
2911 					 int x, int y, enum mode_set_atomic state)
2912 {
2913        return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1);
2914 }
2915 
2916 static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
2917 	.dpms = dce_v11_0_crtc_dpms,
2918 	.mode_fixup = dce_v11_0_crtc_mode_fixup,
2919 	.mode_set = dce_v11_0_crtc_mode_set,
2920 	.mode_set_base = dce_v11_0_crtc_set_base,
2921 	.mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic,
2922 	.prepare = dce_v11_0_crtc_prepare,
2923 	.commit = dce_v11_0_crtc_commit,
2924 	.load_lut = dce_v11_0_crtc_load_lut,
2925 	.disable = dce_v11_0_crtc_disable,
2926 };
2927 
2928 static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
2929 {
2930 	struct amdgpu_crtc *amdgpu_crtc;
2931 	int i;
2932 
2933 	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2934 			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2935 	if (amdgpu_crtc == NULL)
2936 		return -ENOMEM;
2937 
2938 	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
2939 
2940 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2941 	amdgpu_crtc->crtc_id = index;
2942 	adev->mode_info.crtcs[index] = amdgpu_crtc;
2943 
2944 	amdgpu_crtc->max_cursor_width = 128;
2945 	amdgpu_crtc->max_cursor_height = 128;
2946 	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2947 	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2948 
2949 	for (i = 0; i < 256; i++) {
2950 		amdgpu_crtc->lut_r[i] = i << 2;
2951 		amdgpu_crtc->lut_g[i] = i << 2;
2952 		amdgpu_crtc->lut_b[i] = i << 2;
2953 	}
2954 
2955 	switch (amdgpu_crtc->crtc_id) {
2956 	case 0:
2957 	default:
2958 		amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
2959 		break;
2960 	case 1:
2961 		amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
2962 		break;
2963 	case 2:
2964 		amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
2965 		break;
2966 	case 3:
2967 		amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
2968 		break;
2969 	case 4:
2970 		amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
2971 		break;
2972 	case 5:
2973 		amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
2974 		break;
2975 	}
2976 
2977 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2978 	amdgpu_crtc->adjusted_clock = 0;
2979 	amdgpu_crtc->encoder = NULL;
2980 	amdgpu_crtc->connector = NULL;
2981 	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);
2982 
2983 	return 0;
2984 }
2985 
2986 static int dce_v11_0_early_init(void *handle)
2987 {
2988 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2989 
2990 	adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg;
2991 	adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;
2992 
2993 	dce_v11_0_set_display_funcs(adev);
2994 	dce_v11_0_set_irq_funcs(adev);
2995 
2996 	switch (adev->asic_type) {
2997 	case CHIP_CARRIZO:
2998 		adev->mode_info.num_crtc = 3;
2999 		adev->mode_info.num_hpd = 6;
3000 		adev->mode_info.num_dig = 9;
3001 		break;
3002 	case CHIP_STONEY:
3003 		adev->mode_info.num_crtc = 2;
3004 		adev->mode_info.num_hpd = 6;
3005 		adev->mode_info.num_dig = 9;
3006 		break;
3007 	case CHIP_POLARIS10:
3008 		adev->mode_info.num_crtc = 6;
3009 		adev->mode_info.num_hpd = 6;
3010 		adev->mode_info.num_dig = 6;
3011 		break;
3012 	case CHIP_POLARIS11:
3013 		adev->mode_info.num_crtc = 5;
3014 		adev->mode_info.num_hpd = 5;
3015 		adev->mode_info.num_dig = 5;
3016 		break;
3017 	default:
3018 		/* FIXME: not supported yet */
3019 		return -EINVAL;
3020 	}
3021 
3022 	return 0;
3023 }
3024 
3025 static int dce_v11_0_sw_init(void *handle)
3026 {
3027 	int r, i;
3028 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3029 
3030 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3031 		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
3032 		if (r)
3033 			return r;
3034 	}
3035 
3036 	for (i = 8; i < 20; i += 2) {
3037 		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
3038 		if (r)
3039 			return r;
3040 	}
3041 
3042 	/* HPD hotplug */
3043 	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
3044 	if (r)
3045 		return r;
3046 
3047 	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
3048 
3049 	adev->ddev->mode_config.max_width = 16384;
3050 	adev->ddev->mode_config.max_height = 16384;
3051 
3052 	adev->ddev->mode_config.preferred_depth = 24;
3053 	adev->ddev->mode_config.prefer_shadow = 1;
3054 
3055 	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
3056 
3057 	r = amdgpu_modeset_create_props(adev);
3058 	if (r)
3059 		return r;
3060 
3061 	adev->ddev->mode_config.max_width = 16384;
3062 	adev->ddev->mode_config.max_height = 16384;
3063 
3064 
3065 	/* allocate crtcs */
3066 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3067 		r = dce_v11_0_crtc_init(adev, i);
3068 		if (r)
3069 			return r;
3070 	}
3071 
3072 	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
3073 		amdgpu_print_display_setup(adev->ddev);
3074 	else
3075 		return -EINVAL;
3076 
3077 	/* setup afmt */
3078 	r = dce_v11_0_afmt_init(adev);
3079 	if (r)
3080 		return r;
3081 
3082 	r = dce_v11_0_audio_init(adev);
3083 	if (r)
3084 		return r;
3085 
3086 	drm_kms_helper_poll_init(adev->ddev);
3087 
3088 	adev->mode_info.mode_config_initialized = true;
3089 	return 0;
3090 }
3091 
3092 static int dce_v11_0_sw_fini(void *handle)
3093 {
3094 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3095 
3096 	kfree(adev->mode_info.bios_hardcoded_edid);
3097 
3098 	drm_kms_helper_poll_fini(adev->ddev);
3099 
3100 	dce_v11_0_audio_fini(adev);
3101 
3102 	dce_v11_0_afmt_fini(adev);
3103 
3104 	adev->mode_info.mode_config_initialized = false;
3105 
3106 	return 0;
3107 }
3108 
3109 static int dce_v11_0_hw_init(void *handle)
3110 {
3111 	int i;
3112 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3113 
3114 	dce_v11_0_init_golden_registers(adev);
3115 
3116 	/* init dig PHYs, disp eng pll */
3117 	amdgpu_atombios_crtc_powergate_init(adev);
3118 	amdgpu_atombios_encoder_init_dig(adev);
3119 	if ((adev->asic_type == CHIP_POLARIS10) ||
3120 	    (adev->asic_type == CHIP_POLARIS11)) {
3121 		amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
3122 						   DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
3123 		amdgpu_atombios_crtc_set_dce_clock(adev, 0,
3124 						   DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
3125 	} else {
3126 		amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
3127 	}
3128 
3129 	/* initialize hpd */
3130 	dce_v11_0_hpd_init(adev);
3131 
3132 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
3133 		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
3134 	}
3135 
3136 	dce_v11_0_pageflip_interrupt_init(adev);
3137 
3138 	return 0;
3139 }
3140 
3141 static int dce_v11_0_hw_fini(void *handle)
3142 {
3143 	int i;
3144 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3145 
3146 	dce_v11_0_hpd_fini(adev);
3147 
3148 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
3149 		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
3150 	}
3151 
3152 	dce_v11_0_pageflip_interrupt_fini(adev);
3153 
3154 	return 0;
3155 }
3156 
3157 static int dce_v11_0_suspend(void *handle)
3158 {
3159 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3160 
3161 	amdgpu_atombios_scratch_regs_save(adev);
3162 
3163 	return dce_v11_0_hw_fini(handle);
3164 }
3165 
3166 static int dce_v11_0_resume(void *handle)
3167 {
3168 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3169 	int ret;
3170 
3171 	ret = dce_v11_0_hw_init(handle);
3172 
3173 	amdgpu_atombios_scratch_regs_restore(adev);
3174 
3175 	/* turn on the BL */
3176 	if (adev->mode_info.bl_encoder) {
3177 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
3178 								  adev->mode_info.bl_encoder);
3179 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
3180 						    bl_level);
3181 	}
3182 
3183 	return ret;
3184 }
3185 
3186 static bool dce_v11_0_is_idle(void *handle)
3187 {
3188 	return true;
3189 }
3190 
3191 static int dce_v11_0_wait_for_idle(void *handle)
3192 {
3193 	return 0;
3194 }
3195 
3196 static int dce_v11_0_soft_reset(void *handle)
3197 {
3198 	u32 srbm_soft_reset = 0, tmp;
3199 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3200 
3201 	if (dce_v11_0_is_display_hung(adev))
3202 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
3203 
3204 	if (srbm_soft_reset) {
3205 		tmp = RREG32(mmSRBM_SOFT_RESET);
3206 		tmp |= srbm_soft_reset;
3207 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
3208 		WREG32(mmSRBM_SOFT_RESET, tmp);
3209 		tmp = RREG32(mmSRBM_SOFT_RESET);
3210 
3211 		udelay(50);
3212 
3213 		tmp &= ~srbm_soft_reset;
3214 		WREG32(mmSRBM_SOFT_RESET, tmp);
3215 		tmp = RREG32(mmSRBM_SOFT_RESET);
3216 
3217 		/* Wait a little for things to settle down */
3218 		udelay(50);
3219 	}
3220 	return 0;
3221 }
3222 
3223 static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
3224 						     int crtc,
3225 						     enum amdgpu_interrupt_state state)
3226 {
3227 	u32 lb_interrupt_mask;
3228 
3229 	if (crtc >= adev->mode_info.num_crtc) {
3230 		DRM_DEBUG("invalid crtc %d\n", crtc);
3231 		return;
3232 	}
3233 
3234 	switch (state) {
3235 	case AMDGPU_IRQ_STATE_DISABLE:
3236 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3237 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3238 						  VBLANK_INTERRUPT_MASK, 0);
3239 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3240 		break;
3241 	case AMDGPU_IRQ_STATE_ENABLE:
3242 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3243 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3244 						  VBLANK_INTERRUPT_MASK, 1);
3245 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3246 		break;
3247 	default:
3248 		break;
3249 	}
3250 }
3251 
3252 static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3253 						    int crtc,
3254 						    enum amdgpu_interrupt_state state)
3255 {
3256 	u32 lb_interrupt_mask;
3257 
3258 	if (crtc >= adev->mode_info.num_crtc) {
3259 		DRM_DEBUG("invalid crtc %d\n", crtc);
3260 		return;
3261 	}
3262 
3263 	switch (state) {
3264 	case AMDGPU_IRQ_STATE_DISABLE:
3265 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3266 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3267 						  VLINE_INTERRUPT_MASK, 0);
3268 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3269 		break;
3270 	case AMDGPU_IRQ_STATE_ENABLE:
3271 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3272 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3273 						  VLINE_INTERRUPT_MASK, 1);
3274 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3275 		break;
3276 	default:
3277 		break;
3278 	}
3279 }
3280 
3281 static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
3282 					struct amdgpu_irq_src *source,
3283 					unsigned hpd,
3284 					enum amdgpu_interrupt_state state)
3285 {
3286 	u32 tmp;
3287 
3288 	if (hpd >= adev->mode_info.num_hpd) {
3289 		DRM_DEBUG("invalid hdp %d\n", hpd);
3290 		return 0;
3291 	}
3292 
3293 	switch (state) {
3294 	case AMDGPU_IRQ_STATE_DISABLE:
3295 		tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3296 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
3297 		WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3298 		break;
3299 	case AMDGPU_IRQ_STATE_ENABLE:
3300 		tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3301 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
3302 		WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3303 		break;
3304 	default:
3305 		break;
3306 	}
3307 
3308 	return 0;
3309 }
3310 
3311 static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev,
3312 					struct amdgpu_irq_src *source,
3313 					unsigned type,
3314 					enum amdgpu_interrupt_state state)
3315 {
3316 	switch (type) {
3317 	case AMDGPU_CRTC_IRQ_VBLANK1:
3318 		dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3319 		break;
3320 	case AMDGPU_CRTC_IRQ_VBLANK2:
3321 		dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3322 		break;
3323 	case AMDGPU_CRTC_IRQ_VBLANK3:
3324 		dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3325 		break;
3326 	case AMDGPU_CRTC_IRQ_VBLANK4:
3327 		dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3328 		break;
3329 	case AMDGPU_CRTC_IRQ_VBLANK5:
3330 		dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3331 		break;
3332 	case AMDGPU_CRTC_IRQ_VBLANK6:
3333 		dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3334 		break;
3335 	case AMDGPU_CRTC_IRQ_VLINE1:
3336 		dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state);
3337 		break;
3338 	case AMDGPU_CRTC_IRQ_VLINE2:
3339 		dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state);
3340 		break;
3341 	case AMDGPU_CRTC_IRQ_VLINE3:
3342 		dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state);
3343 		break;
3344 	case AMDGPU_CRTC_IRQ_VLINE4:
3345 		dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state);
3346 		break;
3347 	case AMDGPU_CRTC_IRQ_VLINE5:
3348 		dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state);
3349 		break;
3350 	 case AMDGPU_CRTC_IRQ_VLINE6:
3351 		dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state);
3352 		break;
3353 	default:
3354 		break;
3355 	}
3356 	return 0;
3357 }
3358 
3359 static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3360 					    struct amdgpu_irq_src *src,
3361 					    unsigned type,
3362 					    enum amdgpu_interrupt_state state)
3363 {
3364 	u32 reg;
3365 
3366 	if (type >= adev->mode_info.num_crtc) {
3367 		DRM_ERROR("invalid pageflip crtc %d\n", type);
3368 		return -EINVAL;
3369 	}
3370 
3371 	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3372 	if (state == AMDGPU_IRQ_STATE_DISABLE)
3373 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3374 		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3375 	else
3376 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3377 		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3378 
3379 	return 0;
3380 }
3381 
3382 static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
3383 				  struct amdgpu_irq_src *source,
3384 				  struct amdgpu_iv_entry *entry)
3385 {
3386 	unsigned long flags;
3387 	unsigned crtc_id;
3388 	struct amdgpu_crtc *amdgpu_crtc;
3389 	struct amdgpu_flip_work *works;
3390 
3391 	crtc_id = (entry->src_id - 8) >> 1;
3392 	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3393 
3394 	if (crtc_id >= adev->mode_info.num_crtc) {
3395 		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3396 		return -EINVAL;
3397 	}
3398 
3399 	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3400 	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3401 		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3402 		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3403 
3404 	/* IRQ could occur when in initial stage */
3405 	if(amdgpu_crtc == NULL)
3406 		return 0;
3407 
3408 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
3409 	works = amdgpu_crtc->pflip_works;
3410 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
3411 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3412 						 "AMDGPU_FLIP_SUBMITTED(%d)\n",
3413 						 amdgpu_crtc->pflip_status,
3414 						 AMDGPU_FLIP_SUBMITTED);
3415 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3416 		return 0;
3417 	}
3418 
3419 	/* page flip completed. clean up */
3420 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3421 	amdgpu_crtc->pflip_works = NULL;
3422 
3423 	/* wakeup usersapce */
3424 	if(works->event)
3425 		drm_send_vblank_event(adev->ddev, crtc_id, works->event);
3426 
3427 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3428 
3429 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
3430 	schedule_work(&works->unpin_work);
3431 
3432 	return 0;
3433 }
3434 
3435 static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
3436 				  int hpd)
3437 {
3438 	u32 tmp;
3439 
3440 	if (hpd >= adev->mode_info.num_hpd) {
3441 		DRM_DEBUG("invalid hdp %d\n", hpd);
3442 		return;
3443 	}
3444 
3445 	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3446 	tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
3447 	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3448 }
3449 
3450 static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
3451 					  int crtc)
3452 {
3453 	u32 tmp;
3454 
3455 	if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
3456 		DRM_DEBUG("invalid crtc %d\n", crtc);
3457 		return;
3458 	}
3459 
3460 	tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
3461 	tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
3462 	WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
3463 }
3464 
3465 static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev,
3466 					 int crtc)
3467 {
3468 	u32 tmp;
3469 
3470 	if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
3471 		DRM_DEBUG("invalid crtc %d\n", crtc);
3472 		return;
3473 	}
3474 
3475 	tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
3476 	tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
3477 	WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
3478 }
3479 
3480 static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
3481 				struct amdgpu_irq_src *source,
3482 				struct amdgpu_iv_entry *entry)
3483 {
3484 	unsigned crtc = entry->src_id - 1;
3485 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3486 	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
3487 
3488 	switch (entry->src_data) {
3489 	case 0: /* vblank */
3490 		if (disp_int & interrupt_status_offsets[crtc].vblank)
3491 			dce_v11_0_crtc_vblank_int_ack(adev, crtc);
3492 		else
3493 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3494 
3495 		if (amdgpu_irq_enabled(adev, source, irq_type)) {
3496 			drm_handle_vblank(adev->ddev, crtc);
3497 		}
3498 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3499 
3500 		break;
3501 	case 1: /* vline */
3502 		if (disp_int & interrupt_status_offsets[crtc].vline)
3503 			dce_v11_0_crtc_vline_int_ack(adev, crtc);
3504 		else
3505 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3506 
3507 		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3508 
3509 		break;
3510 	default:
3511 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3512 		break;
3513 	}
3514 
3515 	return 0;
3516 }
3517 
3518 static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
3519 			     struct amdgpu_irq_src *source,
3520 			     struct amdgpu_iv_entry *entry)
3521 {
3522 	uint32_t disp_int, mask;
3523 	unsigned hpd;
3524 
3525 	if (entry->src_data >= adev->mode_info.num_hpd) {
3526 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3527 		return 0;
3528 	}
3529 
3530 	hpd = entry->src_data;
3531 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3532 	mask = interrupt_status_offsets[hpd].hpd;
3533 
3534 	if (disp_int & mask) {
3535 		dce_v11_0_hpd_int_ack(adev, hpd);
3536 		schedule_work(&adev->hotplug_work);
3537 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3538 	}
3539 
3540 	return 0;
3541 }
3542 
3543 static int dce_v11_0_set_clockgating_state(void *handle,
3544 					  enum amd_clockgating_state state)
3545 {
3546 	return 0;
3547 }
3548 
3549 static int dce_v11_0_set_powergating_state(void *handle,
3550 					  enum amd_powergating_state state)
3551 {
3552 	return 0;
3553 }
3554 
3555 const struct amd_ip_funcs dce_v11_0_ip_funcs = {
3556 	.early_init = dce_v11_0_early_init,
3557 	.late_init = NULL,
3558 	.sw_init = dce_v11_0_sw_init,
3559 	.sw_fini = dce_v11_0_sw_fini,
3560 	.hw_init = dce_v11_0_hw_init,
3561 	.hw_fini = dce_v11_0_hw_fini,
3562 	.suspend = dce_v11_0_suspend,
3563 	.resume = dce_v11_0_resume,
3564 	.is_idle = dce_v11_0_is_idle,
3565 	.wait_for_idle = dce_v11_0_wait_for_idle,
3566 	.soft_reset = dce_v11_0_soft_reset,
3567 	.set_clockgating_state = dce_v11_0_set_clockgating_state,
3568 	.set_powergating_state = dce_v11_0_set_powergating_state,
3569 };
3570 
3571 static void
3572 dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
3573 			  struct drm_display_mode *mode,
3574 			  struct drm_display_mode *adjusted_mode)
3575 {
3576 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3577 
3578 	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3579 
3580 	/* need to call this here rather than in prepare() since we need some crtc info */
3581 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3582 
3583 	/* set scaler clears this on some chips */
3584 	dce_v11_0_set_interleave(encoder->crtc, mode);
3585 
3586 	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3587 		dce_v11_0_afmt_enable(encoder, true);
3588 		dce_v11_0_afmt_setmode(encoder, adjusted_mode);
3589 	}
3590 }
3591 
3592 static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
3593 {
3594 	struct amdgpu_device *adev = encoder->dev->dev_private;
3595 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3596 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3597 
3598 	if ((amdgpu_encoder->active_device &
3599 	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3600 	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3601 	     ENCODER_OBJECT_ID_NONE)) {
3602 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3603 		if (dig) {
3604 			dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder);
3605 			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3606 				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3607 		}
3608 	}
3609 
3610 	amdgpu_atombios_scratch_regs_lock(adev, true);
3611 
3612 	if (connector) {
3613 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3614 
3615 		/* select the clock/data port if it uses a router */
3616 		if (amdgpu_connector->router.cd_valid)
3617 			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3618 
3619 		/* turn eDP panel on for mode set */
3620 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3621 			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3622 							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3623 	}
3624 
3625 	/* this is needed for the pll/ss setup to work correctly in some cases */
3626 	amdgpu_atombios_encoder_set_crtc_source(encoder);
3627 	/* set up the FMT blocks */
3628 	dce_v11_0_program_fmt(encoder);
3629 }
3630 
3631 static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
3632 {
3633 	struct drm_device *dev = encoder->dev;
3634 	struct amdgpu_device *adev = dev->dev_private;
3635 
3636 	/* need to call this here as we need the crtc set up */
3637 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3638 	amdgpu_atombios_scratch_regs_lock(adev, false);
3639 }
3640 
3641 static void dce_v11_0_encoder_disable(struct drm_encoder *encoder)
3642 {
3643 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3644 	struct amdgpu_encoder_atom_dig *dig;
3645 
3646 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3647 
3648 	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3649 		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3650 			dce_v11_0_afmt_enable(encoder, false);
3651 		dig = amdgpu_encoder->enc_priv;
3652 		dig->dig_encoder = -1;
3653 	}
3654 	amdgpu_encoder->active_device = 0;
3655 }
3656 
3657 /* these are handled by the primary encoders */
3658 static void dce_v11_0_ext_prepare(struct drm_encoder *encoder)
3659 {
3660 
3661 }
3662 
3663 static void dce_v11_0_ext_commit(struct drm_encoder *encoder)
3664 {
3665 
3666 }
3667 
3668 static void
3669 dce_v11_0_ext_mode_set(struct drm_encoder *encoder,
3670 		      struct drm_display_mode *mode,
3671 		      struct drm_display_mode *adjusted_mode)
3672 {
3673 
3674 }
3675 
3676 static void dce_v11_0_ext_disable(struct drm_encoder *encoder)
3677 {
3678 
3679 }
3680 
3681 static void
3682 dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode)
3683 {
3684 
3685 }
3686 
3687 static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = {
3688 	.dpms = dce_v11_0_ext_dpms,
3689 	.prepare = dce_v11_0_ext_prepare,
3690 	.mode_set = dce_v11_0_ext_mode_set,
3691 	.commit = dce_v11_0_ext_commit,
3692 	.disable = dce_v11_0_ext_disable,
3693 	/* no detect for TMDS/LVDS yet */
3694 };
3695 
3696 static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = {
3697 	.dpms = amdgpu_atombios_encoder_dpms,
3698 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3699 	.prepare = dce_v11_0_encoder_prepare,
3700 	.mode_set = dce_v11_0_encoder_mode_set,
3701 	.commit = dce_v11_0_encoder_commit,
3702 	.disable = dce_v11_0_encoder_disable,
3703 	.detect = amdgpu_atombios_encoder_dig_detect,
3704 };
3705 
3706 static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = {
3707 	.dpms = amdgpu_atombios_encoder_dpms,
3708 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3709 	.prepare = dce_v11_0_encoder_prepare,
3710 	.mode_set = dce_v11_0_encoder_mode_set,
3711 	.commit = dce_v11_0_encoder_commit,
3712 	.detect = amdgpu_atombios_encoder_dac_detect,
3713 };
3714 
3715 static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder)
3716 {
3717 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3718 	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3719 		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3720 	kfree(amdgpu_encoder->enc_priv);
3721 	drm_encoder_cleanup(encoder);
3722 	kfree(amdgpu_encoder);
3723 }
3724 
3725 static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = {
3726 	.destroy = dce_v11_0_encoder_destroy,
3727 };
3728 
3729 static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
3730 				 uint32_t encoder_enum,
3731 				 uint32_t supported_device,
3732 				 u16 caps)
3733 {
3734 	struct drm_device *dev = adev->ddev;
3735 	struct drm_encoder *encoder;
3736 	struct amdgpu_encoder *amdgpu_encoder;
3737 
3738 	/* see if we already added it */
3739 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3740 		amdgpu_encoder = to_amdgpu_encoder(encoder);
3741 		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3742 			amdgpu_encoder->devices |= supported_device;
3743 			return;
3744 		}
3745 
3746 	}
3747 
3748 	/* add a new one */
3749 	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3750 	if (!amdgpu_encoder)
3751 		return;
3752 
3753 	encoder = &amdgpu_encoder->base;
3754 	switch (adev->mode_info.num_crtc) {
3755 	case 1:
3756 		encoder->possible_crtcs = 0x1;
3757 		break;
3758 	case 2:
3759 	default:
3760 		encoder->possible_crtcs = 0x3;
3761 		break;
3762 	case 4:
3763 		encoder->possible_crtcs = 0xf;
3764 		break;
3765 	case 6:
3766 		encoder->possible_crtcs = 0x3f;
3767 		break;
3768 	}
3769 
3770 	amdgpu_encoder->enc_priv = NULL;
3771 
3772 	amdgpu_encoder->encoder_enum = encoder_enum;
3773 	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3774 	amdgpu_encoder->devices = supported_device;
3775 	amdgpu_encoder->rmx_type = RMX_OFF;
3776 	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3777 	amdgpu_encoder->is_ext_encoder = false;
3778 	amdgpu_encoder->caps = caps;
3779 
3780 	switch (amdgpu_encoder->encoder_id) {
3781 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3782 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3783 		drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3784 				 DRM_MODE_ENCODER_DAC, NULL);
3785 		drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
3786 		break;
3787 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3788 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3789 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3790 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3791 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3792 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3793 			amdgpu_encoder->rmx_type = RMX_FULL;
3794 			drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3795 					 DRM_MODE_ENCODER_LVDS, NULL);
3796 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3797 		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3798 			drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3799 					 DRM_MODE_ENCODER_DAC, NULL);
3800 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3801 		} else {
3802 			drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3803 					 DRM_MODE_ENCODER_TMDS, NULL);
3804 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3805 		}
3806 		drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
3807 		break;
3808 	case ENCODER_OBJECT_ID_SI170B:
3809 	case ENCODER_OBJECT_ID_CH7303:
3810 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3811 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3812 	case ENCODER_OBJECT_ID_TITFP513:
3813 	case ENCODER_OBJECT_ID_VT1623:
3814 	case ENCODER_OBJECT_ID_HDMI_SI1930:
3815 	case ENCODER_OBJECT_ID_TRAVIS:
3816 	case ENCODER_OBJECT_ID_NUTMEG:
3817 		/* these are handled by the primary encoders */
3818 		amdgpu_encoder->is_ext_encoder = true;
3819 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3820 			drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3821 					 DRM_MODE_ENCODER_LVDS, NULL);
3822 		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3823 			drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3824 					 DRM_MODE_ENCODER_DAC, NULL);
3825 		else
3826 			drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3827 					 DRM_MODE_ENCODER_TMDS, NULL);
3828 		drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
3829 		break;
3830 	}
3831 }
3832 
3833 static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
3834 	.set_vga_render_state = &dce_v11_0_set_vga_render_state,
3835 	.bandwidth_update = &dce_v11_0_bandwidth_update,
3836 	.vblank_get_counter = &dce_v11_0_vblank_get_counter,
3837 	.vblank_wait = &dce_v11_0_vblank_wait,
3838 	.is_display_hung = &dce_v11_0_is_display_hung,
3839 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3840 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3841 	.hpd_sense = &dce_v11_0_hpd_sense,
3842 	.hpd_set_polarity = &dce_v11_0_hpd_set_polarity,
3843 	.hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg,
3844 	.page_flip = &dce_v11_0_page_flip,
3845 	.page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
3846 	.add_encoder = &dce_v11_0_encoder_add,
3847 	.add_connector = &amdgpu_connector_add,
3848 	.stop_mc_access = &dce_v11_0_stop_mc_access,
3849 	.resume_mc_access = &dce_v11_0_resume_mc_access,
3850 };
3851 
3852 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
3853 {
3854 	if (adev->mode_info.funcs == NULL)
3855 		adev->mode_info.funcs = &dce_v11_0_display_funcs;
3856 }
3857 
3858 static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
3859 	.set = dce_v11_0_set_crtc_irq_state,
3860 	.process = dce_v11_0_crtc_irq,
3861 };
3862 
3863 static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = {
3864 	.set = dce_v11_0_set_pageflip_irq_state,
3865 	.process = dce_v11_0_pageflip_irq,
3866 };
3867 
3868 static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = {
3869 	.set = dce_v11_0_set_hpd_irq_state,
3870 	.process = dce_v11_0_hpd_irq,
3871 };
3872 
3873 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
3874 {
3875 	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
3876 	adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs;
3877 
3878 	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
3879 	adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs;
3880 
3881 	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
3882 	adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
3883 }
3884