1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <drm/drm_fourcc.h>
25 #include <drm/drm_vblank.h>
26 
27 #include "amdgpu.h"
28 #include "amdgpu_pm.h"
29 #include "amdgpu_i2c.h"
30 #include "vid.h"
31 #include "atom.h"
32 #include "amdgpu_atombios.h"
33 #include "atombios_crtc.h"
34 #include "atombios_encoders.h"
35 #include "amdgpu_pll.h"
36 #include "amdgpu_connectors.h"
37 #include "amdgpu_display.h"
38 #include "dce_v10_0.h"
39 
40 #include "dce/dce_10_0_d.h"
41 #include "dce/dce_10_0_sh_mask.h"
42 #include "dce/dce_10_0_enum.h"
43 #include "oss/oss_3_0_d.h"
44 #include "oss/oss_3_0_sh_mask.h"
45 #include "gmc/gmc_8_1_d.h"
46 #include "gmc/gmc_8_1_sh_mask.h"
47 
48 #include "ivsrcid/ivsrcid_vislands30.h"
49 
50 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
51 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
52 
53 static const u32 crtc_offsets[] =
54 {
55 	CRTC0_REGISTER_OFFSET,
56 	CRTC1_REGISTER_OFFSET,
57 	CRTC2_REGISTER_OFFSET,
58 	CRTC3_REGISTER_OFFSET,
59 	CRTC4_REGISTER_OFFSET,
60 	CRTC5_REGISTER_OFFSET,
61 	CRTC6_REGISTER_OFFSET
62 };
63 
64 static const u32 hpd_offsets[] =
65 {
66 	HPD0_REGISTER_OFFSET,
67 	HPD1_REGISTER_OFFSET,
68 	HPD2_REGISTER_OFFSET,
69 	HPD3_REGISTER_OFFSET,
70 	HPD4_REGISTER_OFFSET,
71 	HPD5_REGISTER_OFFSET
72 };
73 
74 static const uint32_t dig_offsets[] = {
75 	DIG0_REGISTER_OFFSET,
76 	DIG1_REGISTER_OFFSET,
77 	DIG2_REGISTER_OFFSET,
78 	DIG3_REGISTER_OFFSET,
79 	DIG4_REGISTER_OFFSET,
80 	DIG5_REGISTER_OFFSET,
81 	DIG6_REGISTER_OFFSET
82 };
83 
84 static const struct {
85 	uint32_t        reg;
86 	uint32_t        vblank;
87 	uint32_t        vline;
88 	uint32_t        hpd;
89 
90 } interrupt_status_offsets[] = { {
91 	.reg = mmDISP_INTERRUPT_STATUS,
92 	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
93 	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
94 	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
95 }, {
96 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
97 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
98 	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
99 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
100 }, {
101 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
102 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
103 	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
104 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
105 }, {
106 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
107 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
108 	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
109 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
110 }, {
111 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
112 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
113 	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
114 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
115 }, {
116 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
117 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
118 	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
119 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
120 } };
121 
122 static const u32 golden_settings_tonga_a11[] =
123 {
124 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
125 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
126 	mmFBC_MISC, 0x1f311fff, 0x12300000,
127 	mmHDMI_CONTROL, 0x31000111, 0x00000011,
128 };
129 
130 static const u32 tonga_mgcg_cgcg_init[] =
131 {
132 	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
133 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
134 };
135 
136 static const u32 golden_settings_fiji_a10[] =
137 {
138 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
139 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
140 	mmFBC_MISC, 0x1f311fff, 0x12300000,
141 	mmHDMI_CONTROL, 0x31000111, 0x00000011,
142 };
143 
144 static const u32 fiji_mgcg_cgcg_init[] =
145 {
146 	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
147 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
148 };
149 
150 static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
151 {
152 	switch (adev->asic_type) {
153 	case CHIP_FIJI:
154 		amdgpu_device_program_register_sequence(adev,
155 							fiji_mgcg_cgcg_init,
156 							ARRAY_SIZE(fiji_mgcg_cgcg_init));
157 		amdgpu_device_program_register_sequence(adev,
158 							golden_settings_fiji_a10,
159 							ARRAY_SIZE(golden_settings_fiji_a10));
160 		break;
161 	case CHIP_TONGA:
162 		amdgpu_device_program_register_sequence(adev,
163 							tonga_mgcg_cgcg_init,
164 							ARRAY_SIZE(tonga_mgcg_cgcg_init));
165 		amdgpu_device_program_register_sequence(adev,
166 							golden_settings_tonga_a11,
167 							ARRAY_SIZE(golden_settings_tonga_a11));
168 		break;
169 	default:
170 		break;
171 	}
172 }
173 
174 static u32 dce_v10_0_audio_endpt_rreg(struct amdgpu_device *adev,
175 				     u32 block_offset, u32 reg)
176 {
177 	unsigned long flags;
178 	u32 r;
179 
180 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
181 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
182 	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
183 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
184 
185 	return r;
186 }
187 
188 static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev,
189 				      u32 block_offset, u32 reg, u32 v)
190 {
191 	unsigned long flags;
192 
193 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
194 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
195 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
196 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
197 }
198 
199 static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
200 {
201 	if (crtc >= adev->mode_info.num_crtc)
202 		return 0;
203 	else
204 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
205 }
206 
207 static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
208 {
209 	unsigned i;
210 
211 	/* Enable pflip interrupts */
212 	for (i = 0; i < adev->mode_info.num_crtc; i++)
213 		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
214 }
215 
216 static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
217 {
218 	unsigned i;
219 
220 	/* Disable pflip interrupts */
221 	for (i = 0; i < adev->mode_info.num_crtc; i++)
222 		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
223 }
224 
225 /**
226  * dce_v10_0_page_flip - pageflip callback.
227  *
228  * @adev: amdgpu_device pointer
229  * @crtc_id: crtc to cleanup pageflip on
230  * @crtc_base: new address of the crtc (GPU MC address)
231  * @async: asynchronous flip
232  *
233  * Triggers the actual pageflip by updating the primary
234  * surface base address.
235  */
236 static void dce_v10_0_page_flip(struct amdgpu_device *adev,
237 				int crtc_id, u64 crtc_base, bool async)
238 {
239 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
240 	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
241 	u32 tmp;
242 
243 	/* flip at hsync for async, default is vsync */
244 	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
245 	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
246 			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
247 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
248 	/* update pitch */
249 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
250 	       fb->pitches[0] / fb->format->cpp[0]);
251 	/* update the primary scanout address */
252 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
253 	       upper_32_bits(crtc_base));
254 	/* writing to the low address triggers the update */
255 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
256 	       lower_32_bits(crtc_base));
257 	/* post the write */
258 	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
259 }
260 
261 static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
262 					u32 *vbl, u32 *position)
263 {
264 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
265 		return -EINVAL;
266 
267 	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
268 	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
269 
270 	return 0;
271 }
272 
273 /**
274  * dce_v10_0_hpd_sense - hpd sense callback.
275  *
276  * @adev: amdgpu_device pointer
277  * @hpd: hpd (hotplug detect) pin
278  *
279  * Checks if a digital monitor is connected (evergreen+).
280  * Returns true if connected, false if not connected.
281  */
282 static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
283 			       enum amdgpu_hpd_id hpd)
284 {
285 	bool connected = false;
286 
287 	if (hpd >= adev->mode_info.num_hpd)
288 		return connected;
289 
290 	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
291 	    DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
292 		connected = true;
293 
294 	return connected;
295 }
296 
297 /**
298  * dce_v10_0_hpd_set_polarity - hpd set polarity callback.
299  *
300  * @adev: amdgpu_device pointer
301  * @hpd: hpd (hotplug detect) pin
302  *
303  * Set the polarity of the hpd pin (evergreen+).
304  */
305 static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
306 				      enum amdgpu_hpd_id hpd)
307 {
308 	u32 tmp;
309 	bool connected = dce_v10_0_hpd_sense(adev, hpd);
310 
311 	if (hpd >= adev->mode_info.num_hpd)
312 		return;
313 
314 	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
315 	if (connected)
316 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
317 	else
318 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
319 	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
320 }
321 
322 /**
323  * dce_v10_0_hpd_init - hpd setup callback.
324  *
325  * @adev: amdgpu_device pointer
326  *
327  * Setup the hpd pins used by the card (evergreen+).
328  * Enable the pin, set the polarity, and enable the hpd interrupts.
329  */
330 static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
331 {
332 	struct drm_device *dev = adev_to_drm(adev);
333 	struct drm_connector *connector;
334 	struct drm_connector_list_iter iter;
335 	u32 tmp;
336 
337 	drm_connector_list_iter_begin(dev, &iter);
338 	drm_for_each_connector_iter(connector, &iter) {
339 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
340 
341 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
342 			continue;
343 
344 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
345 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
346 			/* don't try to enable hpd on eDP or LVDS avoid breaking the
347 			 * aux dp channel on imac and help (but not completely fix)
348 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
349 			 * also avoid interrupt storms during dpms.
350 			 */
351 			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
352 			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
353 			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
354 			continue;
355 		}
356 
357 		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
358 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
359 		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
360 
361 		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
362 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
363 				    DC_HPD_CONNECT_INT_DELAY,
364 				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
365 		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
366 				    DC_HPD_DISCONNECT_INT_DELAY,
367 				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
368 		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
369 
370 		dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
371 		amdgpu_irq_get(adev, &adev->hpd_irq,
372 			       amdgpu_connector->hpd.hpd);
373 	}
374 	drm_connector_list_iter_end(&iter);
375 }
376 
377 /**
378  * dce_v10_0_hpd_fini - hpd tear down callback.
379  *
380  * @adev: amdgpu_device pointer
381  *
382  * Tear down the hpd pins used by the card (evergreen+).
383  * Disable the hpd interrupts.
384  */
385 static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
386 {
387 	struct drm_device *dev = adev_to_drm(adev);
388 	struct drm_connector *connector;
389 	struct drm_connector_list_iter iter;
390 	u32 tmp;
391 
392 	drm_connector_list_iter_begin(dev, &iter);
393 	drm_for_each_connector_iter(connector, &iter) {
394 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
395 
396 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
397 			continue;
398 
399 		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
400 		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
401 		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
402 
403 		amdgpu_irq_put(adev, &adev->hpd_irq,
404 			       amdgpu_connector->hpd.hpd);
405 	}
406 	drm_connector_list_iter_end(&iter);
407 }
408 
409 static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
410 {
411 	return mmDC_GPIO_HPD_A;
412 }
413 
414 static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
415 {
416 	u32 crtc_hung = 0;
417 	u32 crtc_status[6];
418 	u32 i, j, tmp;
419 
420 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
421 		tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
422 		if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
423 			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
424 			crtc_hung |= (1 << i);
425 		}
426 	}
427 
428 	for (j = 0; j < 10; j++) {
429 		for (i = 0; i < adev->mode_info.num_crtc; i++) {
430 			if (crtc_hung & (1 << i)) {
431 				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
432 				if (tmp != crtc_status[i])
433 					crtc_hung &= ~(1 << i);
434 			}
435 		}
436 		if (crtc_hung == 0)
437 			return false;
438 		udelay(100);
439 	}
440 
441 	return true;
442 }
443 
444 static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
445 					   bool render)
446 {
447 	u32 tmp;
448 
449 	/* Lockout access through VGA aperture*/
450 	tmp = RREG32(mmVGA_HDP_CONTROL);
451 	if (render)
452 		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
453 	else
454 		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
455 	WREG32(mmVGA_HDP_CONTROL, tmp);
456 
457 	/* disable VGA render */
458 	tmp = RREG32(mmVGA_RENDER_CONTROL);
459 	if (render)
460 		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
461 	else
462 		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
463 	WREG32(mmVGA_RENDER_CONTROL, tmp);
464 }
465 
466 static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev)
467 {
468 	int num_crtc = 0;
469 
470 	switch (adev->asic_type) {
471 	case CHIP_FIJI:
472 	case CHIP_TONGA:
473 		num_crtc = 6;
474 		break;
475 	default:
476 		num_crtc = 0;
477 	}
478 	return num_crtc;
479 }
480 
481 void dce_v10_0_disable_dce(struct amdgpu_device *adev)
482 {
483 	/*Disable VGA render and enabled crtc, if has DCE engine*/
484 	if (amdgpu_atombios_has_dce_engine_info(adev)) {
485 		u32 tmp;
486 		int crtc_enabled, i;
487 
488 		dce_v10_0_set_vga_render_state(adev, false);
489 
490 		/*Disable crtc*/
491 		for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) {
492 			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
493 									 CRTC_CONTROL, CRTC_MASTER_EN);
494 			if (crtc_enabled) {
495 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
496 				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
497 				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
498 				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
499 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
500 			}
501 		}
502 	}
503 }
504 
505 static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
506 {
507 	struct drm_device *dev = encoder->dev;
508 	struct amdgpu_device *adev = drm_to_adev(dev);
509 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
510 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
511 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
512 	int bpc = 0;
513 	u32 tmp = 0;
514 	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
515 
516 	if (connector) {
517 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
518 		bpc = amdgpu_connector_get_monitor_bpc(connector);
519 		dither = amdgpu_connector->dither;
520 	}
521 
522 	/* LVDS/eDP FMT is set up by atom */
523 	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
524 		return;
525 
526 	/* not needed for analog */
527 	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
528 	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
529 		return;
530 
531 	if (bpc == 0)
532 		return;
533 
534 	switch (bpc) {
535 	case 6:
536 		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
537 			/* XXX sort out optimal dither settings */
538 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
539 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
540 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
541 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
542 		} else {
543 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
544 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
545 		}
546 		break;
547 	case 8:
548 		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
549 			/* XXX sort out optimal dither settings */
550 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
551 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
552 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
553 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
554 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
555 		} else {
556 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
557 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
558 		}
559 		break;
560 	case 10:
561 		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
562 			/* XXX sort out optimal dither settings */
563 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
564 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
565 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
566 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
567 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
568 		} else {
569 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
570 			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
571 		}
572 		break;
573 	default:
574 		/* not needed */
575 		break;
576 	}
577 
578 	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
579 }
580 
581 
582 /* display watermark setup */
583 /**
584  * dce_v10_0_line_buffer_adjust - Set up the line buffer
585  *
586  * @adev: amdgpu_device pointer
587  * @amdgpu_crtc: the selected display controller
588  * @mode: the current display mode on the selected display
589  * controller
590  *
591  * Setup up the line buffer allocation for
592  * the selected display controller (CIK).
593  * Returns the line buffer size in pixels.
594  */
595 static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev,
596 				       struct amdgpu_crtc *amdgpu_crtc,
597 				       struct drm_display_mode *mode)
598 {
599 	u32 tmp, buffer_alloc, i, mem_cfg;
600 	u32 pipe_offset = amdgpu_crtc->crtc_id;
601 	/*
602 	 * Line Buffer Setup
603 	 * There are 6 line buffers, one for each display controllers.
604 	 * There are 3 partitions per LB. Select the number of partitions
605 	 * to enable based on the display width.  For display widths larger
606 	 * than 4096, you need use to use 2 display controllers and combine
607 	 * them using the stereo blender.
608 	 */
609 	if (amdgpu_crtc->base.enabled && mode) {
610 		if (mode->crtc_hdisplay < 1920) {
611 			mem_cfg = 1;
612 			buffer_alloc = 2;
613 		} else if (mode->crtc_hdisplay < 2560) {
614 			mem_cfg = 2;
615 			buffer_alloc = 2;
616 		} else if (mode->crtc_hdisplay < 4096) {
617 			mem_cfg = 0;
618 			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
619 		} else {
620 			DRM_DEBUG_KMS("Mode too big for LB!\n");
621 			mem_cfg = 0;
622 			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
623 		}
624 	} else {
625 		mem_cfg = 1;
626 		buffer_alloc = 0;
627 	}
628 
629 	tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
630 	tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
631 	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
632 
633 	tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
634 	tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
635 	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
636 
637 	for (i = 0; i < adev->usec_timeout; i++) {
638 		tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
639 		if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
640 			break;
641 		udelay(1);
642 	}
643 
644 	if (amdgpu_crtc->base.enabled && mode) {
645 		switch (mem_cfg) {
646 		case 0:
647 		default:
648 			return 4096 * 2;
649 		case 1:
650 			return 1920 * 2;
651 		case 2:
652 			return 2560 * 2;
653 		}
654 	}
655 
656 	/* controller not enabled, so no lb used */
657 	return 0;
658 }
659 
660 /**
661  * cik_get_number_of_dram_channels - get the number of dram channels
662  *
663  * @adev: amdgpu_device pointer
664  *
665  * Look up the number of video ram channels (CIK).
666  * Used for display watermark bandwidth calculations
667  * Returns the number of dram channels
668  */
669 static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
670 {
671 	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
672 
673 	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
674 	case 0:
675 	default:
676 		return 1;
677 	case 1:
678 		return 2;
679 	case 2:
680 		return 4;
681 	case 3:
682 		return 8;
683 	case 4:
684 		return 3;
685 	case 5:
686 		return 6;
687 	case 6:
688 		return 10;
689 	case 7:
690 		return 12;
691 	case 8:
692 		return 16;
693 	}
694 }
695 
696 struct dce10_wm_params {
697 	u32 dram_channels; /* number of dram channels */
698 	u32 yclk;          /* bandwidth per dram data pin in kHz */
699 	u32 sclk;          /* engine clock in kHz */
700 	u32 disp_clk;      /* display clock in kHz */
701 	u32 src_width;     /* viewport width */
702 	u32 active_time;   /* active display time in ns */
703 	u32 blank_time;    /* blank time in ns */
704 	bool interlaced;    /* mode is interlaced */
705 	fixed20_12 vsc;    /* vertical scale ratio */
706 	u32 num_heads;     /* number of active crtcs */
707 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
708 	u32 lb_size;       /* line buffer allocated to pipe */
709 	u32 vtaps;         /* vertical scaler taps */
710 };
711 
712 /**
713  * dce_v10_0_dram_bandwidth - get the dram bandwidth
714  *
715  * @wm: watermark calculation data
716  *
717  * Calculate the raw dram bandwidth (CIK).
718  * Used for display watermark bandwidth calculations
719  * Returns the dram bandwidth in MBytes/s
720  */
721 static u32 dce_v10_0_dram_bandwidth(struct dce10_wm_params *wm)
722 {
723 	/* Calculate raw DRAM Bandwidth */
724 	fixed20_12 dram_efficiency; /* 0.7 */
725 	fixed20_12 yclk, dram_channels, bandwidth;
726 	fixed20_12 a;
727 
728 	a.full = dfixed_const(1000);
729 	yclk.full = dfixed_const(wm->yclk);
730 	yclk.full = dfixed_div(yclk, a);
731 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
732 	a.full = dfixed_const(10);
733 	dram_efficiency.full = dfixed_const(7);
734 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
735 	bandwidth.full = dfixed_mul(dram_channels, yclk);
736 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
737 
738 	return dfixed_trunc(bandwidth);
739 }
740 
741 /**
742  * dce_v10_0_dram_bandwidth_for_display - get the dram bandwidth for display
743  *
744  * @wm: watermark calculation data
745  *
746  * Calculate the dram bandwidth used for display (CIK).
747  * Used for display watermark bandwidth calculations
748  * Returns the dram bandwidth for display in MBytes/s
749  */
750 static u32 dce_v10_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
751 {
752 	/* Calculate DRAM Bandwidth and the part allocated to display. */
753 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
754 	fixed20_12 yclk, dram_channels, bandwidth;
755 	fixed20_12 a;
756 
757 	a.full = dfixed_const(1000);
758 	yclk.full = dfixed_const(wm->yclk);
759 	yclk.full = dfixed_div(yclk, a);
760 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
761 	a.full = dfixed_const(10);
762 	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
763 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
764 	bandwidth.full = dfixed_mul(dram_channels, yclk);
765 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
766 
767 	return dfixed_trunc(bandwidth);
768 }
769 
770 /**
771  * dce_v10_0_data_return_bandwidth - get the data return bandwidth
772  *
773  * @wm: watermark calculation data
774  *
775  * Calculate the data return bandwidth used for display (CIK).
776  * Used for display watermark bandwidth calculations
777  * Returns the data return bandwidth in MBytes/s
778  */
779 static u32 dce_v10_0_data_return_bandwidth(struct dce10_wm_params *wm)
780 {
781 	/* Calculate the display Data return Bandwidth */
782 	fixed20_12 return_efficiency; /* 0.8 */
783 	fixed20_12 sclk, bandwidth;
784 	fixed20_12 a;
785 
786 	a.full = dfixed_const(1000);
787 	sclk.full = dfixed_const(wm->sclk);
788 	sclk.full = dfixed_div(sclk, a);
789 	a.full = dfixed_const(10);
790 	return_efficiency.full = dfixed_const(8);
791 	return_efficiency.full = dfixed_div(return_efficiency, a);
792 	a.full = dfixed_const(32);
793 	bandwidth.full = dfixed_mul(a, sclk);
794 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
795 
796 	return dfixed_trunc(bandwidth);
797 }
798 
799 /**
800  * dce_v10_0_dmif_request_bandwidth - get the dmif bandwidth
801  *
802  * @wm: watermark calculation data
803  *
804  * Calculate the dmif bandwidth used for display (CIK).
805  * Used for display watermark bandwidth calculations
806  * Returns the dmif bandwidth in MBytes/s
807  */
808 static u32 dce_v10_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
809 {
810 	/* Calculate the DMIF Request Bandwidth */
811 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
812 	fixed20_12 disp_clk, bandwidth;
813 	fixed20_12 a, b;
814 
815 	a.full = dfixed_const(1000);
816 	disp_clk.full = dfixed_const(wm->disp_clk);
817 	disp_clk.full = dfixed_div(disp_clk, a);
818 	a.full = dfixed_const(32);
819 	b.full = dfixed_mul(a, disp_clk);
820 
821 	a.full = dfixed_const(10);
822 	disp_clk_request_efficiency.full = dfixed_const(8);
823 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
824 
825 	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
826 
827 	return dfixed_trunc(bandwidth);
828 }
829 
830 /**
831  * dce_v10_0_available_bandwidth - get the min available bandwidth
832  *
833  * @wm: watermark calculation data
834  *
835  * Calculate the min available bandwidth used for display (CIK).
836  * Used for display watermark bandwidth calculations
837  * Returns the min available bandwidth in MBytes/s
838  */
839 static u32 dce_v10_0_available_bandwidth(struct dce10_wm_params *wm)
840 {
841 	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
842 	u32 dram_bandwidth = dce_v10_0_dram_bandwidth(wm);
843 	u32 data_return_bandwidth = dce_v10_0_data_return_bandwidth(wm);
844 	u32 dmif_req_bandwidth = dce_v10_0_dmif_request_bandwidth(wm);
845 
846 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
847 }
848 
849 /**
850  * dce_v10_0_average_bandwidth - get the average available bandwidth
851  *
852  * @wm: watermark calculation data
853  *
854  * Calculate the average available bandwidth used for display (CIK).
855  * Used for display watermark bandwidth calculations
856  * Returns the average available bandwidth in MBytes/s
857  */
858 static u32 dce_v10_0_average_bandwidth(struct dce10_wm_params *wm)
859 {
860 	/* Calculate the display mode Average Bandwidth
861 	 * DisplayMode should contain the source and destination dimensions,
862 	 * timing, etc.
863 	 */
864 	fixed20_12 bpp;
865 	fixed20_12 line_time;
866 	fixed20_12 src_width;
867 	fixed20_12 bandwidth;
868 	fixed20_12 a;
869 
870 	a.full = dfixed_const(1000);
871 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
872 	line_time.full = dfixed_div(line_time, a);
873 	bpp.full = dfixed_const(wm->bytes_per_pixel);
874 	src_width.full = dfixed_const(wm->src_width);
875 	bandwidth.full = dfixed_mul(src_width, bpp);
876 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
877 	bandwidth.full = dfixed_div(bandwidth, line_time);
878 
879 	return dfixed_trunc(bandwidth);
880 }
881 
882 /**
883  * dce_v10_0_latency_watermark - get the latency watermark
884  *
885  * @wm: watermark calculation data
886  *
887  * Calculate the latency watermark (CIK).
888  * Used for display watermark bandwidth calculations
889  * Returns the latency watermark in ns
890  */
891 static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
892 {
893 	/* First calculate the latency in ns */
894 	u32 mc_latency = 2000; /* 2000 ns. */
895 	u32 available_bandwidth = dce_v10_0_available_bandwidth(wm);
896 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
897 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
898 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
899 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
900 		(wm->num_heads * cursor_line_pair_return_time);
901 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
902 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
903 	u32 tmp, dmif_size = 12288;
904 	fixed20_12 a, b, c;
905 
906 	if (wm->num_heads == 0)
907 		return 0;
908 
909 	a.full = dfixed_const(2);
910 	b.full = dfixed_const(1);
911 	if ((wm->vsc.full > a.full) ||
912 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
913 	    (wm->vtaps >= 5) ||
914 	    ((wm->vsc.full >= a.full) && wm->interlaced))
915 		max_src_lines_per_dst_line = 4;
916 	else
917 		max_src_lines_per_dst_line = 2;
918 
919 	a.full = dfixed_const(available_bandwidth);
920 	b.full = dfixed_const(wm->num_heads);
921 	a.full = dfixed_div(a, b);
922 	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
923 	tmp = min(dfixed_trunc(a), tmp);
924 
925 	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
926 
927 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
928 	b.full = dfixed_const(1000);
929 	c.full = dfixed_const(lb_fill_bw);
930 	b.full = dfixed_div(c, b);
931 	a.full = dfixed_div(a, b);
932 	line_fill_time = dfixed_trunc(a);
933 
934 	if (line_fill_time < wm->active_time)
935 		return latency;
936 	else
937 		return latency + (line_fill_time - wm->active_time);
938 
939 }
940 
941 /**
942  * dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display - check
943  * average and available dram bandwidth
944  *
945  * @wm: watermark calculation data
946  *
947  * Check if the display average bandwidth fits in the display
948  * dram bandwidth (CIK).
949  * Used for display watermark bandwidth calculations
950  * Returns true if the display fits, false if not.
951  */
952 static bool dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
953 {
954 	if (dce_v10_0_average_bandwidth(wm) <=
955 	    (dce_v10_0_dram_bandwidth_for_display(wm) / wm->num_heads))
956 		return true;
957 	else
958 		return false;
959 }
960 
961 /**
962  * dce_v10_0_average_bandwidth_vs_available_bandwidth - check
963  * average and available bandwidth
964  *
965  * @wm: watermark calculation data
966  *
967  * Check if the display average bandwidth fits in the display
968  * available bandwidth (CIK).
969  * Used for display watermark bandwidth calculations
970  * Returns true if the display fits, false if not.
971  */
972 static bool dce_v10_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
973 {
974 	if (dce_v10_0_average_bandwidth(wm) <=
975 	    (dce_v10_0_available_bandwidth(wm) / wm->num_heads))
976 		return true;
977 	else
978 		return false;
979 }
980 
981 /**
982  * dce_v10_0_check_latency_hiding - check latency hiding
983  *
984  * @wm: watermark calculation data
985  *
986  * Check latency hiding (CIK).
987  * Used for display watermark bandwidth calculations
988  * Returns true if the display fits, false if not.
989  */
990 static bool dce_v10_0_check_latency_hiding(struct dce10_wm_params *wm)
991 {
992 	u32 lb_partitions = wm->lb_size / wm->src_width;
993 	u32 line_time = wm->active_time + wm->blank_time;
994 	u32 latency_tolerant_lines;
995 	u32 latency_hiding;
996 	fixed20_12 a;
997 
998 	a.full = dfixed_const(1);
999 	if (wm->vsc.full > a.full)
1000 		latency_tolerant_lines = 1;
1001 	else {
1002 		if (lb_partitions <= (wm->vtaps + 1))
1003 			latency_tolerant_lines = 1;
1004 		else
1005 			latency_tolerant_lines = 2;
1006 	}
1007 
1008 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
1009 
1010 	if (dce_v10_0_latency_watermark(wm) <= latency_hiding)
1011 		return true;
1012 	else
1013 		return false;
1014 }
1015 
1016 /**
1017  * dce_v10_0_program_watermarks - program display watermarks
1018  *
1019  * @adev: amdgpu_device pointer
1020  * @amdgpu_crtc: the selected display controller
1021  * @lb_size: line buffer size
1022  * @num_heads: number of display controllers in use
1023  *
1024  * Calculate and program the display watermarks for the
1025  * selected display controller (CIK).
1026  */
1027 static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
1028 					struct amdgpu_crtc *amdgpu_crtc,
1029 					u32 lb_size, u32 num_heads)
1030 {
1031 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1032 	struct dce10_wm_params wm_low, wm_high;
1033 	u32 active_time;
1034 	u32 line_time = 0;
1035 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
1036 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1037 
1038 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
1039 		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
1040 					    (u32)mode->clock);
1041 		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
1042 					  (u32)mode->clock);
1043 		line_time = min(line_time, (u32)65535);
1044 
1045 		/* watermark for high clocks */
1046 		if (adev->pm.dpm_enabled) {
1047 			wm_high.yclk =
1048 				amdgpu_dpm_get_mclk(adev, false) * 10;
1049 			wm_high.sclk =
1050 				amdgpu_dpm_get_sclk(adev, false) * 10;
1051 		} else {
1052 			wm_high.yclk = adev->pm.current_mclk * 10;
1053 			wm_high.sclk = adev->pm.current_sclk * 10;
1054 		}
1055 
1056 		wm_high.disp_clk = mode->clock;
1057 		wm_high.src_width = mode->crtc_hdisplay;
1058 		wm_high.active_time = active_time;
1059 		wm_high.blank_time = line_time - wm_high.active_time;
1060 		wm_high.interlaced = false;
1061 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1062 			wm_high.interlaced = true;
1063 		wm_high.vsc = amdgpu_crtc->vsc;
1064 		wm_high.vtaps = 1;
1065 		if (amdgpu_crtc->rmx_type != RMX_OFF)
1066 			wm_high.vtaps = 2;
1067 		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1068 		wm_high.lb_size = lb_size;
1069 		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1070 		wm_high.num_heads = num_heads;
1071 
1072 		/* set for high clocks */
1073 		latency_watermark_a = min(dce_v10_0_latency_watermark(&wm_high), (u32)65535);
1074 
1075 		/* possibly force display priority to high */
1076 		/* should really do this at mode validation time... */
1077 		if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1078 		    !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1079 		    !dce_v10_0_check_latency_hiding(&wm_high) ||
1080 		    (adev->mode_info.disp_priority == 2)) {
1081 			DRM_DEBUG_KMS("force priority to high\n");
1082 		}
1083 
1084 		/* watermark for low clocks */
1085 		if (adev->pm.dpm_enabled) {
1086 			wm_low.yclk =
1087 				amdgpu_dpm_get_mclk(adev, true) * 10;
1088 			wm_low.sclk =
1089 				amdgpu_dpm_get_sclk(adev, true) * 10;
1090 		} else {
1091 			wm_low.yclk = adev->pm.current_mclk * 10;
1092 			wm_low.sclk = adev->pm.current_sclk * 10;
1093 		}
1094 
1095 		wm_low.disp_clk = mode->clock;
1096 		wm_low.src_width = mode->crtc_hdisplay;
1097 		wm_low.active_time = active_time;
1098 		wm_low.blank_time = line_time - wm_low.active_time;
1099 		wm_low.interlaced = false;
1100 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1101 			wm_low.interlaced = true;
1102 		wm_low.vsc = amdgpu_crtc->vsc;
1103 		wm_low.vtaps = 1;
1104 		if (amdgpu_crtc->rmx_type != RMX_OFF)
1105 			wm_low.vtaps = 2;
1106 		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1107 		wm_low.lb_size = lb_size;
1108 		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1109 		wm_low.num_heads = num_heads;
1110 
1111 		/* set for low clocks */
1112 		latency_watermark_b = min(dce_v10_0_latency_watermark(&wm_low), (u32)65535);
1113 
1114 		/* possibly force display priority to high */
1115 		/* should really do this at mode validation time... */
1116 		if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1117 		    !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1118 		    !dce_v10_0_check_latency_hiding(&wm_low) ||
1119 		    (adev->mode_info.disp_priority == 2)) {
1120 			DRM_DEBUG_KMS("force priority to high\n");
1121 		}
1122 		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1123 	}
1124 
1125 	/* select wm A */
1126 	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1127 	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
1128 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1129 	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
1130 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
1131 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
1132 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1133 	/* select wm B */
1134 	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
1135 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1136 	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
1137 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
1138 	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
1139 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1140 	/* restore original selection */
1141 	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1142 
1143 	/* save values for DPM */
1144 	amdgpu_crtc->line_time = line_time;
1145 	amdgpu_crtc->wm_high = latency_watermark_a;
1146 	amdgpu_crtc->wm_low = latency_watermark_b;
1147 	/* Save number of lines the linebuffer leads before the scanout */
1148 	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1149 }
1150 
1151 /**
1152  * dce_v10_0_bandwidth_update - program display watermarks
1153  *
1154  * @adev: amdgpu_device pointer
1155  *
1156  * Calculate and program the display watermarks and line
1157  * buffer allocation (CIK).
1158  */
1159 static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev)
1160 {
1161 	struct drm_display_mode *mode = NULL;
1162 	u32 num_heads = 0, lb_size;
1163 	int i;
1164 
1165 	amdgpu_display_update_priority(adev);
1166 
1167 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1168 		if (adev->mode_info.crtcs[i]->base.enabled)
1169 			num_heads++;
1170 	}
1171 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1172 		mode = &adev->mode_info.crtcs[i]->base.mode;
1173 		lb_size = dce_v10_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1174 		dce_v10_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1175 					    lb_size, num_heads);
1176 	}
1177 }
1178 
1179 static void dce_v10_0_audio_get_connected_pins(struct amdgpu_device *adev)
1180 {
1181 	int i;
1182 	u32 offset, tmp;
1183 
1184 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1185 		offset = adev->mode_info.audio.pin[i].offset;
1186 		tmp = RREG32_AUDIO_ENDPT(offset,
1187 					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1188 		if (((tmp &
1189 		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1190 		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1191 			adev->mode_info.audio.pin[i].connected = false;
1192 		else
1193 			adev->mode_info.audio.pin[i].connected = true;
1194 	}
1195 }
1196 
1197 static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *adev)
1198 {
1199 	int i;
1200 
1201 	dce_v10_0_audio_get_connected_pins(adev);
1202 
1203 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1204 		if (adev->mode_info.audio.pin[i].connected)
1205 			return &adev->mode_info.audio.pin[i];
1206 	}
1207 	DRM_ERROR("No connected audio pins found!\n");
1208 	return NULL;
1209 }
1210 
1211 static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1212 {
1213 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
1214 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1215 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1216 	u32 tmp;
1217 
1218 	if (!dig || !dig->afmt || !dig->afmt->pin)
1219 		return;
1220 
1221 	tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
1222 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
1223 	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
1224 }
1225 
1226 static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
1227 						struct drm_display_mode *mode)
1228 {
1229 	struct drm_device *dev = encoder->dev;
1230 	struct amdgpu_device *adev = drm_to_adev(dev);
1231 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1232 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1233 	struct drm_connector *connector;
1234 	struct drm_connector_list_iter iter;
1235 	struct amdgpu_connector *amdgpu_connector = NULL;
1236 	u32 tmp;
1237 	int interlace = 0;
1238 
1239 	if (!dig || !dig->afmt || !dig->afmt->pin)
1240 		return;
1241 
1242 	drm_connector_list_iter_begin(dev, &iter);
1243 	drm_for_each_connector_iter(connector, &iter) {
1244 		if (connector->encoder == encoder) {
1245 			amdgpu_connector = to_amdgpu_connector(connector);
1246 			break;
1247 		}
1248 	}
1249 	drm_connector_list_iter_end(&iter);
1250 
1251 	if (!amdgpu_connector) {
1252 		DRM_ERROR("Couldn't find encoder's connector\n");
1253 		return;
1254 	}
1255 
1256 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1257 		interlace = 1;
1258 	if (connector->latency_present[interlace]) {
1259 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1260 				    VIDEO_LIPSYNC, connector->video_latency[interlace]);
1261 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1262 				    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1263 	} else {
1264 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1265 				    VIDEO_LIPSYNC, 0);
1266 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1267 				    AUDIO_LIPSYNC, 0);
1268 	}
1269 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1270 			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1271 }
1272 
1273 static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1274 {
1275 	struct drm_device *dev = encoder->dev;
1276 	struct amdgpu_device *adev = drm_to_adev(dev);
1277 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1278 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1279 	struct drm_connector *connector;
1280 	struct drm_connector_list_iter iter;
1281 	struct amdgpu_connector *amdgpu_connector = NULL;
1282 	u32 tmp;
1283 	u8 *sadb = NULL;
1284 	int sad_count;
1285 
1286 	if (!dig || !dig->afmt || !dig->afmt->pin)
1287 		return;
1288 
1289 	drm_connector_list_iter_begin(dev, &iter);
1290 	drm_for_each_connector_iter(connector, &iter) {
1291 		if (connector->encoder == encoder) {
1292 			amdgpu_connector = to_amdgpu_connector(connector);
1293 			break;
1294 		}
1295 	}
1296 	drm_connector_list_iter_end(&iter);
1297 
1298 	if (!amdgpu_connector) {
1299 		DRM_ERROR("Couldn't find encoder's connector\n");
1300 		return;
1301 	}
1302 
1303 	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1304 	if (sad_count < 0) {
1305 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1306 		sad_count = 0;
1307 	}
1308 
1309 	/* program the speaker allocation */
1310 	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1311 				 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1312 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1313 			    DP_CONNECTION, 0);
1314 	/* set HDMI mode */
1315 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1316 			    HDMI_CONNECTION, 1);
1317 	if (sad_count)
1318 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1319 				    SPEAKER_ALLOCATION, sadb[0]);
1320 	else
1321 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1322 				    SPEAKER_ALLOCATION, 5); /* stereo */
1323 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1324 			   ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1325 
1326 	kfree(sadb);
1327 }
1328 
1329 static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
1330 {
1331 	struct drm_device *dev = encoder->dev;
1332 	struct amdgpu_device *adev = drm_to_adev(dev);
1333 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1334 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1335 	struct drm_connector *connector;
1336 	struct drm_connector_list_iter iter;
1337 	struct amdgpu_connector *amdgpu_connector = NULL;
1338 	struct cea_sad *sads;
1339 	int i, sad_count;
1340 
1341 	static const u16 eld_reg_to_type[][2] = {
1342 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1343 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1344 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1345 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1346 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1347 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1348 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1349 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1350 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1351 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1352 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1353 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1354 	};
1355 
1356 	if (!dig || !dig->afmt || !dig->afmt->pin)
1357 		return;
1358 
1359 	drm_connector_list_iter_begin(dev, &iter);
1360 	drm_for_each_connector_iter(connector, &iter) {
1361 		if (connector->encoder == encoder) {
1362 			amdgpu_connector = to_amdgpu_connector(connector);
1363 			break;
1364 		}
1365 	}
1366 	drm_connector_list_iter_end(&iter);
1367 
1368 	if (!amdgpu_connector) {
1369 		DRM_ERROR("Couldn't find encoder's connector\n");
1370 		return;
1371 	}
1372 
1373 	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1374 	if (sad_count < 0)
1375 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1376 	if (sad_count <= 0)
1377 		return;
1378 	BUG_ON(!sads);
1379 
1380 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1381 		u32 tmp = 0;
1382 		u8 stereo_freqs = 0;
1383 		int max_channels = -1;
1384 		int j;
1385 
1386 		for (j = 0; j < sad_count; j++) {
1387 			struct cea_sad *sad = &sads[j];
1388 
1389 			if (sad->format == eld_reg_to_type[i][1]) {
1390 				if (sad->channels > max_channels) {
1391 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1392 							    MAX_CHANNELS, sad->channels);
1393 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1394 							    DESCRIPTOR_BYTE_2, sad->byte2);
1395 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1396 							    SUPPORTED_FREQUENCIES, sad->freq);
1397 					max_channels = sad->channels;
1398 				}
1399 
1400 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1401 					stereo_freqs |= sad->freq;
1402 				else
1403 					break;
1404 			}
1405 		}
1406 
1407 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1408 				    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
1409 		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
1410 	}
1411 
1412 	kfree(sads);
1413 }
1414 
1415 static void dce_v10_0_audio_enable(struct amdgpu_device *adev,
1416 				  struct amdgpu_audio_pin *pin,
1417 				  bool enable)
1418 {
1419 	if (!pin)
1420 		return;
1421 
1422 	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1423 			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1424 }
1425 
1426 static const u32 pin_offsets[] =
1427 {
1428 	AUD0_REGISTER_OFFSET,
1429 	AUD1_REGISTER_OFFSET,
1430 	AUD2_REGISTER_OFFSET,
1431 	AUD3_REGISTER_OFFSET,
1432 	AUD4_REGISTER_OFFSET,
1433 	AUD5_REGISTER_OFFSET,
1434 	AUD6_REGISTER_OFFSET,
1435 };
1436 
1437 static int dce_v10_0_audio_init(struct amdgpu_device *adev)
1438 {
1439 	int i;
1440 
1441 	if (!amdgpu_audio)
1442 		return 0;
1443 
1444 	adev->mode_info.audio.enabled = true;
1445 
1446 	adev->mode_info.audio.num_pins = 7;
1447 
1448 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1449 		adev->mode_info.audio.pin[i].channels = -1;
1450 		adev->mode_info.audio.pin[i].rate = -1;
1451 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1452 		adev->mode_info.audio.pin[i].status_bits = 0;
1453 		adev->mode_info.audio.pin[i].category_code = 0;
1454 		adev->mode_info.audio.pin[i].connected = false;
1455 		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1456 		adev->mode_info.audio.pin[i].id = i;
1457 		/* disable audio.  it will be set up later */
1458 		/* XXX remove once we switch to ip funcs */
1459 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1460 	}
1461 
1462 	return 0;
1463 }
1464 
1465 static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
1466 {
1467 	int i;
1468 
1469 	if (!amdgpu_audio)
1470 		return;
1471 
1472 	if (!adev->mode_info.audio.enabled)
1473 		return;
1474 
1475 	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1476 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1477 
1478 	adev->mode_info.audio.enabled = false;
1479 }
1480 
1481 /*
1482  * update the N and CTS parameters for a given pixel clock rate
1483  */
1484 static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1485 {
1486 	struct drm_device *dev = encoder->dev;
1487 	struct amdgpu_device *adev = drm_to_adev(dev);
1488 	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1489 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1490 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1491 	u32 tmp;
1492 
1493 	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
1494 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
1495 	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
1496 	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
1497 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
1498 	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
1499 
1500 	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
1501 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
1502 	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
1503 	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
1504 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
1505 	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
1506 
1507 	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
1508 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
1509 	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
1510 	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
1511 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
1512 	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
1513 
1514 }
1515 
1516 /*
1517  * build a HDMI Video Info Frame
1518  */
1519 static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1520 					       void *buffer, size_t size)
1521 {
1522 	struct drm_device *dev = encoder->dev;
1523 	struct amdgpu_device *adev = drm_to_adev(dev);
1524 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1525 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1526 	uint8_t *frame = buffer + 3;
1527 	uint8_t *header = buffer;
1528 
1529 	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
1530 		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1531 	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
1532 		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1533 	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
1534 		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1535 	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
1536 		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1537 }
1538 
1539 static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1540 {
1541 	struct drm_device *dev = encoder->dev;
1542 	struct amdgpu_device *adev = drm_to_adev(dev);
1543 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1544 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1545 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1546 	u32 dto_phase = 24 * 1000;
1547 	u32 dto_modulo = clock;
1548 	u32 tmp;
1549 
1550 	if (!dig || !dig->afmt)
1551 		return;
1552 
1553 	/* XXX two dtos; generally use dto0 for hdmi */
1554 	/* Express [24MHz / target pixel clock] as an exact rational
1555 	 * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
1556 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
1557 	 */
1558 	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1559 	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
1560 			    amdgpu_crtc->crtc_id);
1561 	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1562 	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1563 	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1564 }
1565 
1566 /*
1567  * update the info frames with the data from the current display mode
1568  */
1569 static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
1570 				  struct drm_display_mode *mode)
1571 {
1572 	struct drm_device *dev = encoder->dev;
1573 	struct amdgpu_device *adev = drm_to_adev(dev);
1574 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1575 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1576 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1577 	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1578 	struct hdmi_avi_infoframe frame;
1579 	ssize_t err;
1580 	u32 tmp;
1581 	int bpc = 8;
1582 
1583 	if (!dig || !dig->afmt)
1584 		return;
1585 
1586 	/* Silent, r600_hdmi_enable will raise WARN for us */
1587 	if (!dig->afmt->enabled)
1588 		return;
1589 
1590 	/* hdmi deep color mode general control packets setup, if bpc > 8 */
1591 	if (encoder->crtc) {
1592 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1593 		bpc = amdgpu_crtc->bpc;
1594 	}
1595 
1596 	/* disable audio prior to setting up hw */
1597 	dig->afmt->pin = dce_v10_0_audio_get_pin(adev);
1598 	dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
1599 
1600 	dce_v10_0_audio_set_dto(encoder, mode->clock);
1601 
1602 	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1603 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
1604 	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
1605 
1606 	WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
1607 
1608 	tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
1609 	switch (bpc) {
1610 	case 0:
1611 	case 6:
1612 	case 8:
1613 	case 16:
1614 	default:
1615 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
1616 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
1617 		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1618 			  connector->name, bpc);
1619 		break;
1620 	case 10:
1621 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1622 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
1623 		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1624 			  connector->name);
1625 		break;
1626 	case 12:
1627 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
1628 		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
1629 		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1630 			  connector->name);
1631 		break;
1632 	}
1633 	WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
1634 
1635 	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1636 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
1637 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
1638 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
1639 	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
1640 
1641 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1642 	/* enable audio info frames (frames won't be set until audio is enabled) */
1643 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1644 	/* required for audio info values to be updated */
1645 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1646 	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1647 
1648 	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1649 	/* required for audio info values to be updated */
1650 	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1651 	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1652 
1653 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1654 	/* anything other than 0 */
1655 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
1656 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1657 
1658 	WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
1659 
1660 	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1661 	/* set the default audio delay */
1662 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1663 	/* should be suffient for all audio modes and small enough for all hblanks */
1664 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1665 	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1666 
1667 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1668 	/* allow 60958 channel status fields to be updated */
1669 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1670 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1671 
1672 	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
1673 	if (bpc > 8)
1674 		/* clear SW CTS value */
1675 		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
1676 	else
1677 		/* select SW CTS value */
1678 		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
1679 	/* allow hw to sent ACR packets when required */
1680 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
1681 	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
1682 
1683 	dce_v10_0_afmt_update_ACR(encoder, mode->clock);
1684 
1685 	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1686 	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1687 	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1688 
1689 	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1690 	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1691 	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1692 
1693 	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1694 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1695 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1696 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1697 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1698 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1699 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1700 	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1701 
1702 	dce_v10_0_audio_write_speaker_allocation(encoder);
1703 
1704 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
1705 	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1706 
1707 	dce_v10_0_afmt_audio_select_pin(encoder);
1708 	dce_v10_0_audio_write_sad_regs(encoder);
1709 	dce_v10_0_audio_write_latency_fields(encoder, mode);
1710 
1711 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1712 	if (err < 0) {
1713 		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1714 		return;
1715 	}
1716 
1717 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1718 	if (err < 0) {
1719 		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1720 		return;
1721 	}
1722 
1723 	dce_v10_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1724 
1725 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1726 	/* enable AVI info frames */
1727 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1728 	/* required for audio info values to be updated */
1729 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1730 	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1731 
1732 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1733 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1734 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1735 
1736 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1737 	/* send audio packets */
1738 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1739 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1740 
1741 	WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
1742 	WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
1743 	WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
1744 	WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
1745 
1746 	/* enable audio after to setting up hw */
1747 	dce_v10_0_audio_enable(adev, dig->afmt->pin, true);
1748 }
1749 
1750 static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1751 {
1752 	struct drm_device *dev = encoder->dev;
1753 	struct amdgpu_device *adev = drm_to_adev(dev);
1754 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1755 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1756 
1757 	if (!dig || !dig->afmt)
1758 		return;
1759 
1760 	/* Silent, r600_hdmi_enable will raise WARN for us */
1761 	if (enable && dig->afmt->enabled)
1762 		return;
1763 	if (!enable && !dig->afmt->enabled)
1764 		return;
1765 
1766 	if (!enable && dig->afmt->pin) {
1767 		dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
1768 		dig->afmt->pin = NULL;
1769 	}
1770 
1771 	dig->afmt->enabled = enable;
1772 
1773 	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1774 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1775 }
1776 
1777 static int dce_v10_0_afmt_init(struct amdgpu_device *adev)
1778 {
1779 	int i;
1780 
1781 	for (i = 0; i < adev->mode_info.num_dig; i++)
1782 		adev->mode_info.afmt[i] = NULL;
1783 
1784 	/* DCE10 has audio blocks tied to DIG encoders */
1785 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1786 		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1787 		if (adev->mode_info.afmt[i]) {
1788 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1789 			adev->mode_info.afmt[i]->id = i;
1790 		} else {
1791 			int j;
1792 			for (j = 0; j < i; j++) {
1793 				kfree(adev->mode_info.afmt[j]);
1794 				adev->mode_info.afmt[j] = NULL;
1795 			}
1796 			return -ENOMEM;
1797 		}
1798 	}
1799 	return 0;
1800 }
1801 
1802 static void dce_v10_0_afmt_fini(struct amdgpu_device *adev)
1803 {
1804 	int i;
1805 
1806 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1807 		kfree(adev->mode_info.afmt[i]);
1808 		adev->mode_info.afmt[i] = NULL;
1809 	}
1810 }
1811 
1812 static const u32 vga_control_regs[6] =
1813 {
1814 	mmD1VGA_CONTROL,
1815 	mmD2VGA_CONTROL,
1816 	mmD3VGA_CONTROL,
1817 	mmD4VGA_CONTROL,
1818 	mmD5VGA_CONTROL,
1819 	mmD6VGA_CONTROL,
1820 };
1821 
1822 static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable)
1823 {
1824 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1825 	struct drm_device *dev = crtc->dev;
1826 	struct amdgpu_device *adev = drm_to_adev(dev);
1827 	u32 vga_control;
1828 
1829 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1830 	if (enable)
1831 		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1832 	else
1833 		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1834 }
1835 
1836 static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable)
1837 {
1838 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1839 	struct drm_device *dev = crtc->dev;
1840 	struct amdgpu_device *adev = drm_to_adev(dev);
1841 
1842 	if (enable)
1843 		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1844 	else
1845 		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1846 }
1847 
1848 static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
1849 				     struct drm_framebuffer *fb,
1850 				     int x, int y, int atomic)
1851 {
1852 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1853 	struct drm_device *dev = crtc->dev;
1854 	struct amdgpu_device *adev = drm_to_adev(dev);
1855 	struct drm_framebuffer *target_fb;
1856 	struct drm_gem_object *obj;
1857 	struct amdgpu_bo *abo;
1858 	uint64_t fb_location, tiling_flags;
1859 	uint32_t fb_format, fb_pitch_pixels;
1860 	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
1861 	u32 pipe_config;
1862 	u32 tmp, viewport_w, viewport_h;
1863 	int r;
1864 	bool bypass_lut = false;
1865 	struct drm_format_name_buf format_name;
1866 
1867 	/* no fb bound */
1868 	if (!atomic && !crtc->primary->fb) {
1869 		DRM_DEBUG_KMS("No FB bound\n");
1870 		return 0;
1871 	}
1872 
1873 	if (atomic)
1874 		target_fb = fb;
1875 	else
1876 		target_fb = crtc->primary->fb;
1877 
1878 	/* If atomic, assume fb object is pinned & idle & fenced and
1879 	 * just update base pointers
1880 	 */
1881 	obj = target_fb->obj[0];
1882 	abo = gem_to_amdgpu_bo(obj);
1883 	r = amdgpu_bo_reserve(abo, false);
1884 	if (unlikely(r != 0))
1885 		return r;
1886 
1887 	if (!atomic) {
1888 		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1889 		if (unlikely(r != 0)) {
1890 			amdgpu_bo_unreserve(abo);
1891 			return -EINVAL;
1892 		}
1893 	}
1894 	fb_location = amdgpu_bo_gpu_offset(abo);
1895 
1896 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1897 	amdgpu_bo_unreserve(abo);
1898 
1899 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1900 
1901 	switch (target_fb->format->format) {
1902 	case DRM_FORMAT_C8:
1903 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
1904 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1905 		break;
1906 	case DRM_FORMAT_XRGB4444:
1907 	case DRM_FORMAT_ARGB4444:
1908 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1909 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
1910 #ifdef __BIG_ENDIAN
1911 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1912 					ENDIAN_8IN16);
1913 #endif
1914 		break;
1915 	case DRM_FORMAT_XRGB1555:
1916 	case DRM_FORMAT_ARGB1555:
1917 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1918 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1919 #ifdef __BIG_ENDIAN
1920 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1921 					ENDIAN_8IN16);
1922 #endif
1923 		break;
1924 	case DRM_FORMAT_BGRX5551:
1925 	case DRM_FORMAT_BGRA5551:
1926 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1927 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
1928 #ifdef __BIG_ENDIAN
1929 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1930 					ENDIAN_8IN16);
1931 #endif
1932 		break;
1933 	case DRM_FORMAT_RGB565:
1934 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
1935 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
1936 #ifdef __BIG_ENDIAN
1937 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1938 					ENDIAN_8IN16);
1939 #endif
1940 		break;
1941 	case DRM_FORMAT_XRGB8888:
1942 	case DRM_FORMAT_ARGB8888:
1943 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1944 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1945 #ifdef __BIG_ENDIAN
1946 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1947 					ENDIAN_8IN32);
1948 #endif
1949 		break;
1950 	case DRM_FORMAT_XRGB2101010:
1951 	case DRM_FORMAT_ARGB2101010:
1952 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1953 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
1954 #ifdef __BIG_ENDIAN
1955 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1956 					ENDIAN_8IN32);
1957 #endif
1958 		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1959 		bypass_lut = true;
1960 		break;
1961 	case DRM_FORMAT_BGRX1010102:
1962 	case DRM_FORMAT_BGRA1010102:
1963 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1964 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
1965 #ifdef __BIG_ENDIAN
1966 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1967 					ENDIAN_8IN32);
1968 #endif
1969 		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1970 		bypass_lut = true;
1971 		break;
1972 	case DRM_FORMAT_XBGR8888:
1973 	case DRM_FORMAT_ABGR8888:
1974 		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
1975 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
1976 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
1977 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
1978 #ifdef __BIG_ENDIAN
1979 		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
1980 					ENDIAN_8IN32);
1981 #endif
1982 		break;
1983 	default:
1984 		DRM_ERROR("Unsupported screen format %s\n",
1985 		          drm_get_format_name(target_fb->format->format, &format_name));
1986 		return -EINVAL;
1987 	}
1988 
1989 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1990 		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1991 
1992 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1993 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1994 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1995 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1996 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1997 
1998 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
1999 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
2000 					  ARRAY_2D_TILED_THIN1);
2001 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
2002 					  tile_split);
2003 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
2004 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
2005 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
2006 					  mtaspect);
2007 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
2008 					  ADDR_SURF_MICRO_TILING_DISPLAY);
2009 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2010 		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
2011 					  ARRAY_1D_TILED_THIN1);
2012 	}
2013 
2014 	fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
2015 				  pipe_config);
2016 
2017 	dce_v10_0_vga_enable(crtc, false);
2018 
2019 	/* Make sure surface address is updated at vertical blank rather than
2020 	 * horizontal blank
2021 	 */
2022 	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
2023 	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
2024 			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
2025 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2026 
2027 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2028 	       upper_32_bits(fb_location));
2029 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2030 	       upper_32_bits(fb_location));
2031 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2032 	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2033 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2034 	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2035 	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2036 	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2037 
2038 	/*
2039 	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
2040 	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2041 	 * retain the full precision throughout the pipeline.
2042 	 */
2043 	tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
2044 	if (bypass_lut)
2045 		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
2046 	else
2047 		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
2048 	WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
2049 
2050 	if (bypass_lut)
2051 		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2052 
2053 	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2054 	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2055 	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2056 	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2057 	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2058 	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2059 
2060 	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
2061 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2062 
2063 	dce_v10_0_grph_enable(crtc, true);
2064 
2065 	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2066 	       target_fb->height);
2067 
2068 	x &= ~3;
2069 	y &= ~1;
2070 	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2071 	       (x << 16) | y);
2072 	viewport_w = crtc->mode.hdisplay;
2073 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2074 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2075 	       (viewport_w << 16) | viewport_h);
2076 
2077 	/* set pageflip to happen anywhere in vblank interval */
2078 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2079 
2080 	if (!atomic && fb && fb != crtc->primary->fb) {
2081 		abo = gem_to_amdgpu_bo(fb->obj[0]);
2082 		r = amdgpu_bo_reserve(abo, true);
2083 		if (unlikely(r != 0))
2084 			return r;
2085 		amdgpu_bo_unpin(abo);
2086 		amdgpu_bo_unreserve(abo);
2087 	}
2088 
2089 	/* Bytes per pixel may have changed */
2090 	dce_v10_0_bandwidth_update(adev);
2091 
2092 	return 0;
2093 }
2094 
2095 static void dce_v10_0_set_interleave(struct drm_crtc *crtc,
2096 				     struct drm_display_mode *mode)
2097 {
2098 	struct drm_device *dev = crtc->dev;
2099 	struct amdgpu_device *adev = drm_to_adev(dev);
2100 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2101 	u32 tmp;
2102 
2103 	tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
2104 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2105 		tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
2106 	else
2107 		tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
2108 	WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
2109 }
2110 
2111 static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
2112 {
2113 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2114 	struct drm_device *dev = crtc->dev;
2115 	struct amdgpu_device *adev = drm_to_adev(dev);
2116 	u16 *r, *g, *b;
2117 	int i;
2118 	u32 tmp;
2119 
2120 	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2121 
2122 	tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
2123 	tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
2124 	tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_OVL_MODE, 0);
2125 	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2126 
2127 	tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
2128 	tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
2129 	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2130 
2131 	tmp = RREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset);
2132 	tmp = REG_SET_FIELD(tmp, PRESCALE_OVL_CONTROL, OVL_PRESCALE_BYPASS, 1);
2133 	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2134 
2135 	tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2136 	tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
2137 	tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, OVL_INPUT_GAMMA_MODE, 0);
2138 	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2139 
2140 	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2141 
2142 	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2143 	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2144 	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2145 
2146 	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2147 	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2148 	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2149 
2150 	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2151 	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2152 
2153 	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2154 	r = crtc->gamma_store;
2155 	g = r + crtc->gamma_size;
2156 	b = g + crtc->gamma_size;
2157 	for (i = 0; i < 256; i++) {
2158 		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2159 		       ((*r++ & 0xffc0) << 14) |
2160 		       ((*g++ & 0xffc0) << 4) |
2161 		       (*b++ >> 6));
2162 	}
2163 
2164 	tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2165 	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
2166 	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, OVL_DEGAMMA_MODE, 0);
2167 	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
2168 	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2169 
2170 	tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
2171 	tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
2172 	tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, OVL_GAMUT_REMAP_MODE, 0);
2173 	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2174 
2175 	tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
2176 	tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
2177 	tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, OVL_REGAMMA_MODE, 0);
2178 	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2179 
2180 	tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
2181 	tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
2182 	tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_OVL_MODE, 0);
2183 	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2184 
2185 	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2186 	WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
2187 	/* XXX this only needs to be programmed once per crtc at startup,
2188 	 * not sure where the best place for it is
2189 	 */
2190 	tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
2191 	tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
2192 	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2193 }
2194 
2195 static int dce_v10_0_pick_dig_encoder(struct drm_encoder *encoder)
2196 {
2197 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2198 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2199 
2200 	switch (amdgpu_encoder->encoder_id) {
2201 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2202 		if (dig->linkb)
2203 			return 1;
2204 		else
2205 			return 0;
2206 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2207 		if (dig->linkb)
2208 			return 3;
2209 		else
2210 			return 2;
2211 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2212 		if (dig->linkb)
2213 			return 5;
2214 		else
2215 			return 4;
2216 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2217 		return 6;
2218 	default:
2219 		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2220 		return 0;
2221 	}
2222 }
2223 
2224 /**
2225  * dce_v10_0_pick_pll - Allocate a PPLL for use by the crtc.
2226  *
2227  * @crtc: drm crtc
2228  *
2229  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2230  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2231  * monitors a dedicated PPLL must be used.  If a particular board has
2232  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2233  * as there is no need to program the PLL itself.  If we are not able to
2234  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2235  * avoid messing up an existing monitor.
2236  *
2237  * Asic specific PLL information
2238  *
2239  * DCE 10.x
2240  * Tonga
2241  * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2242  * CI
2243  * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2244  *
2245  */
2246 static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
2247 {
2248 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2249 	struct drm_device *dev = crtc->dev;
2250 	struct amdgpu_device *adev = drm_to_adev(dev);
2251 	u32 pll_in_use;
2252 	int pll;
2253 
2254 	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2255 		if (adev->clock.dp_extclk)
2256 			/* skip PPLL programming if using ext clock */
2257 			return ATOM_PPLL_INVALID;
2258 		else {
2259 			/* use the same PPLL for all DP monitors */
2260 			pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2261 			if (pll != ATOM_PPLL_INVALID)
2262 				return pll;
2263 		}
2264 	} else {
2265 		/* use the same PPLL for all monitors with the same clock */
2266 		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2267 		if (pll != ATOM_PPLL_INVALID)
2268 			return pll;
2269 	}
2270 
2271 	/* DCE10 has PPLL0, PPLL1, and PPLL2 */
2272 	pll_in_use = amdgpu_pll_get_use_mask(crtc);
2273 	if (!(pll_in_use & (1 << ATOM_PPLL2)))
2274 		return ATOM_PPLL2;
2275 	if (!(pll_in_use & (1 << ATOM_PPLL1)))
2276 		return ATOM_PPLL1;
2277 	if (!(pll_in_use & (1 << ATOM_PPLL0)))
2278 		return ATOM_PPLL0;
2279 	DRM_ERROR("unable to allocate a PPLL\n");
2280 	return ATOM_PPLL_INVALID;
2281 }
2282 
2283 static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2284 {
2285 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2286 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2287 	uint32_t cur_lock;
2288 
2289 	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2290 	if (lock)
2291 		cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
2292 	else
2293 		cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
2294 	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2295 }
2296 
2297 static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
2298 {
2299 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2300 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2301 	u32 tmp;
2302 
2303 	tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2304 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
2305 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2306 }
2307 
2308 static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
2309 {
2310 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2311 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2312 	u32 tmp;
2313 
2314 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2315 	       upper_32_bits(amdgpu_crtc->cursor_addr));
2316 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2317 	       lower_32_bits(amdgpu_crtc->cursor_addr));
2318 
2319 	tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2320 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
2321 	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
2322 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2323 }
2324 
2325 static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
2326 					int x, int y)
2327 {
2328 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2329 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2330 	int xorigin = 0, yorigin = 0;
2331 
2332 	amdgpu_crtc->cursor_x = x;
2333 	amdgpu_crtc->cursor_y = y;
2334 
2335 	/* avivo cursor are offset into the total surface */
2336 	x += crtc->x;
2337 	y += crtc->y;
2338 	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2339 
2340 	if (x < 0) {
2341 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2342 		x = 0;
2343 	}
2344 	if (y < 0) {
2345 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2346 		y = 0;
2347 	}
2348 
2349 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2350 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2351 	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2352 	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2353 
2354 	return 0;
2355 }
2356 
2357 static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
2358 				      int x, int y)
2359 {
2360 	int ret;
2361 
2362 	dce_v10_0_lock_cursor(crtc, true);
2363 	ret = dce_v10_0_cursor_move_locked(crtc, x, y);
2364 	dce_v10_0_lock_cursor(crtc, false);
2365 
2366 	return ret;
2367 }
2368 
2369 static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
2370 				      struct drm_file *file_priv,
2371 				      uint32_t handle,
2372 				      uint32_t width,
2373 				      uint32_t height,
2374 				      int32_t hot_x,
2375 				      int32_t hot_y)
2376 {
2377 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2378 	struct drm_gem_object *obj;
2379 	struct amdgpu_bo *aobj;
2380 	int ret;
2381 
2382 	if (!handle) {
2383 		/* turn off cursor */
2384 		dce_v10_0_hide_cursor(crtc);
2385 		obj = NULL;
2386 		goto unpin;
2387 	}
2388 
2389 	if ((width > amdgpu_crtc->max_cursor_width) ||
2390 	    (height > amdgpu_crtc->max_cursor_height)) {
2391 		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2392 		return -EINVAL;
2393 	}
2394 
2395 	obj = drm_gem_object_lookup(file_priv, handle);
2396 	if (!obj) {
2397 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2398 		return -ENOENT;
2399 	}
2400 
2401 	aobj = gem_to_amdgpu_bo(obj);
2402 	ret = amdgpu_bo_reserve(aobj, false);
2403 	if (ret != 0) {
2404 		drm_gem_object_put(obj);
2405 		return ret;
2406 	}
2407 
2408 	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2409 	amdgpu_bo_unreserve(aobj);
2410 	if (ret) {
2411 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2412 		drm_gem_object_put(obj);
2413 		return ret;
2414 	}
2415 	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2416 
2417 	dce_v10_0_lock_cursor(crtc, true);
2418 
2419 	if (width != amdgpu_crtc->cursor_width ||
2420 	    height != amdgpu_crtc->cursor_height ||
2421 	    hot_x != amdgpu_crtc->cursor_hot_x ||
2422 	    hot_y != amdgpu_crtc->cursor_hot_y) {
2423 		int x, y;
2424 
2425 		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2426 		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2427 
2428 		dce_v10_0_cursor_move_locked(crtc, x, y);
2429 
2430 		amdgpu_crtc->cursor_width = width;
2431 		amdgpu_crtc->cursor_height = height;
2432 		amdgpu_crtc->cursor_hot_x = hot_x;
2433 		amdgpu_crtc->cursor_hot_y = hot_y;
2434 	}
2435 
2436 	dce_v10_0_show_cursor(crtc);
2437 	dce_v10_0_lock_cursor(crtc, false);
2438 
2439 unpin:
2440 	if (amdgpu_crtc->cursor_bo) {
2441 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2442 		ret = amdgpu_bo_reserve(aobj, true);
2443 		if (likely(ret == 0)) {
2444 			amdgpu_bo_unpin(aobj);
2445 			amdgpu_bo_unreserve(aobj);
2446 		}
2447 		drm_gem_object_put(amdgpu_crtc->cursor_bo);
2448 	}
2449 
2450 	amdgpu_crtc->cursor_bo = obj;
2451 	return 0;
2452 }
2453 
2454 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
2455 {
2456 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2457 
2458 	if (amdgpu_crtc->cursor_bo) {
2459 		dce_v10_0_lock_cursor(crtc, true);
2460 
2461 		dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2462 					     amdgpu_crtc->cursor_y);
2463 
2464 		dce_v10_0_show_cursor(crtc);
2465 
2466 		dce_v10_0_lock_cursor(crtc, false);
2467 	}
2468 }
2469 
2470 static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2471 				    u16 *blue, uint32_t size,
2472 				    struct drm_modeset_acquire_ctx *ctx)
2473 {
2474 	dce_v10_0_crtc_load_lut(crtc);
2475 
2476 	return 0;
2477 }
2478 
2479 static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
2480 {
2481 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2482 
2483 	drm_crtc_cleanup(crtc);
2484 	kfree(amdgpu_crtc);
2485 }
2486 
2487 static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
2488 	.cursor_set2 = dce_v10_0_crtc_cursor_set2,
2489 	.cursor_move = dce_v10_0_crtc_cursor_move,
2490 	.gamma_set = dce_v10_0_crtc_gamma_set,
2491 	.set_config = amdgpu_display_crtc_set_config,
2492 	.destroy = dce_v10_0_crtc_destroy,
2493 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2494 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
2495 	.enable_vblank = amdgpu_enable_vblank_kms,
2496 	.disable_vblank = amdgpu_disable_vblank_kms,
2497 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2498 };
2499 
2500 static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2501 {
2502 	struct drm_device *dev = crtc->dev;
2503 	struct amdgpu_device *adev = drm_to_adev(dev);
2504 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2505 	unsigned type;
2506 
2507 	switch (mode) {
2508 	case DRM_MODE_DPMS_ON:
2509 		amdgpu_crtc->enabled = true;
2510 		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2511 		dce_v10_0_vga_enable(crtc, true);
2512 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2513 		dce_v10_0_vga_enable(crtc, false);
2514 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2515 		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2516 						amdgpu_crtc->crtc_id);
2517 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2518 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2519 		drm_crtc_vblank_on(crtc);
2520 		dce_v10_0_crtc_load_lut(crtc);
2521 		break;
2522 	case DRM_MODE_DPMS_STANDBY:
2523 	case DRM_MODE_DPMS_SUSPEND:
2524 	case DRM_MODE_DPMS_OFF:
2525 		drm_crtc_vblank_off(crtc);
2526 		if (amdgpu_crtc->enabled) {
2527 			dce_v10_0_vga_enable(crtc, true);
2528 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2529 			dce_v10_0_vga_enable(crtc, false);
2530 		}
2531 		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2532 		amdgpu_crtc->enabled = false;
2533 		break;
2534 	}
2535 	/* adjust pm to dpms */
2536 	amdgpu_pm_compute_clocks(adev);
2537 }
2538 
2539 static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc)
2540 {
2541 	/* disable crtc pair power gating before programming */
2542 	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2543 	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2544 	dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2545 }
2546 
2547 static void dce_v10_0_crtc_commit(struct drm_crtc *crtc)
2548 {
2549 	dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2550 	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2551 }
2552 
2553 static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
2554 {
2555 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2556 	struct drm_device *dev = crtc->dev;
2557 	struct amdgpu_device *adev = drm_to_adev(dev);
2558 	struct amdgpu_atom_ss ss;
2559 	int i;
2560 
2561 	dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2562 	if (crtc->primary->fb) {
2563 		int r;
2564 		struct amdgpu_bo *abo;
2565 
2566 		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2567 		r = amdgpu_bo_reserve(abo, true);
2568 		if (unlikely(r))
2569 			DRM_ERROR("failed to reserve abo before unpin\n");
2570 		else {
2571 			amdgpu_bo_unpin(abo);
2572 			amdgpu_bo_unreserve(abo);
2573 		}
2574 	}
2575 	/* disable the GRPH */
2576 	dce_v10_0_grph_enable(crtc, false);
2577 
2578 	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2579 
2580 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2581 		if (adev->mode_info.crtcs[i] &&
2582 		    adev->mode_info.crtcs[i]->enabled &&
2583 		    i != amdgpu_crtc->crtc_id &&
2584 		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2585 			/* one other crtc is using this pll don't turn
2586 			 * off the pll
2587 			 */
2588 			goto done;
2589 		}
2590 	}
2591 
2592 	switch (amdgpu_crtc->pll_id) {
2593 	case ATOM_PPLL0:
2594 	case ATOM_PPLL1:
2595 	case ATOM_PPLL2:
2596 		/* disable the ppll */
2597 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2598 					  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2599 		break;
2600 	default:
2601 		break;
2602 	}
2603 done:
2604 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2605 	amdgpu_crtc->adjusted_clock = 0;
2606 	amdgpu_crtc->encoder = NULL;
2607 	amdgpu_crtc->connector = NULL;
2608 }
2609 
2610 static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc,
2611 				  struct drm_display_mode *mode,
2612 				  struct drm_display_mode *adjusted_mode,
2613 				  int x, int y, struct drm_framebuffer *old_fb)
2614 {
2615 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2616 
2617 	if (!amdgpu_crtc->adjusted_clock)
2618 		return -EINVAL;
2619 
2620 	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2621 	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2622 	dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2623 	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2624 	amdgpu_atombios_crtc_scaler_setup(crtc);
2625 	dce_v10_0_cursor_reset(crtc);
2626 	/* update the hw version fpr dpm */
2627 	amdgpu_crtc->hw_mode = *adjusted_mode;
2628 
2629 	return 0;
2630 }
2631 
2632 static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc,
2633 				     const struct drm_display_mode *mode,
2634 				     struct drm_display_mode *adjusted_mode)
2635 {
2636 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2637 	struct drm_device *dev = crtc->dev;
2638 	struct drm_encoder *encoder;
2639 
2640 	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2641 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2642 		if (encoder->crtc == crtc) {
2643 			amdgpu_crtc->encoder = encoder;
2644 			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2645 			break;
2646 		}
2647 	}
2648 	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2649 		amdgpu_crtc->encoder = NULL;
2650 		amdgpu_crtc->connector = NULL;
2651 		return false;
2652 	}
2653 	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2654 		return false;
2655 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2656 		return false;
2657 	/* pick pll */
2658 	amdgpu_crtc->pll_id = dce_v10_0_pick_pll(crtc);
2659 	/* if we can't get a PPLL for a non-DP encoder, fail */
2660 	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2661 	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2662 		return false;
2663 
2664 	return true;
2665 }
2666 
2667 static int dce_v10_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2668 				  struct drm_framebuffer *old_fb)
2669 {
2670 	return dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2671 }
2672 
2673 static int dce_v10_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2674 					 struct drm_framebuffer *fb,
2675 					 int x, int y, enum mode_set_atomic state)
2676 {
2677 	return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1);
2678 }
2679 
2680 static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
2681 	.dpms = dce_v10_0_crtc_dpms,
2682 	.mode_fixup = dce_v10_0_crtc_mode_fixup,
2683 	.mode_set = dce_v10_0_crtc_mode_set,
2684 	.mode_set_base = dce_v10_0_crtc_set_base,
2685 	.mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic,
2686 	.prepare = dce_v10_0_crtc_prepare,
2687 	.commit = dce_v10_0_crtc_commit,
2688 	.disable = dce_v10_0_crtc_disable,
2689 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
2690 };
2691 
2692 static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
2693 {
2694 	struct amdgpu_crtc *amdgpu_crtc;
2695 
2696 	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2697 			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2698 	if (amdgpu_crtc == NULL)
2699 		return -ENOMEM;
2700 
2701 	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
2702 
2703 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2704 	amdgpu_crtc->crtc_id = index;
2705 	adev->mode_info.crtcs[index] = amdgpu_crtc;
2706 
2707 	amdgpu_crtc->max_cursor_width = 128;
2708 	amdgpu_crtc->max_cursor_height = 128;
2709 	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2710 	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2711 
2712 	switch (amdgpu_crtc->crtc_id) {
2713 	case 0:
2714 	default:
2715 		amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
2716 		break;
2717 	case 1:
2718 		amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
2719 		break;
2720 	case 2:
2721 		amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
2722 		break;
2723 	case 3:
2724 		amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
2725 		break;
2726 	case 4:
2727 		amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
2728 		break;
2729 	case 5:
2730 		amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
2731 		break;
2732 	}
2733 
2734 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2735 	amdgpu_crtc->adjusted_clock = 0;
2736 	amdgpu_crtc->encoder = NULL;
2737 	amdgpu_crtc->connector = NULL;
2738 	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs);
2739 
2740 	return 0;
2741 }
2742 
2743 static int dce_v10_0_early_init(void *handle)
2744 {
2745 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2746 
2747 	adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg;
2748 	adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg;
2749 
2750 	dce_v10_0_set_display_funcs(adev);
2751 
2752 	adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);
2753 
2754 	switch (adev->asic_type) {
2755 	case CHIP_FIJI:
2756 	case CHIP_TONGA:
2757 		adev->mode_info.num_hpd = 6;
2758 		adev->mode_info.num_dig = 7;
2759 		break;
2760 	default:
2761 		/* FIXME: not supported yet */
2762 		return -EINVAL;
2763 	}
2764 
2765 	dce_v10_0_set_irq_funcs(adev);
2766 
2767 	return 0;
2768 }
2769 
2770 static int dce_v10_0_sw_init(void *handle)
2771 {
2772 	int r, i;
2773 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2774 
2775 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2776 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2777 		if (r)
2778 			return r;
2779 	}
2780 
2781 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
2782 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2783 		if (r)
2784 			return r;
2785 	}
2786 
2787 	/* HPD hotplug */
2788 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2789 	if (r)
2790 		return r;
2791 
2792 	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2793 
2794 	adev_to_drm(adev)->mode_config.async_page_flip = true;
2795 
2796 	adev_to_drm(adev)->mode_config.max_width = 16384;
2797 	adev_to_drm(adev)->mode_config.max_height = 16384;
2798 
2799 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2800 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2801 
2802 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
2803 
2804 	r = amdgpu_display_modeset_create_props(adev);
2805 	if (r)
2806 		return r;
2807 
2808 	adev_to_drm(adev)->mode_config.max_width = 16384;
2809 	adev_to_drm(adev)->mode_config.max_height = 16384;
2810 
2811 	/* allocate crtcs */
2812 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2813 		r = dce_v10_0_crtc_init(adev, i);
2814 		if (r)
2815 			return r;
2816 	}
2817 
2818 	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2819 		amdgpu_display_print_display_setup(adev_to_drm(adev));
2820 	else
2821 		return -EINVAL;
2822 
2823 	/* setup afmt */
2824 	r = dce_v10_0_afmt_init(adev);
2825 	if (r)
2826 		return r;
2827 
2828 	r = dce_v10_0_audio_init(adev);
2829 	if (r)
2830 		return r;
2831 
2832 	drm_kms_helper_poll_init(adev_to_drm(adev));
2833 
2834 	adev->mode_info.mode_config_initialized = true;
2835 	return 0;
2836 }
2837 
2838 static int dce_v10_0_sw_fini(void *handle)
2839 {
2840 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2841 
2842 	kfree(adev->mode_info.bios_hardcoded_edid);
2843 
2844 	drm_kms_helper_poll_fini(adev_to_drm(adev));
2845 
2846 	dce_v10_0_audio_fini(adev);
2847 
2848 	dce_v10_0_afmt_fini(adev);
2849 
2850 	drm_mode_config_cleanup(adev_to_drm(adev));
2851 	adev->mode_info.mode_config_initialized = false;
2852 
2853 	return 0;
2854 }
2855 
2856 static int dce_v10_0_hw_init(void *handle)
2857 {
2858 	int i;
2859 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2860 
2861 	dce_v10_0_init_golden_registers(adev);
2862 
2863 	/* disable vga render */
2864 	dce_v10_0_set_vga_render_state(adev, false);
2865 	/* init dig PHYs, disp eng pll */
2866 	amdgpu_atombios_encoder_init_dig(adev);
2867 	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2868 
2869 	/* initialize hpd */
2870 	dce_v10_0_hpd_init(adev);
2871 
2872 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2873 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2874 	}
2875 
2876 	dce_v10_0_pageflip_interrupt_init(adev);
2877 
2878 	return 0;
2879 }
2880 
2881 static int dce_v10_0_hw_fini(void *handle)
2882 {
2883 	int i;
2884 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2885 
2886 	dce_v10_0_hpd_fini(adev);
2887 
2888 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2889 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2890 	}
2891 
2892 	dce_v10_0_pageflip_interrupt_fini(adev);
2893 
2894 	return 0;
2895 }
2896 
2897 static int dce_v10_0_suspend(void *handle)
2898 {
2899 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2900 
2901 	adev->mode_info.bl_level =
2902 		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2903 
2904 	return dce_v10_0_hw_fini(handle);
2905 }
2906 
2907 static int dce_v10_0_resume(void *handle)
2908 {
2909 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2910 	int ret;
2911 
2912 	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2913 							   adev->mode_info.bl_level);
2914 
2915 	ret = dce_v10_0_hw_init(handle);
2916 
2917 	/* turn on the BL */
2918 	if (adev->mode_info.bl_encoder) {
2919 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2920 								  adev->mode_info.bl_encoder);
2921 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2922 						    bl_level);
2923 	}
2924 
2925 	return ret;
2926 }
2927 
2928 static bool dce_v10_0_is_idle(void *handle)
2929 {
2930 	return true;
2931 }
2932 
2933 static int dce_v10_0_wait_for_idle(void *handle)
2934 {
2935 	return 0;
2936 }
2937 
2938 static bool dce_v10_0_check_soft_reset(void *handle)
2939 {
2940 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2941 
2942 	return dce_v10_0_is_display_hung(adev);
2943 }
2944 
2945 static int dce_v10_0_soft_reset(void *handle)
2946 {
2947 	u32 srbm_soft_reset = 0, tmp;
2948 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2949 
2950 	if (dce_v10_0_is_display_hung(adev))
2951 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2952 
2953 	if (srbm_soft_reset) {
2954 		tmp = RREG32(mmSRBM_SOFT_RESET);
2955 		tmp |= srbm_soft_reset;
2956 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2957 		WREG32(mmSRBM_SOFT_RESET, tmp);
2958 		tmp = RREG32(mmSRBM_SOFT_RESET);
2959 
2960 		udelay(50);
2961 
2962 		tmp &= ~srbm_soft_reset;
2963 		WREG32(mmSRBM_SOFT_RESET, tmp);
2964 		tmp = RREG32(mmSRBM_SOFT_RESET);
2965 
2966 		/* Wait a little for things to settle down */
2967 		udelay(50);
2968 	}
2969 	return 0;
2970 }
2971 
2972 static void dce_v10_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2973 						     int crtc,
2974 						     enum amdgpu_interrupt_state state)
2975 {
2976 	u32 lb_interrupt_mask;
2977 
2978 	if (crtc >= adev->mode_info.num_crtc) {
2979 		DRM_DEBUG("invalid crtc %d\n", crtc);
2980 		return;
2981 	}
2982 
2983 	switch (state) {
2984 	case AMDGPU_IRQ_STATE_DISABLE:
2985 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
2986 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
2987 						  VBLANK_INTERRUPT_MASK, 0);
2988 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
2989 		break;
2990 	case AMDGPU_IRQ_STATE_ENABLE:
2991 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
2992 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
2993 						  VBLANK_INTERRUPT_MASK, 1);
2994 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
2995 		break;
2996 	default:
2997 		break;
2998 	}
2999 }
3000 
3001 static void dce_v10_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3002 						    int crtc,
3003 						    enum amdgpu_interrupt_state state)
3004 {
3005 	u32 lb_interrupt_mask;
3006 
3007 	if (crtc >= adev->mode_info.num_crtc) {
3008 		DRM_DEBUG("invalid crtc %d\n", crtc);
3009 		return;
3010 	}
3011 
3012 	switch (state) {
3013 	case AMDGPU_IRQ_STATE_DISABLE:
3014 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3015 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3016 						  VLINE_INTERRUPT_MASK, 0);
3017 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3018 		break;
3019 	case AMDGPU_IRQ_STATE_ENABLE:
3020 		lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3021 		lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3022 						  VLINE_INTERRUPT_MASK, 1);
3023 		WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3024 		break;
3025 	default:
3026 		break;
3027 	}
3028 }
3029 
3030 static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev,
3031 				       struct amdgpu_irq_src *source,
3032 				       unsigned hpd,
3033 				       enum amdgpu_interrupt_state state)
3034 {
3035 	u32 tmp;
3036 
3037 	if (hpd >= adev->mode_info.num_hpd) {
3038 		DRM_DEBUG("invalid hdp %d\n", hpd);
3039 		return 0;
3040 	}
3041 
3042 	switch (state) {
3043 	case AMDGPU_IRQ_STATE_DISABLE:
3044 		tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3045 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
3046 		WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3047 		break;
3048 	case AMDGPU_IRQ_STATE_ENABLE:
3049 		tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3050 		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
3051 		WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3052 		break;
3053 	default:
3054 		break;
3055 	}
3056 
3057 	return 0;
3058 }
3059 
3060 static int dce_v10_0_set_crtc_irq_state(struct amdgpu_device *adev,
3061 					struct amdgpu_irq_src *source,
3062 					unsigned type,
3063 					enum amdgpu_interrupt_state state)
3064 {
3065 	switch (type) {
3066 	case AMDGPU_CRTC_IRQ_VBLANK1:
3067 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3068 		break;
3069 	case AMDGPU_CRTC_IRQ_VBLANK2:
3070 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3071 		break;
3072 	case AMDGPU_CRTC_IRQ_VBLANK3:
3073 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3074 		break;
3075 	case AMDGPU_CRTC_IRQ_VBLANK4:
3076 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3077 		break;
3078 	case AMDGPU_CRTC_IRQ_VBLANK5:
3079 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3080 		break;
3081 	case AMDGPU_CRTC_IRQ_VBLANK6:
3082 		dce_v10_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3083 		break;
3084 	case AMDGPU_CRTC_IRQ_VLINE1:
3085 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 0, state);
3086 		break;
3087 	case AMDGPU_CRTC_IRQ_VLINE2:
3088 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 1, state);
3089 		break;
3090 	case AMDGPU_CRTC_IRQ_VLINE3:
3091 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 2, state);
3092 		break;
3093 	case AMDGPU_CRTC_IRQ_VLINE4:
3094 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 3, state);
3095 		break;
3096 	case AMDGPU_CRTC_IRQ_VLINE5:
3097 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 4, state);
3098 		break;
3099 	case AMDGPU_CRTC_IRQ_VLINE6:
3100 		dce_v10_0_set_crtc_vline_interrupt_state(adev, 5, state);
3101 		break;
3102 	default:
3103 		break;
3104 	}
3105 	return 0;
3106 }
3107 
3108 static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3109 					    struct amdgpu_irq_src *src,
3110 					    unsigned type,
3111 					    enum amdgpu_interrupt_state state)
3112 {
3113 	u32 reg;
3114 
3115 	if (type >= adev->mode_info.num_crtc) {
3116 		DRM_ERROR("invalid pageflip crtc %d\n", type);
3117 		return -EINVAL;
3118 	}
3119 
3120 	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3121 	if (state == AMDGPU_IRQ_STATE_DISABLE)
3122 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3123 		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3124 	else
3125 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3126 		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3127 
3128 	return 0;
3129 }
3130 
3131 static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
3132 				  struct amdgpu_irq_src *source,
3133 				  struct amdgpu_iv_entry *entry)
3134 {
3135 	unsigned long flags;
3136 	unsigned crtc_id;
3137 	struct amdgpu_crtc *amdgpu_crtc;
3138 	struct amdgpu_flip_work *works;
3139 
3140 	crtc_id = (entry->src_id - 8) >> 1;
3141 	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3142 
3143 	if (crtc_id >= adev->mode_info.num_crtc) {
3144 		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3145 		return -EINVAL;
3146 	}
3147 
3148 	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3149 	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3150 		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3151 		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3152 
3153 	/* IRQ could occur when in initial stage */
3154 	if (amdgpu_crtc == NULL)
3155 		return 0;
3156 
3157 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3158 	works = amdgpu_crtc->pflip_works;
3159 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3160 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3161 						 "AMDGPU_FLIP_SUBMITTED(%d)\n",
3162 						 amdgpu_crtc->pflip_status,
3163 						 AMDGPU_FLIP_SUBMITTED);
3164 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3165 		return 0;
3166 	}
3167 
3168 	/* page flip completed. clean up */
3169 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3170 	amdgpu_crtc->pflip_works = NULL;
3171 
3172 	/* wakeup usersapce */
3173 	if (works->event)
3174 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3175 
3176 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3177 
3178 	drm_crtc_vblank_put(&amdgpu_crtc->base);
3179 	schedule_work(&works->unpin_work);
3180 
3181 	return 0;
3182 }
3183 
3184 static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev,
3185 				  int hpd)
3186 {
3187 	u32 tmp;
3188 
3189 	if (hpd >= adev->mode_info.num_hpd) {
3190 		DRM_DEBUG("invalid hdp %d\n", hpd);
3191 		return;
3192 	}
3193 
3194 	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3195 	tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
3196 	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3197 }
3198 
3199 static void dce_v10_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
3200 					  int crtc)
3201 {
3202 	u32 tmp;
3203 
3204 	if (crtc >= adev->mode_info.num_crtc) {
3205 		DRM_DEBUG("invalid crtc %d\n", crtc);
3206 		return;
3207 	}
3208 
3209 	tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
3210 	tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
3211 	WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
3212 }
3213 
3214 static void dce_v10_0_crtc_vline_int_ack(struct amdgpu_device *adev,
3215 					 int crtc)
3216 {
3217 	u32 tmp;
3218 
3219 	if (crtc >= adev->mode_info.num_crtc) {
3220 		DRM_DEBUG("invalid crtc %d\n", crtc);
3221 		return;
3222 	}
3223 
3224 	tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
3225 	tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
3226 	WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
3227 }
3228 
3229 static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
3230 			      struct amdgpu_irq_src *source,
3231 			      struct amdgpu_iv_entry *entry)
3232 {
3233 	unsigned crtc = entry->src_id - 1;
3234 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3235 	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, crtc);
3236 
3237 	switch (entry->src_data[0]) {
3238 	case 0: /* vblank */
3239 		if (disp_int & interrupt_status_offsets[crtc].vblank)
3240 			dce_v10_0_crtc_vblank_int_ack(adev, crtc);
3241 		else
3242 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3243 
3244 		if (amdgpu_irq_enabled(adev, source, irq_type)) {
3245 			drm_handle_vblank(adev_to_drm(adev), crtc);
3246 		}
3247 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3248 
3249 		break;
3250 	case 1: /* vline */
3251 		if (disp_int & interrupt_status_offsets[crtc].vline)
3252 			dce_v10_0_crtc_vline_int_ack(adev, crtc);
3253 		else
3254 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3255 
3256 		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3257 
3258 		break;
3259 	default:
3260 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3261 		break;
3262 	}
3263 
3264 	return 0;
3265 }
3266 
3267 static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
3268 			     struct amdgpu_irq_src *source,
3269 			     struct amdgpu_iv_entry *entry)
3270 {
3271 	uint32_t disp_int, mask;
3272 	unsigned hpd;
3273 
3274 	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3275 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3276 		return 0;
3277 	}
3278 
3279 	hpd = entry->src_data[0];
3280 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3281 	mask = interrupt_status_offsets[hpd].hpd;
3282 
3283 	if (disp_int & mask) {
3284 		dce_v10_0_hpd_int_ack(adev, hpd);
3285 		schedule_work(&adev->hotplug_work);
3286 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3287 	}
3288 
3289 	return 0;
3290 }
3291 
3292 static int dce_v10_0_set_clockgating_state(void *handle,
3293 					  enum amd_clockgating_state state)
3294 {
3295 	return 0;
3296 }
3297 
3298 static int dce_v10_0_set_powergating_state(void *handle,
3299 					  enum amd_powergating_state state)
3300 {
3301 	return 0;
3302 }
3303 
3304 static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
3305 	.name = "dce_v10_0",
3306 	.early_init = dce_v10_0_early_init,
3307 	.late_init = NULL,
3308 	.sw_init = dce_v10_0_sw_init,
3309 	.sw_fini = dce_v10_0_sw_fini,
3310 	.hw_init = dce_v10_0_hw_init,
3311 	.hw_fini = dce_v10_0_hw_fini,
3312 	.suspend = dce_v10_0_suspend,
3313 	.resume = dce_v10_0_resume,
3314 	.is_idle = dce_v10_0_is_idle,
3315 	.wait_for_idle = dce_v10_0_wait_for_idle,
3316 	.check_soft_reset = dce_v10_0_check_soft_reset,
3317 	.soft_reset = dce_v10_0_soft_reset,
3318 	.set_clockgating_state = dce_v10_0_set_clockgating_state,
3319 	.set_powergating_state = dce_v10_0_set_powergating_state,
3320 };
3321 
3322 static void
3323 dce_v10_0_encoder_mode_set(struct drm_encoder *encoder,
3324 			  struct drm_display_mode *mode,
3325 			  struct drm_display_mode *adjusted_mode)
3326 {
3327 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3328 
3329 	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3330 
3331 	/* need to call this here rather than in prepare() since we need some crtc info */
3332 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3333 
3334 	/* set scaler clears this on some chips */
3335 	dce_v10_0_set_interleave(encoder->crtc, mode);
3336 
3337 	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3338 		dce_v10_0_afmt_enable(encoder, true);
3339 		dce_v10_0_afmt_setmode(encoder, adjusted_mode);
3340 	}
3341 }
3342 
3343 static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
3344 {
3345 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
3346 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3347 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3348 
3349 	if ((amdgpu_encoder->active_device &
3350 	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3351 	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3352 	     ENCODER_OBJECT_ID_NONE)) {
3353 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3354 		if (dig) {
3355 			dig->dig_encoder = dce_v10_0_pick_dig_encoder(encoder);
3356 			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3357 				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3358 		}
3359 	}
3360 
3361 	amdgpu_atombios_scratch_regs_lock(adev, true);
3362 
3363 	if (connector) {
3364 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3365 
3366 		/* select the clock/data port if it uses a router */
3367 		if (amdgpu_connector->router.cd_valid)
3368 			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3369 
3370 		/* turn eDP panel on for mode set */
3371 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3372 			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3373 							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3374 	}
3375 
3376 	/* this is needed for the pll/ss setup to work correctly in some cases */
3377 	amdgpu_atombios_encoder_set_crtc_source(encoder);
3378 	/* set up the FMT blocks */
3379 	dce_v10_0_program_fmt(encoder);
3380 }
3381 
3382 static void dce_v10_0_encoder_commit(struct drm_encoder *encoder)
3383 {
3384 	struct drm_device *dev = encoder->dev;
3385 	struct amdgpu_device *adev = drm_to_adev(dev);
3386 
3387 	/* need to call this here as we need the crtc set up */
3388 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3389 	amdgpu_atombios_scratch_regs_lock(adev, false);
3390 }
3391 
3392 static void dce_v10_0_encoder_disable(struct drm_encoder *encoder)
3393 {
3394 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3395 	struct amdgpu_encoder_atom_dig *dig;
3396 
3397 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3398 
3399 	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3400 		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3401 			dce_v10_0_afmt_enable(encoder, false);
3402 		dig = amdgpu_encoder->enc_priv;
3403 		dig->dig_encoder = -1;
3404 	}
3405 	amdgpu_encoder->active_device = 0;
3406 }
3407 
3408 /* these are handled by the primary encoders */
3409 static void dce_v10_0_ext_prepare(struct drm_encoder *encoder)
3410 {
3411 
3412 }
3413 
3414 static void dce_v10_0_ext_commit(struct drm_encoder *encoder)
3415 {
3416 
3417 }
3418 
3419 static void
3420 dce_v10_0_ext_mode_set(struct drm_encoder *encoder,
3421 		      struct drm_display_mode *mode,
3422 		      struct drm_display_mode *adjusted_mode)
3423 {
3424 
3425 }
3426 
3427 static void dce_v10_0_ext_disable(struct drm_encoder *encoder)
3428 {
3429 
3430 }
3431 
3432 static void
3433 dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode)
3434 {
3435 
3436 }
3437 
3438 static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = {
3439 	.dpms = dce_v10_0_ext_dpms,
3440 	.prepare = dce_v10_0_ext_prepare,
3441 	.mode_set = dce_v10_0_ext_mode_set,
3442 	.commit = dce_v10_0_ext_commit,
3443 	.disable = dce_v10_0_ext_disable,
3444 	/* no detect for TMDS/LVDS yet */
3445 };
3446 
3447 static const struct drm_encoder_helper_funcs dce_v10_0_dig_helper_funcs = {
3448 	.dpms = amdgpu_atombios_encoder_dpms,
3449 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3450 	.prepare = dce_v10_0_encoder_prepare,
3451 	.mode_set = dce_v10_0_encoder_mode_set,
3452 	.commit = dce_v10_0_encoder_commit,
3453 	.disable = dce_v10_0_encoder_disable,
3454 	.detect = amdgpu_atombios_encoder_dig_detect,
3455 };
3456 
3457 static const struct drm_encoder_helper_funcs dce_v10_0_dac_helper_funcs = {
3458 	.dpms = amdgpu_atombios_encoder_dpms,
3459 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3460 	.prepare = dce_v10_0_encoder_prepare,
3461 	.mode_set = dce_v10_0_encoder_mode_set,
3462 	.commit = dce_v10_0_encoder_commit,
3463 	.detect = amdgpu_atombios_encoder_dac_detect,
3464 };
3465 
3466 static void dce_v10_0_encoder_destroy(struct drm_encoder *encoder)
3467 {
3468 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3469 	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3470 		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3471 	kfree(amdgpu_encoder->enc_priv);
3472 	drm_encoder_cleanup(encoder);
3473 	kfree(amdgpu_encoder);
3474 }
3475 
3476 static const struct drm_encoder_funcs dce_v10_0_encoder_funcs = {
3477 	.destroy = dce_v10_0_encoder_destroy,
3478 };
3479 
3480 static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
3481 				 uint32_t encoder_enum,
3482 				 uint32_t supported_device,
3483 				 u16 caps)
3484 {
3485 	struct drm_device *dev = adev_to_drm(adev);
3486 	struct drm_encoder *encoder;
3487 	struct amdgpu_encoder *amdgpu_encoder;
3488 
3489 	/* see if we already added it */
3490 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3491 		amdgpu_encoder = to_amdgpu_encoder(encoder);
3492 		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3493 			amdgpu_encoder->devices |= supported_device;
3494 			return;
3495 		}
3496 
3497 	}
3498 
3499 	/* add a new one */
3500 	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3501 	if (!amdgpu_encoder)
3502 		return;
3503 
3504 	encoder = &amdgpu_encoder->base;
3505 	switch (adev->mode_info.num_crtc) {
3506 	case 1:
3507 		encoder->possible_crtcs = 0x1;
3508 		break;
3509 	case 2:
3510 	default:
3511 		encoder->possible_crtcs = 0x3;
3512 		break;
3513 	case 4:
3514 		encoder->possible_crtcs = 0xf;
3515 		break;
3516 	case 6:
3517 		encoder->possible_crtcs = 0x3f;
3518 		break;
3519 	}
3520 
3521 	amdgpu_encoder->enc_priv = NULL;
3522 
3523 	amdgpu_encoder->encoder_enum = encoder_enum;
3524 	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3525 	amdgpu_encoder->devices = supported_device;
3526 	amdgpu_encoder->rmx_type = RMX_OFF;
3527 	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3528 	amdgpu_encoder->is_ext_encoder = false;
3529 	amdgpu_encoder->caps = caps;
3530 
3531 	switch (amdgpu_encoder->encoder_id) {
3532 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3533 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3534 		drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3535 				 DRM_MODE_ENCODER_DAC, NULL);
3536 		drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs);
3537 		break;
3538 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3539 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3540 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3541 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3542 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3543 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3544 			amdgpu_encoder->rmx_type = RMX_FULL;
3545 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3546 					 DRM_MODE_ENCODER_LVDS, NULL);
3547 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3548 		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3549 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3550 					 DRM_MODE_ENCODER_DAC, NULL);
3551 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3552 		} else {
3553 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3554 					 DRM_MODE_ENCODER_TMDS, NULL);
3555 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3556 		}
3557 		drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs);
3558 		break;
3559 	case ENCODER_OBJECT_ID_SI170B:
3560 	case ENCODER_OBJECT_ID_CH7303:
3561 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3562 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3563 	case ENCODER_OBJECT_ID_TITFP513:
3564 	case ENCODER_OBJECT_ID_VT1623:
3565 	case ENCODER_OBJECT_ID_HDMI_SI1930:
3566 	case ENCODER_OBJECT_ID_TRAVIS:
3567 	case ENCODER_OBJECT_ID_NUTMEG:
3568 		/* these are handled by the primary encoders */
3569 		amdgpu_encoder->is_ext_encoder = true;
3570 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3571 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3572 					 DRM_MODE_ENCODER_LVDS, NULL);
3573 		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3574 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3575 					 DRM_MODE_ENCODER_DAC, NULL);
3576 		else
3577 			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3578 					 DRM_MODE_ENCODER_TMDS, NULL);
3579 		drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs);
3580 		break;
3581 	}
3582 }
3583 
3584 static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
3585 	.bandwidth_update = &dce_v10_0_bandwidth_update,
3586 	.vblank_get_counter = &dce_v10_0_vblank_get_counter,
3587 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3588 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3589 	.hpd_sense = &dce_v10_0_hpd_sense,
3590 	.hpd_set_polarity = &dce_v10_0_hpd_set_polarity,
3591 	.hpd_get_gpio_reg = &dce_v10_0_hpd_get_gpio_reg,
3592 	.page_flip = &dce_v10_0_page_flip,
3593 	.page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
3594 	.add_encoder = &dce_v10_0_encoder_add,
3595 	.add_connector = &amdgpu_connector_add,
3596 };
3597 
3598 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
3599 {
3600 	adev->mode_info.funcs = &dce_v10_0_display_funcs;
3601 }
3602 
3603 static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
3604 	.set = dce_v10_0_set_crtc_irq_state,
3605 	.process = dce_v10_0_crtc_irq,
3606 };
3607 
3608 static const struct amdgpu_irq_src_funcs dce_v10_0_pageflip_irq_funcs = {
3609 	.set = dce_v10_0_set_pageflip_irq_state,
3610 	.process = dce_v10_0_pageflip_irq,
3611 };
3612 
3613 static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = {
3614 	.set = dce_v10_0_set_hpd_irq_state,
3615 	.process = dce_v10_0_hpd_irq,
3616 };
3617 
3618 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
3619 {
3620 	if (adev->mode_info.num_crtc > 0)
3621 		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3622 	else
3623 		adev->crtc_irq.num_types = 0;
3624 	adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs;
3625 
3626 	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3627 	adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs;
3628 
3629 	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3630 	adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
3631 }
3632 
3633 const struct amdgpu_ip_block_version dce_v10_0_ip_block =
3634 {
3635 	.type = AMD_IP_BLOCK_TYPE_DCE,
3636 	.major = 10,
3637 	.minor = 0,
3638 	.rev = 0,
3639 	.funcs = &dce_v10_0_ip_funcs,
3640 };
3641 
3642 const struct amdgpu_ip_block_version dce_v10_1_ip_block =
3643 {
3644 	.type = AMD_IP_BLOCK_TYPE_DCE,
3645 	.major = 10,
3646 	.minor = 1,
3647 	.rev = 0,
3648 	.funcs = &dce_v10_0_ip_funcs,
3649 };
3650