1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/pci.h>
25 
26 #include <drm/drm_fourcc.h>
27 #include <drm/drm_modeset_helper.h>
28 #include <drm/drm_modeset_helper_vtables.h>
29 #include <drm/drm_vblank.h>
30 
31 #include "amdgpu.h"
32 #include "amdgpu_pm.h"
33 #include "amdgpu_i2c.h"
34 #include "atom.h"
35 #include "amdgpu_atombios.h"
36 #include "atombios_crtc.h"
37 #include "atombios_encoders.h"
38 #include "amdgpu_pll.h"
39 #include "amdgpu_connectors.h"
40 #include "amdgpu_display.h"
41 
42 #include "bif/bif_3_0_d.h"
43 #include "bif/bif_3_0_sh_mask.h"
44 #include "oss/oss_1_0_d.h"
45 #include "oss/oss_1_0_sh_mask.h"
46 #include "gca/gfx_6_0_d.h"
47 #include "gca/gfx_6_0_sh_mask.h"
48 #include "gmc/gmc_6_0_d.h"
49 #include "gmc/gmc_6_0_sh_mask.h"
50 #include "dce/dce_6_0_d.h"
51 #include "dce/dce_6_0_sh_mask.h"
52 #include "gca/gfx_7_2_enum.h"
53 #include "dce_v6_0.h"
54 #include "si_enums.h"
55 
56 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
57 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
58 
59 static const u32 crtc_offsets[6] =
60 {
61 	SI_CRTC0_REGISTER_OFFSET,
62 	SI_CRTC1_REGISTER_OFFSET,
63 	SI_CRTC2_REGISTER_OFFSET,
64 	SI_CRTC3_REGISTER_OFFSET,
65 	SI_CRTC4_REGISTER_OFFSET,
66 	SI_CRTC5_REGISTER_OFFSET
67 };
68 
69 static const u32 hpd_offsets[] =
70 {
71 	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
72 	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
73 	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
74 	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
75 	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
76 	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
77 };
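/*
 * Each entry above is the register offset of HPD pin N's block relative
 * to the HPD1 block, so pin N is addressed by adding hpd_offsets[N] to
 * the HPD1 register names, e.g.
 * RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[N]); the same delta is used
 * for the _INT_CONTROL and _CONTROL registers below.
 */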
78 
79 static const uint32_t dig_offsets[] = {
80 	SI_CRTC0_REGISTER_OFFSET,
81 	SI_CRTC1_REGISTER_OFFSET,
82 	SI_CRTC2_REGISTER_OFFSET,
83 	SI_CRTC3_REGISTER_OFFSET,
84 	SI_CRTC4_REGISTER_OFFSET,
85 	SI_CRTC5_REGISTER_OFFSET,
86 	(0x13830 - 0x7030) >> 2,
87 };
88 
89 static const struct {
90 	uint32_t	reg;
91 	uint32_t	vblank;
92 	uint32_t	vline;
93 	uint32_t	hpd;
94 
95 } interrupt_status_offsets[6] = { {
96 	.reg = mmDISP_INTERRUPT_STATUS,
97 	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
98 	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
99 	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
100 }, {
101 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
102 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
103 	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
104 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
105 }, {
106 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
107 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
108 	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
109 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
110 }, {
111 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
112 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
113 	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
114 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
115 }, {
116 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
117 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
118 	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
119 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
120 }, {
121 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
122 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
123 	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
124 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
125 } };
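/*
 * One entry per crtc: which DISP_INTERRUPT_STATUS* register carries that
 * crtc's status and which bits in it flag a pending vblank, vline or hpd
 * interrupt.  The interrupt handling code later in this file tests, e.g.,
 * RREG32(interrupt_status_offsets[crtc].reg) against the .vblank mask
 * instead of open-coding six register/bit combinations.
 */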
126 
127 static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
128 				     u32 block_offset, u32 reg)
129 {
130 	unsigned long flags;
131 	u32 r;
132 
133 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
134 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
135 	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
136 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
137 
138 	return r;
139 }
140 
141 static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
142 				      u32 block_offset, u32 reg, u32 v)
143 {
144 	unsigned long flags;
145 
146 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
147 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
148 		reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
149 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
150 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
151 }
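/*
 * The Azalia (audio) endpoint registers are accessed through an
 * index/data pair: the endpoint register number goes into
 * AZALIA_F0_CODEC_ENDPOINT_INDEX and the value is then read from or
 * written to AZALIA_F0_CODEC_ENDPOINT_DATA.  The spinlock keeps the
 * index/data sequence atomic, and writes also set the
 * AZALIA_ENDPOINT_REG_WRITE_EN bit in the index register.
 */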
152 
153 static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
154 {
155 	if (crtc >= adev->mode_info.num_crtc)
156 		return 0;
157 	else
158 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
159 }
160 
161 static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
162 {
163 	unsigned i;
164 
165 	/* Enable pflip interrupts */
166 	for (i = 0; i < adev->mode_info.num_crtc; i++)
167 		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
168 }
169 
170 static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
171 {
172 	unsigned i;
173 
174 	/* Disable pflip interrupts */
175 	for (i = 0; i < adev->mode_info.num_crtc; i++)
176 		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
177 }
178 
179 /**
180  * dce_v6_0_page_flip - pageflip callback.
181  *
182  * @adev: amdgpu_device pointer
183  * @crtc_id: crtc to flip on
184  * @crtc_base: new address of the crtc (GPU MC address)
185  * @async: asynchronous flip
186  *
187  * Does the actual pageflip (evergreen+): programs the new primary
188  * surface address and pitch for the crtc and selects whether the
189  * double buffered update latches at vsync (the default) or at hsync
190  * for asynchronous flips.  A trailing register read posts the write
191  * before returning.
192  */
193 static void dce_v6_0_page_flip(struct amdgpu_device *adev,
194 			       int crtc_id, u64 crtc_base, bool async)
195 {
196 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
197 	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
198 
199 	/* flip at hsync for async, default is vsync */
200 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
201 	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
202 	/* update pitch */
203 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
204 	       fb->pitches[0] / fb->format->cpp[0]);
205 	/* update the scanout addresses */
206 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
207 	       upper_32_bits(crtc_base));
208 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
209 	       (u32)crtc_base);
210 
211 	/* post the write */
212 	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
213 }
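/*
 * crtc_base is a GPU MC address split into 32-bit halves across the two
 * surface address registers.  Illustrative example (address assumed):
 * crtc_base = 0x123456000 programs
 * GRPH_PRIMARY_SURFACE_ADDRESS_HIGH = 0x1 and
 * GRPH_PRIMARY_SURFACE_ADDRESS = 0x23456000.
 */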
214 
215 static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
216 					u32 *vbl, u32 *position)
217 {
218 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
219 		return -EINVAL;
220 	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
221 	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
222 
223 	return 0;
224 
225 }
226 
227 /**
228  * dce_v6_0_hpd_sense - hpd sense callback.
229  *
230  * @adev: amdgpu_device pointer
231  * @hpd: hpd (hotplug detect) pin
232  *
233  * Checks if a digital monitor is connected (evergreen+).
234  * Returns true if connected, false if not connected.
235  */
236 static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
237 			       enum amdgpu_hpd_id hpd)
238 {
239 	bool connected = false;
240 
241 	if (hpd >= adev->mode_info.num_hpd)
242 		return connected;
243 
244 	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
245 		connected = true;
246 
247 	return connected;
248 }
249 
250 /**
251  * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
252  *
253  * @adev: amdgpu_device pointer
254  * @hpd: hpd (hotplug detect) pin
255  *
256  * Set the polarity of the hpd pin (evergreen+).
257  */
258 static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
259 				      enum amdgpu_hpd_id hpd)
260 {
261 	u32 tmp;
262 	bool connected = dce_v6_0_hpd_sense(adev, hpd);
263 
264 	if (hpd >= adev->mode_info.num_hpd)
265 		return;
266 
267 	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
268 	if (connected)
269 		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
270 	else
271 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
272 	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
273 }
274 
275 /**
276  * dce_v6_0_hpd_init - hpd setup callback.
277  *
278  * @adev: amdgpu_device pointer
279  *
280  * Setup the hpd pins used by the card (evergreen+).
281  * Enable the pin, set the polarity, and enable the hpd interrupts.
282  */
283 static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
284 {
285 	struct drm_device *dev = adev_to_drm(adev);
286 	struct drm_connector *connector;
287 	struct drm_connector_list_iter iter;
288 	u32 tmp;
289 
290 	drm_connector_list_iter_begin(dev, &iter);
291 	drm_for_each_connector_iter(connector, &iter) {
292 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
293 
294 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
295 			continue;
296 
297 		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
298 		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
299 		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
300 
301 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
302 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
303 			/* don't try to enable hpd on eDP or LVDS to avoid breaking the
304 			 * aux dp channel on imac and to help (but not completely fix)
305 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
306 			 * it also avoids interrupt storms during dpms.
307 			 */
308 			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
309 			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
310 			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
311 			continue;
312 		}
313 
314 		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
315 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
316 	}
317 	drm_connector_list_iter_end(&iter);
318 }
319 
320 /**
321  * dce_v6_0_hpd_fini - hpd tear down callback.
322  *
323  * @adev: amdgpu_device pointer
324  *
325  * Tear down the hpd pins used by the card (evergreen+).
326  * Disable the hpd interrupts.
327  */
328 static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
329 {
330 	struct drm_device *dev = adev_to_drm(adev);
331 	struct drm_connector *connector;
332 	struct drm_connector_list_iter iter;
333 	u32 tmp;
334 
335 	drm_connector_list_iter_begin(dev, &iter);
336 	drm_for_each_connector_iter(connector, &iter) {
337 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
338 
339 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
340 			continue;
341 
342 		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
343 		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
344 		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
345 
346 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
347 	}
348 	drm_connector_list_iter_end(&iter);
349 }
350 
351 static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
352 {
353 	return mmDC_GPIO_HPD_A;
354 }
355 
356 static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
357 					  bool render)
358 {
359 	if (!render)
360 		WREG32(mmVGA_RENDER_CONTROL,
361 			RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
362 
363 }
364 
365 static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
366 {
367 	switch (adev->asic_type) {
368 	case CHIP_TAHITI:
369 	case CHIP_PITCAIRN:
370 	case CHIP_VERDE:
371 		return 6;
372 	case CHIP_OLAND:
373 		return 2;
374 	default:
375 		return 0;
376 	}
377 }
378 
379 void dce_v6_0_disable_dce(struct amdgpu_device *adev)
380 {
381 	/* Disable VGA render and any enabled crtcs, if the asic has a DCE engine */
382 	if (amdgpu_atombios_has_dce_engine_info(adev)) {
383 		u32 tmp;
384 		int crtc_enabled, i;
385 
386 		dce_v6_0_set_vga_render_state(adev, false);
387 
388 		/* Disable crtcs */
389 		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
390 			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
391 				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
392 			if (crtc_enabled) {
393 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
394 				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
395 				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
396 				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
397 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
398 			}
399 		}
400 	}
401 }
402 
403 static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
404 {
405 
406 	struct drm_device *dev = encoder->dev;
407 	struct amdgpu_device *adev = drm_to_adev(dev);
408 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
409 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
410 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
411 	int bpc = 0;
412 	u32 tmp = 0;
413 	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
414 
415 	if (connector) {
416 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
417 		bpc = amdgpu_connector_get_monitor_bpc(connector);
418 		dither = amdgpu_connector->dither;
419 	}
420 
421 	/* LVDS FMT is set up by atom */
422 	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
423 		return;
424 
425 	if (bpc == 0)
426 		return;
427 
428 
429 	switch (bpc) {
430 	case 6:
431 		if (dither == AMDGPU_FMT_DITHER_ENABLE)
432 			/* XXX sort out optimal dither settings */
433 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
434 				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
435 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
436 		else
437 			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
438 		break;
439 	case 8:
440 		if (dither == AMDGPU_FMT_DITHER_ENABLE)
441 			/* XXX sort out optimal dither settings */
442 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
443 				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
444 				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
445 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
446 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
447 		else
448 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
449 				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
450 		break;
451 	case 10:
452 	default:
453 		/* not needed */
454 		break;
455 	}
456 
457 	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
458 }
459 
460 /**
461  * si_get_number_of_dram_channels - get the number of dram channels
462  *
463  * @adev: amdgpu_device pointer
464  *
465  * Look up the number of video ram channels (SI).
466  * Used for display watermark bandwidth calculations
467  * Returns the number of dram channels
468  */
469 static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
470 {
471 	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
472 
473 	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
474 	case 0:
475 	default:
476 		return 1;
477 	case 1:
478 		return 2;
479 	case 2:
480 		return 4;
481 	case 3:
482 		return 8;
483 	case 4:
484 		return 3;
485 	case 5:
486 		return 6;
487 	case 6:
488 		return 10;
489 	case 7:
490 		return 12;
491 	case 8:
492 		return 16;
493 	}
494 }
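/*
 * NOOFCHAN is a non-linear encoding, hence the lookup table above rather
 * than a shift: e.g. a field value of 3 means 8 channels while 4 means
 * only 3.  The result feeds the dram bandwidth estimates below.
 */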
495 
496 struct dce6_wm_params {
497 	u32 dram_channels; /* number of dram channels */
498 	u32 yclk;          /* bandwidth per dram data pin in kHz */
499 	u32 sclk;          /* engine clock in kHz */
500 	u32 disp_clk;      /* display clock in kHz */
501 	u32 src_width;     /* viewport width */
502 	u32 active_time;   /* active display time in ns */
503 	u32 blank_time;    /* blank time in ns */
504 	bool interlaced;    /* mode is interlaced */
505 	fixed20_12 vsc;    /* vertical scale ratio */
506 	u32 num_heads;     /* number of active crtcs */
507 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
508 	u32 lb_size;       /* line buffer allocated to pipe */
509 	u32 vtaps;         /* vertical scaler taps */
510 };
511 
512 /**
513  * dce_v6_0_dram_bandwidth - get the dram bandwidth
514  *
515  * @wm: watermark calculation data
516  *
517  * Calculate the raw dram bandwidth (SI).
518  * Used for display watermark bandwidth calculations
519  * Returns the dram bandwidth in MBytes/s
520  */
521 static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
522 {
523 	/* Calculate raw DRAM Bandwidth */
524 	fixed20_12 dram_efficiency; /* 0.7 */
525 	fixed20_12 yclk, dram_channels, bandwidth;
526 	fixed20_12 a;
527 
528 	a.full = dfixed_const(1000);
529 	yclk.full = dfixed_const(wm->yclk);
530 	yclk.full = dfixed_div(yclk, a);
531 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
532 	a.full = dfixed_const(10);
533 	dram_efficiency.full = dfixed_const(7);
534 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
535 	bandwidth.full = dfixed_mul(dram_channels, yclk);
536 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
537 
538 	return dfixed_trunc(bandwidth);
539 }
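/*
 * The fixed-point math above evaluates to
 *   bandwidth [MBytes/s] = (yclk / 1000) * (dram_channels * 4) * 0.7
 * with 0.7 being the assumed dram efficiency.  Illustrative example
 * (values assumed): yclk = 1000000 kHz and 2 channels gives
 * 1000 * 8 * 0.7 = 5600 MBytes/s.
 */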
540 
541 /**
542  * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
543  *
544  * @wm: watermark calculation data
545  *
546  * Calculate the dram bandwidth used for display (SI).
547  * Used for display watermark bandwidth calculations
548  * Returns the dram bandwidth for display in MBytes/s
549  */
550 static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
551 {
552 	/* Calculate DRAM Bandwidth and the part allocated to display. */
553 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
554 	fixed20_12 yclk, dram_channels, bandwidth;
555 	fixed20_12 a;
556 
557 	a.full = dfixed_const(1000);
558 	yclk.full = dfixed_const(wm->yclk);
559 	yclk.full = dfixed_div(yclk, a);
560 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
561 	a.full = dfixed_const(10);
562 	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
563 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
564 	bandwidth.full = dfixed_mul(dram_channels, yclk);
565 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
566 
567 	return dfixed_trunc(bandwidth);
568 }
569 
570 /**
571  * dce_v6_0_data_return_bandwidth - get the data return bandwidth
572  *
573  * @wm: watermark calculation data
574  *
575  * Calculate the data return bandwidth used for display (SI).
576  * Used for display watermark bandwidth calculations
577  * Returns the data return bandwidth in MBytes/s
578  */
579 static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
580 {
581 	/* Calculate the display Data return Bandwidth */
582 	fixed20_12 return_efficiency; /* 0.8 */
583 	fixed20_12 sclk, bandwidth;
584 	fixed20_12 a;
585 
586 	a.full = dfixed_const(1000);
587 	sclk.full = dfixed_const(wm->sclk);
588 	sclk.full = dfixed_div(sclk, a);
589 	a.full = dfixed_const(10);
590 	return_efficiency.full = dfixed_const(8);
591 	return_efficiency.full = dfixed_div(return_efficiency, a);
592 	a.full = dfixed_const(32);
593 	bandwidth.full = dfixed_mul(a, sclk);
594 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
595 
596 	return dfixed_trunc(bandwidth);
597 }
598 
599 /**
600  * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
601  *
602  * @wm: watermark calculation data
603  *
604  * Calculate the dmif bandwidth used for display (SI).
605  * Used for display watermark bandwidth calculations
606  * Returns the dmif bandwidth in MBytes/s
607  */
608 static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
609 {
610 	/* Calculate the DMIF Request Bandwidth */
611 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
612 	fixed20_12 disp_clk, bandwidth;
613 	fixed20_12 a, b;
614 
615 	a.full = dfixed_const(1000);
616 	disp_clk.full = dfixed_const(wm->disp_clk);
617 	disp_clk.full = dfixed_div(disp_clk, a);
618 	a.full = dfixed_const(32);
619 	b.full = dfixed_mul(a, disp_clk);
620 
621 	a.full = dfixed_const(10);
622 	disp_clk_request_efficiency.full = dfixed_const(8);
623 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
624 
625 	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
626 
627 	return dfixed_trunc(bandwidth);
628 }
629 
630 /**
631  * dce_v6_0_available_bandwidth - get the min available bandwidth
632  *
633  * @wm: watermark calculation data
634  *
635  * Calculate the min available bandwidth used for display (SI).
636  * Used for display watermark bandwidth calculations
637  * Returns the min available bandwidth in MBytes/s
638  */
639 static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
640 {
641 	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
642 	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
643 	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
644 	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
645 
646 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
647 }
648 
649 /**
650  * dce_v6_0_average_bandwidth - get the average available bandwidth
651  *
652  * @wm: watermark calculation data
653  *
654  * Calculate the average available bandwidth used for display (SI).
655  * Used for display watermark bandwidth calculations
656  * Returns the average available bandwidth in MBytes/s
657  */
658 static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
659 {
660 	/* Calculate the display mode Average Bandwidth
661 	 * DisplayMode should contain the source and destination dimensions,
662 	 * timing, etc.
663 	 */
664 	fixed20_12 bpp;
665 	fixed20_12 line_time;
666 	fixed20_12 src_width;
667 	fixed20_12 bandwidth;
668 	fixed20_12 a;
669 
670 	a.full = dfixed_const(1000);
671 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
672 	line_time.full = dfixed_div(line_time, a);
673 	bpp.full = dfixed_const(wm->bytes_per_pixel);
674 	src_width.full = dfixed_const(wm->src_width);
675 	bandwidth.full = dfixed_mul(src_width, bpp);
676 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
677 	bandwidth.full = dfixed_div(bandwidth, line_time);
678 
679 	return dfixed_trunc(bandwidth);
680 }
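/*
 * Roughly: average bandwidth [MBytes/s] = src_width * bytes_per_pixel *
 * vsc / line_time, with line_time in us.  Illustrative example (values
 * assumed): a 1920 pixel wide, 4 Bpp source with vsc = 1 and a 12.8 us
 * line time needs 1920 * 4 / 12.8 = 600 MBytes/s.
 */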
681 
682 /**
683  * dce_v6_0_latency_watermark - get the latency watermark
684  *
685  * @wm: watermark calculation data
686  *
687  * Calculate the latency watermark (SI).
688  * Used for display watermark bandwidth calculations
689  * Returns the latency watermark in ns
690  */
691 static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
692 {
693 	/* First calculate the latency in ns */
694 	u32 mc_latency = 2000; /* 2000 ns. */
695 	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
696 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
697 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
698 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
699 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
700 		(wm->num_heads * cursor_line_pair_return_time);
701 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
702 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
703 	u32 tmp, dmif_size = 12288;
704 	fixed20_12 a, b, c;
705 
706 	if (wm->num_heads == 0)
707 		return 0;
708 
709 	a.full = dfixed_const(2);
710 	b.full = dfixed_const(1);
711 	if ((wm->vsc.full > a.full) ||
712 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
713 	    (wm->vtaps >= 5) ||
714 	    ((wm->vsc.full >= a.full) && wm->interlaced))
715 		max_src_lines_per_dst_line = 4;
716 	else
717 		max_src_lines_per_dst_line = 2;
718 
719 	a.full = dfixed_const(available_bandwidth);
720 	b.full = dfixed_const(wm->num_heads);
721 	a.full = dfixed_div(a, b);
722 	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
723 	tmp = min(dfixed_trunc(a), tmp);
724 
725 	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
726 
727 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
728 	b.full = dfixed_const(1000);
729 	c.full = dfixed_const(lb_fill_bw);
730 	b.full = dfixed_div(c, b);
731 	a.full = dfixed_div(a, b);
732 	line_fill_time = dfixed_trunc(a);
733 
734 	if (line_fill_time < wm->active_time)
735 		return latency;
736 	else
737 		return latency + (line_fill_time - wm->active_time);
738 
739 }
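/*
 * In short, the watermark is the memory latency the display must be able
 * to ride out: the fixed mc latency plus the dc pipe latency plus the
 * time the other heads may spend returning their chunks, and, if the
 * line buffer cannot be refilled within one active line, the shortfall
 * (line_fill_time - active_time) is added on top.
 */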
740 
741 /**
742  * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
743  * average and available dram bandwidth
744  *
745  * @wm: watermark calculation data
746  *
747  * Check if the display average bandwidth fits in the display
748  * dram bandwidth (SI).
749  * Used for display watermark bandwidth calculations
750  * Returns true if the display fits, false if not.
751  */
752 static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
753 {
754 	if (dce_v6_0_average_bandwidth(wm) <=
755 	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
756 		return true;
757 	else
758 		return false;
759 }
760 
761 /**
762  * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
763  * average and available bandwidth
764  *
765  * @wm: watermark calculation data
766  *
767  * Check if the display average bandwidth fits in the display
768  * available bandwidth (SI).
769  * Used for display watermark bandwidth calculations
770  * Returns true if the display fits, false if not.
771  */
772 static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
773 {
774 	if (dce_v6_0_average_bandwidth(wm) <=
775 	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
776 		return true;
777 	else
778 		return false;
779 }
780 
781 /**
782  * dce_v6_0_check_latency_hiding - check latency hiding
783  *
784  * @wm: watermark calculation data
785  *
786  * Check latency hiding (SI).
787  * Used for display watermark bandwidth calculations
788  * Returns true if the display fits, false if not.
789  */
790 static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
791 {
792 	u32 lb_partitions = wm->lb_size / wm->src_width;
793 	u32 line_time = wm->active_time + wm->blank_time;
794 	u32 latency_tolerant_lines;
795 	u32 latency_hiding;
796 	fixed20_12 a;
797 
798 	a.full = dfixed_const(1);
799 	if (wm->vsc.full > a.full)
800 		latency_tolerant_lines = 1;
801 	else {
802 		if (lb_partitions <= (wm->vtaps + 1))
803 			latency_tolerant_lines = 1;
804 		else
805 			latency_tolerant_lines = 2;
806 	}
807 
808 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
809 
810 	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
811 		return true;
812 	else
813 		return false;
814 }
815 
816 /**
817  * dce_v6_0_program_watermarks - program display watermarks
818  *
819  * @adev: amdgpu_device pointer
820  * @amdgpu_crtc: the selected display controller
821  * @lb_size: line buffer size
822  * @num_heads: number of display controllers in use
823  *
824  * Calculate and program the display watermarks for the
825  * selected display controller (SI).
826  */
827 static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
828 					struct amdgpu_crtc *amdgpu_crtc,
829 					u32 lb_size, u32 num_heads)
830 {
831 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
832 	struct dce6_wm_params wm_low, wm_high;
833 	u32 dram_channels;
834 	u32 active_time;
835 	u32 line_time = 0;
836 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
837 	u32 priority_a_mark = 0, priority_b_mark = 0;
838 	u32 priority_a_cnt = PRIORITY_OFF;
839 	u32 priority_b_cnt = PRIORITY_OFF;
840 	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
841 	fixed20_12 a, b, c;
842 
843 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
844 		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
845 					    (u32)mode->clock);
846 		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
847 					  (u32)mode->clock);
848 		line_time = min(line_time, (u32)65535);
849 		priority_a_cnt = 0;
850 		priority_b_cnt = 0;
851 
852 		dram_channels = si_get_number_of_dram_channels(adev);
853 
854 		/* watermark for high clocks */
855 		if (adev->pm.dpm_enabled) {
856 			wm_high.yclk =
857 				amdgpu_dpm_get_mclk(adev, false) * 10;
858 			wm_high.sclk =
859 				amdgpu_dpm_get_sclk(adev, false) * 10;
860 		} else {
861 			wm_high.yclk = adev->pm.current_mclk * 10;
862 			wm_high.sclk = adev->pm.current_sclk * 10;
863 		}
864 
865 		wm_high.disp_clk = mode->clock;
866 		wm_high.src_width = mode->crtc_hdisplay;
867 		wm_high.active_time = active_time;
868 		wm_high.blank_time = line_time - wm_high.active_time;
869 		wm_high.interlaced = false;
870 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
871 			wm_high.interlaced = true;
872 		wm_high.vsc = amdgpu_crtc->vsc;
873 		wm_high.vtaps = 1;
874 		if (amdgpu_crtc->rmx_type != RMX_OFF)
875 			wm_high.vtaps = 2;
876 		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
877 		wm_high.lb_size = lb_size;
878 		wm_high.dram_channels = dram_channels;
879 		wm_high.num_heads = num_heads;
880 
881 		if (adev->pm.dpm_enabled) {
882 			/* watermark for low clocks */
883 			wm_low.yclk =
884 				amdgpu_dpm_get_mclk(adev, true) * 10;
885 			wm_low.sclk =
886 				amdgpu_dpm_get_sclk(adev, true) * 10;
887 		} else {
888 			wm_low.yclk = adev->pm.current_mclk * 10;
889 			wm_low.sclk = adev->pm.current_sclk * 10;
890 		}
891 
892 		wm_low.disp_clk = mode->clock;
893 		wm_low.src_width = mode->crtc_hdisplay;
894 		wm_low.active_time = active_time;
895 		wm_low.blank_time = line_time - wm_low.active_time;
896 		wm_low.interlaced = false;
897 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
898 			wm_low.interlaced = true;
899 		wm_low.vsc = amdgpu_crtc->vsc;
900 		wm_low.vtaps = 1;
901 		if (amdgpu_crtc->rmx_type != RMX_OFF)
902 			wm_low.vtaps = 2;
903 		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
904 		wm_low.lb_size = lb_size;
905 		wm_low.dram_channels = dram_channels;
906 		wm_low.num_heads = num_heads;
907 
908 		/* set for high clocks */
909 		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
910 		/* set for low clocks */
911 		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
912 
913 		/* possibly force display priority to high */
914 		/* should really do this at mode validation time... */
915 		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
916 		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
917 		    !dce_v6_0_check_latency_hiding(&wm_high) ||
918 		    (adev->mode_info.disp_priority == 2)) {
919 			DRM_DEBUG_KMS("force priority to high\n");
920 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
921 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
922 		}
923 		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
924 		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
925 		    !dce_v6_0_check_latency_hiding(&wm_low) ||
926 		    (adev->mode_info.disp_priority == 2)) {
927 			DRM_DEBUG_KMS("force priority to high\n");
928 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
929 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
930 		}
931 
932 		a.full = dfixed_const(1000);
933 		b.full = dfixed_const(mode->clock);
934 		b.full = dfixed_div(b, a);
935 		c.full = dfixed_const(latency_watermark_a);
936 		c.full = dfixed_mul(c, b);
937 		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
938 		c.full = dfixed_div(c, a);
939 		a.full = dfixed_const(16);
940 		c.full = dfixed_div(c, a);
941 		priority_a_mark = dfixed_trunc(c);
942 		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
943 
944 		a.full = dfixed_const(1000);
945 		b.full = dfixed_const(mode->clock);
946 		b.full = dfixed_div(b, a);
947 		c.full = dfixed_const(latency_watermark_b);
948 		c.full = dfixed_mul(c, b);
949 		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
950 		c.full = dfixed_div(c, a);
951 		a.full = dfixed_const(16);
952 		c.full = dfixed_div(c, a);
953 		priority_b_mark = dfixed_trunc(c);
954 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
955 
956 		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
957 	}
958 
959 	/* select wm A */
960 	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
961 	tmp = arb_control3;
962 	tmp &= ~LATENCY_WATERMARK_MASK(3);
963 	tmp |= LATENCY_WATERMARK_MASK(1);
964 	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
965 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
966 	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT)  |
967 		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
968 	/* select wm B */
969 	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
970 	tmp &= ~LATENCY_WATERMARK_MASK(3);
971 	tmp |= LATENCY_WATERMARK_MASK(2);
972 	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
973 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
974 	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
975 		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
976 	/* restore original selection */
977 	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
978 
979 	/* write the priority marks */
980 	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
981 	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
982 
983 	/* save values for DPM */
984 	amdgpu_crtc->line_time = line_time;
985 	amdgpu_crtc->wm_high = latency_watermark_a;
986 
987 	/* Save number of lines the linebuffer leads before the scanout */
988 	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
989 }
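/*
 * The priority mark programmed above works out to
 *   latency_watermark [ns] * pixel clock [MHz] * hsc / 1000 / 16
 * i.e. the tolerated latency expressed in pixels, in 16-pixel units.
 * Illustrative example (values assumed): a 2500 ns watermark at
 * 148.5 MHz with hsc = 1 gives 2500 * 148.5 / 1000 / 16 ~= 23.
 */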
990 
991 /* watermark setup */
992 static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
993 				   struct amdgpu_crtc *amdgpu_crtc,
994 				   struct drm_display_mode *mode,
995 				   struct drm_display_mode *other_mode)
996 {
997 	u32 tmp, buffer_alloc, i;
998 	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
999 	/*
1000 	 * Line Buffer Setup
1001 	 * There are 3 line buffers, each one shared by 2 display controllers.
1002 	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1003 	 * the display controllers.  The partitioning is done via one of four
1004 	 * preset allocations specified in bits 21:20:
1005 	 *  0 - half lb
1006 	 *  2 - whole lb, other crtc must be disabled
1007 	 */
1008 	/* this can get tricky if we have two large displays on a paired group
1009 	 * of crtcs.  Ideally for multiple large displays we'd assign them to
1010 	 * non-linked crtcs for maximum line buffer allocation.
1011 	 */
1012 	if (amdgpu_crtc->base.enabled && mode) {
1013 		if (other_mode) {
1014 			tmp = 0; /* 1/2 */
1015 			buffer_alloc = 1;
1016 		} else {
1017 			tmp = 2; /* whole */
1018 			buffer_alloc = 2;
1019 		}
1020 	} else {
1021 		tmp = 0;
1022 		buffer_alloc = 0;
1023 	}
1024 
1025 	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
1026 	       DC_LB_MEMORY_CONFIG(tmp));
1027 
1028 	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1029 	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
1030 	for (i = 0; i < adev->usec_timeout; i++) {
1031 		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1032 		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
1033 			break;
1034 		udelay(1);
1035 	}
1036 
1037 	if (amdgpu_crtc->base.enabled && mode) {
1038 		switch (tmp) {
1039 		case 0:
1040 		default:
1041 			return 4096 * 2;
1042 		case 2:
1043 			return 8192 * 2;
1044 		}
1045 	}
1046 
1047 	/* controller not enabled, so no lb used */
1048 	return 0;
1049 }
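/*
 * Illustrative outcome of the split above: when both crtcs of a pair are
 * active each gets half the line buffer and this returns 4096 * 2 = 8192;
 * when the partner crtc is disabled the whole buffer is claimed and
 * 8192 * 2 = 16384 is returned.  A disabled crtc gets 0.
 */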
1050 
1051 
1052 /**
1053  * dce_v6_0_bandwidth_update - program display watermarks
1054  *
1055  * @adev: amdgpu_device pointer
1056  *
1057  * Calculate and program the display watermarks and line
1058  * buffer allocation (SI).
1059  */
1060 static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
1061 {
1062 	struct drm_display_mode *mode0 = NULL;
1063 	struct drm_display_mode *mode1 = NULL;
1064 	u32 num_heads = 0, lb_size;
1065 	int i;
1066 
1067 	if (!adev->mode_info.mode_config_initialized)
1068 		return;
1069 
1070 	amdgpu_display_update_priority(adev);
1071 
1072 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1073 		if (adev->mode_info.crtcs[i]->base.enabled)
1074 			num_heads++;
1075 	}
1076 	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
1077 		mode0 = &adev->mode_info.crtcs[i]->base.mode;
1078 		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
1079 		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
1080 		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
1081 		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
1082 		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
1083 	}
1084 }
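/*
 * Crtcs are walked in pairs (i, i + 1) because each pair shares one line
 * buffer; the partner's mode is passed along so the line buffer split can
 * be decided before the watermarks for each crtc are computed.
 */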
1085 
1086 static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
1087 {
1088 	int i;
1089 	u32 tmp;
1090 
1091 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1092 		tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
1093 				ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1094 		if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
1095 					PORT_CONNECTIVITY))
1096 			adev->mode_info.audio.pin[i].connected = false;
1097 		else
1098 			adev->mode_info.audio.pin[i].connected = true;
1099 	}
1100 
1101 }
1102 
1103 static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
1104 {
1105 	int i;
1106 
1107 	dce_v6_0_audio_get_connected_pins(adev);
1108 
1109 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1110 		if (adev->mode_info.audio.pin[i].connected)
1111 			return &adev->mode_info.audio.pin[i];
1112 	}
1113 	DRM_ERROR("No connected audio pins found!\n");
1114 	return NULL;
1115 }
1116 
1117 static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
1118 {
1119 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
1120 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1121 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1122 
1123 	if (!dig || !dig->afmt || !dig->afmt->pin)
1124 		return;
1125 
1126 	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
1127 	       REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
1128 		             dig->afmt->pin->id));
1129 }
1130 
1131 static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
1132 						struct drm_display_mode *mode)
1133 {
1134 	struct drm_device *dev = encoder->dev;
1135 	struct amdgpu_device *adev = drm_to_adev(dev);
1136 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1137 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1138 	struct drm_connector *connector;
1139 	struct drm_connector_list_iter iter;
1140 	struct amdgpu_connector *amdgpu_connector = NULL;
1141 	int interlace = 0;
1142 	u32 tmp;
1143 
1144 	drm_connector_list_iter_begin(dev, &iter);
1145 	drm_for_each_connector_iter(connector, &iter) {
1146 		if (connector->encoder == encoder) {
1147 			amdgpu_connector = to_amdgpu_connector(connector);
1148 			break;
1149 		}
1150 	}
1151 	drm_connector_list_iter_end(&iter);
1152 
1153 	if (!amdgpu_connector) {
1154 		DRM_ERROR("Couldn't find encoder's connector\n");
1155 		return;
1156 	}
1157 
1158 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1159 		interlace = 1;
1160 
1161 	if (connector->latency_present[interlace]) {
1162 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1163 				VIDEO_LIPSYNC, connector->video_latency[interlace]);
1164 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1165 				AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1166 	} else {
1167 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1168 				VIDEO_LIPSYNC, 0);
1169 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1170 				AUDIO_LIPSYNC, 0);
1171 	}
1172 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1173 			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1174 }
1175 
1176 static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1177 {
1178 	struct drm_device *dev = encoder->dev;
1179 	struct amdgpu_device *adev = drm_to_adev(dev);
1180 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1181 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1182 	struct drm_connector *connector;
1183 	struct drm_connector_list_iter iter;
1184 	struct amdgpu_connector *amdgpu_connector = NULL;
1185 	u8 *sadb = NULL;
1186 	int sad_count;
1187 	u32 tmp;
1188 
1189 	drm_connector_list_iter_begin(dev, &iter);
1190 	drm_for_each_connector_iter(connector, &iter) {
1191 		if (connector->encoder == encoder) {
1192 			amdgpu_connector = to_amdgpu_connector(connector);
1193 			break;
1194 		}
1195 	}
1196 	drm_connector_list_iter_end(&iter);
1197 
1198 	if (!amdgpu_connector) {
1199 		DRM_ERROR("Couldn't find encoder's connector\n");
1200 		return;
1201 	}
1202 
1203 	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1204 	if (sad_count < 0) {
1205 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1206 		sad_count = 0;
1207 	}
1208 
1209 	/* program the speaker allocation */
1210 	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1211 			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1212 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1213 			HDMI_CONNECTION, 0);
1214 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1215 			DP_CONNECTION, 0);
1216 
1217 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
1218 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1219 				DP_CONNECTION, 1);
1220 	else
1221 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1222 				HDMI_CONNECTION, 1);
1223 
1224 	if (sad_count)
1225 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1226 				SPEAKER_ALLOCATION, sadb[0]);
1227 	else
1228 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1229 				SPEAKER_ALLOCATION, 5); /* stereo */
1230 
1231 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1232 			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1233 
1234 	kfree(sadb);
1235 }
1236 
1237 static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
1238 {
1239 	struct drm_device *dev = encoder->dev;
1240 	struct amdgpu_device *adev = drm_to_adev(dev);
1241 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1242 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1243 	struct drm_connector *connector;
1244 	struct drm_connector_list_iter iter;
1245 	struct amdgpu_connector *amdgpu_connector = NULL;
1246 	struct cea_sad *sads;
1247 	int i, sad_count;
1248 
1249 	static const u16 eld_reg_to_type[][2] = {
1250 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1251 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1252 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1253 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1254 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1255 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1256 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1257 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1258 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1259 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1260 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1261 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1262 	};
1263 
1264 	drm_connector_list_iter_begin(dev, &iter);
1265 	drm_for_each_connector_iter(connector, &iter) {
1266 		if (connector->encoder == encoder) {
1267 			amdgpu_connector = to_amdgpu_connector(connector);
1268 			break;
1269 		}
1270 	}
1271 	drm_connector_list_iter_end(&iter);
1272 
1273 	if (!amdgpu_connector) {
1274 		DRM_ERROR("Couldn't find encoder's connector\n");
1275 		return;
1276 	}
1277 
1278 	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1279 	if (sad_count < 0)
1280 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1281 	if (sad_count <= 0)
1282 		return;
1283 
1284 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1285 		u32 tmp = 0;
1286 		u8 stereo_freqs = 0;
1287 		int max_channels = -1;
1288 		int j;
1289 
1290 		for (j = 0; j < sad_count; j++) {
1291 			struct cea_sad *sad = &sads[j];
1292 
1293 			if (sad->format == eld_reg_to_type[i][1]) {
1294 				if (sad->channels > max_channels) {
1295 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1296 							MAX_CHANNELS, sad->channels);
1297 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1298 							DESCRIPTOR_BYTE_2, sad->byte2);
1299 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1300 							SUPPORTED_FREQUENCIES, sad->freq);
1301 					max_channels = sad->channels;
1302 				}
1303 
1304 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1305 					stereo_freqs |= sad->freq;
1306 				else
1307 					break;
1308 			}
1309 		}
1310 
1311 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1312 				SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
1313 		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
1314 	}
1315 
1316 	kfree(sads);
1317 
1318 }
1319 
1320 static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
1321 				  struct amdgpu_audio_pin *pin,
1322 				  bool enable)
1323 {
1324 	if (!pin)
1325 		return;
1326 
1327 	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1328 			enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1329 }
1330 
1331 static const u32 pin_offsets[7] =
1332 {
1333 	(0x1780 - 0x1780),
1334 	(0x1786 - 0x1780),
1335 	(0x178c - 0x1780),
1336 	(0x1792 - 0x1780),
1337 	(0x1798 - 0x1780),
1338 	(0x179d - 0x1780),
1339 	(0x17a4 - 0x1780),
1340 };
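/*
 * Register offsets of the audio (Azalia) pins relative to the first pin
 * at 0x1780.  pin[i].offset below is handed to the audio endpoint
 * read/write helpers (RREG32_AUDIO_ENDPT/WREG32_AUDIO_ENDPT) as the
 * block offset.
 */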
1341 
1342 static int dce_v6_0_audio_init(struct amdgpu_device *adev)
1343 {
1344 	int i;
1345 
1346 	if (!amdgpu_audio)
1347 		return 0;
1348 
1349 	adev->mode_info.audio.enabled = true;
1350 
1351 	switch (adev->asic_type) {
1352 	case CHIP_TAHITI:
1353 	case CHIP_PITCAIRN:
1354 	case CHIP_VERDE:
1355 	default:
1356 		adev->mode_info.audio.num_pins = 6;
1357 		break;
1358 	case CHIP_OLAND:
1359 		adev->mode_info.audio.num_pins = 2;
1360 		break;
1361 	}
1362 
1363 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1364 		adev->mode_info.audio.pin[i].channels = -1;
1365 		adev->mode_info.audio.pin[i].rate = -1;
1366 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1367 		adev->mode_info.audio.pin[i].status_bits = 0;
1368 		adev->mode_info.audio.pin[i].category_code = 0;
1369 		adev->mode_info.audio.pin[i].connected = false;
1370 		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1371 		adev->mode_info.audio.pin[i].id = i;
1372 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1373 	}
1374 
1375 	return 0;
1376 }
1377 
1378 static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
1379 {
1380 	int i;
1381 
1382 	if (!amdgpu_audio)
1383 		return;
1384 
1385 	if (!adev->mode_info.audio.enabled)
1386 		return;
1387 
1388 	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1389 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1390 
1391 	adev->mode_info.audio.enabled = false;
1392 }
1393 
1394 static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
1395 {
1396 	struct drm_device *dev = encoder->dev;
1397 	struct amdgpu_device *adev = drm_to_adev(dev);
1398 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1399 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1400 	u32 tmp;
1401 
1402 	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1403 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
1404 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
1405 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
1406 	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
1407 }
1408 
1409 static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
1410 				   uint32_t clock, int bpc)
1411 {
1412 	struct drm_device *dev = encoder->dev;
1413 	struct amdgpu_device *adev = drm_to_adev(dev);
1414 	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1415 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1416 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1417 	u32 tmp;
1418 
1419 	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
1420 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
1421 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
1422 			bpc > 8 ? 0 : 1);
1423 	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
1424 
1425 	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
1426 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
1427 	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
1428 	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
1429 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
1430 	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
1431 
1432 	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
1433 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
1434 	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
1435 	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
1436 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
1437 	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
1438 
1439 	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
1440 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
1441 	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
1442 	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
1443 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
1444 	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
1445 }
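/*
 * The N/CTS pairs come from amdgpu_afmt_acr() and satisfy the HDMI audio
 * clock regeneration relation 128 * fs = pixel clock * N / CTS for the
 * three base sample rates (32, 44.1 and 48 kHz).
 */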
1446 
1447 static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
1448 					       struct drm_display_mode *mode)
1449 {
1450 	struct drm_device *dev = encoder->dev;
1451 	struct amdgpu_device *adev = drm_to_adev(dev);
1452 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1453 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1454 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1455 	struct hdmi_avi_infoframe frame;
1456 	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1457 	uint8_t *payload = buffer + 3;
1458 	uint8_t *header = buffer;
1459 	ssize_t err;
1460 	u32 tmp;
1461 
1462 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1463 	if (err < 0) {
1464 		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1465 		return;
1466 	}
1467 
1468 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1469 	if (err < 0) {
1470 		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1471 		return;
1472 	}
1473 
1474 	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
1475 	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
1476 	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
1477 	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
1478 	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
1479 	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
1480 	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
1481 	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));
1482 
1483 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1484 	/* anything other than 0 */
1485 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
1486 			HDMI_AUDIO_INFO_LINE, 2);
1487 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1488 }
1489 
1490 static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1491 {
1492 	struct drm_device *dev = encoder->dev;
1493 	struct amdgpu_device *adev = drm_to_adev(dev);
1494 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1495 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1496 	u32 tmp;
1497 
1498 	/*
1499 	 * Two dtos: generally use dto0 for hdmi, dto1 for dp.
1500 	 * Express [24MHz / target pixel clock] as an exact rational
1501 	 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
1502 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
1503 	 */
1504 	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1505 	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1506 			DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
1507 	if (em == ATOM_ENCODER_MODE_HDMI) {
1508 		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1509 				DCCG_AUDIO_DTO_SEL, 0);
1510 	} else if (ENCODER_MODE_IS_DP(em)) {
1511 		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1512 				DCCG_AUDIO_DTO_SEL, 1);
1513 	}
1514 	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1515 	if (em == ATOM_ENCODER_MODE_HDMI) {
1516 		WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
1517 		WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
1518 	} else if (ENCODER_MODE_IS_DP(em)) {
1519 		WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
1520 		WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
1521 	}
1522 }
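/*
 * Illustrative example (pixel clock assumed): for a 148500 kHz HDMI mode
 * the DTO gets PHASE = 24000 and MODULE = 148500, so the audio reference
 * is 24000/148500 of the pixel clock, i.e. exactly 24 MHz.
 */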
1523 
1524 static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
1525 {
1526 	struct drm_device *dev = encoder->dev;
1527 	struct amdgpu_device *adev = drm_to_adev(dev);
1528 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1529 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1530 	u32 tmp;
1531 
1532 	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1533 	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1534 	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1535 
1536 	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1537 	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1538 	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1539 
1540 	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1541 	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1542 	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1543 
1544 	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1545 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1546 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1547 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1548 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1549 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1550 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1551 	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1552 
1553 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
1554 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
1555 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);
1556 
1557 	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1558 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1559 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1560 	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1561 
1562 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1563 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
1564 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1565 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1566 }
1567 
1568 static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
1569 {
1570 	struct drm_device *dev = encoder->dev;
1571 	struct amdgpu_device *adev = drm_to_adev(dev);
1572 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1573 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1574 	u32 tmp;
1575 
1576 	tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
1577 	tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
1578 	WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
1579 }
1580 
1581 static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
1582 {
1583 	struct drm_device *dev = encoder->dev;
1584 	struct amdgpu_device *adev = drm_to_adev(dev);
1585 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1586 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1587 	u32 tmp;
1588 
1589 	if (enable) {
1590 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1591 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1592 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1593 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1594 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1595 		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1596 
1597 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1598 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1599 		WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1600 
1601 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1602 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1603 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1604 	} else {
1605 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1606 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
1607 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
1608 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
1609 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
1610 		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1611 
1612 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1613 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
1614 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1615 	}
1616 }
1617 
1618 static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
1619 {
1620 	struct drm_device *dev = encoder->dev;
1621 	struct amdgpu_device *adev = drm_to_adev(dev);
1622 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1623 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1624 	u32 tmp;
1625 
1626 	if (enable) {
1627 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1628 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1629 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1630 
1631 		tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
1632 		tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
1633 		WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);
1634 
1635 		tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
1636 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
1637 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
1638 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
1639 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1640 		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
1641 	} else {
1642 		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
1643 	}
1644 }
1645 
1646 static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1647 				  struct drm_display_mode *mode)
1648 {
1649 	struct drm_device *dev = encoder->dev;
1650 	struct amdgpu_device *adev = drm_to_adev(dev);
1651 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1652 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1653 	struct drm_connector *connector;
1654 	struct drm_connector_list_iter iter;
1655 	struct amdgpu_connector *amdgpu_connector = NULL;
1656 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1657 	int bpc = 8;
1658 
1659 	if (!dig || !dig->afmt)
1660 		return;
1661 
1662 	drm_connector_list_iter_begin(dev, &iter);
1663 	drm_for_each_connector_iter(connector, &iter) {
1664 		if (connector->encoder == encoder) {
1665 			amdgpu_connector = to_amdgpu_connector(connector);
1666 			break;
1667 		}
1668 	}
1669 	drm_connector_list_iter_end(&iter);
1670 
1671 	if (!amdgpu_connector) {
1672 		DRM_ERROR("Couldn't find encoder's connector\n");
1673 		return;
1674 	}
1675 
1676 	if (!dig->afmt->enabled)
1677 		return;
1678 
1679 	dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
1680 	if (!dig->afmt->pin)
1681 		return;
1682 
1683 	if (encoder->crtc) {
1684 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1685 		bpc = amdgpu_crtc->bpc;
1686 	}
1687 
1688 	/* disable audio before setting up hw */
1689 	dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1690 
1691 	dce_v6_0_audio_set_mute(encoder, true);
1692 	dce_v6_0_audio_write_speaker_allocation(encoder);
1693 	dce_v6_0_audio_write_sad_regs(encoder);
1694 	dce_v6_0_audio_write_latency_fields(encoder, mode);
1695 	if (em == ATOM_ENCODER_MODE_HDMI) {
1696 		dce_v6_0_audio_set_dto(encoder, mode->clock);
1697 		dce_v6_0_audio_set_vbi_packet(encoder);
1698 		dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
1699 	} else if (ENCODER_MODE_IS_DP(em)) {
1700 		dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
1701 	}
1702 	dce_v6_0_audio_set_packet(encoder);
1703 	dce_v6_0_audio_select_pin(encoder);
1704 	dce_v6_0_audio_set_avi_infoframe(encoder, mode);
1705 	dce_v6_0_audio_set_mute(encoder, false);
1706 	if (em == ATOM_ENCODER_MODE_HDMI) {
1707 		dce_v6_0_audio_hdmi_enable(encoder, true);
1708 	} else if (ENCODER_MODE_IS_DP(em)) {
1709 		dce_v6_0_audio_dp_enable(encoder, true);
1710 	}
1711 
1712 	/* enable audio after setting up hw */
1713 	dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
1714 }
1715 
1716 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1717 {
1718 	struct drm_device *dev = encoder->dev;
1719 	struct amdgpu_device *adev = drm_to_adev(dev);
1720 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1721 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1722 
1723 	if (!dig || !dig->afmt)
1724 		return;
1725 
1726 	/* nothing to do if the interface is already in the requested state */
1727 	if (enable && dig->afmt->enabled)
1728 		return;
1729 
1730 	if (!enable && !dig->afmt->enabled)
1731 		return;
1732 
1733 	if (!enable && dig->afmt->pin) {
1734 		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1735 		dig->afmt->pin = NULL;
1736 	}
1737 
1738 	dig->afmt->enabled = enable;
1739 
1740 	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1741 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1742 }
1743 
1744 static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1745 {
1746 	int i, j;
1747 
1748 	for (i = 0; i < adev->mode_info.num_dig; i++)
1749 		adev->mode_info.afmt[i] = NULL;
1750 
1751 	/* DCE6 has audio blocks tied to DIG encoders */
1752 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1753 		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1754 		if (adev->mode_info.afmt[i]) {
1755 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1756 			adev->mode_info.afmt[i]->id = i;
1757 		} else {
1758 			for (j = 0; j < i; j++) {
1759 				kfree(adev->mode_info.afmt[j]);
1760 				adev->mode_info.afmt[j] = NULL;
1761 			}
1762 			DRM_ERROR("Out of memory allocating afmt table\n");
1763 			return -ENOMEM;
1764 		}
1765 	}
1766 	return 0;
1767 }
1768 
1769 static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1770 {
1771 	int i;
1772 
1773 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1774 		kfree(adev->mode_info.afmt[i]);
1775 		adev->mode_info.afmt[i] = NULL;
1776 	}
1777 }
1778 
1779 static const u32 vga_control_regs[6] =
1780 {
1781 	mmD1VGA_CONTROL,
1782 	mmD2VGA_CONTROL,
1783 	mmD3VGA_CONTROL,
1784 	mmD4VGA_CONTROL,
1785 	mmD5VGA_CONTROL,
1786 	mmD6VGA_CONTROL,
1787 };
1788 
1789 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1790 {
1791 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1792 	struct drm_device *dev = crtc->dev;
1793 	struct amdgpu_device *adev = drm_to_adev(dev);
1794 	u32 vga_control;
1795 
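	/* bit 0 of the DxVGA_CONTROL register gates VGA mode for this crtc */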
1796 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1797 	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1798 }
1799 
1800 static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1801 {
1802 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1803 	struct drm_device *dev = crtc->dev;
1804 	struct amdgpu_device *adev = drm_to_adev(dev);
1805 
1806 	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1807 }
1808 
1809 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1810 				     struct drm_framebuffer *fb,
1811 				     int x, int y, int atomic)
1812 {
1813 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1814 	struct drm_device *dev = crtc->dev;
1815 	struct amdgpu_device *adev = drm_to_adev(dev);
1816 	struct drm_framebuffer *target_fb;
1817 	struct drm_gem_object *obj;
1818 	struct amdgpu_bo *abo;
1819 	uint64_t fb_location, tiling_flags;
1820 	uint32_t fb_format, fb_pitch_pixels, pipe_config;
1821 	u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
1822 	u32 viewport_w, viewport_h;
1823 	int r;
1824 	bool bypass_lut = false;
1825 
1826 	/* no fb bound */
1827 	if (!atomic && !crtc->primary->fb) {
1828 		DRM_DEBUG_KMS("No FB bound\n");
1829 		return 0;
1830 	}
1831 
1832 	if (atomic)
1833 		target_fb = fb;
1834 	else
1835 		target_fb = crtc->primary->fb;
1836 
1837 	/* If atomic, assume fb object is pinned & idle & fenced and
1838 	 * just update base pointers
1839 	 */
1840 	obj = target_fb->obj[0];
1841 	abo = gem_to_amdgpu_bo(obj);
1842 	r = amdgpu_bo_reserve(abo, false);
1843 	if (unlikely(r != 0))
1844 		return r;
1845 
1846 	if (!atomic) {
1847 		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1848 		if (unlikely(r != 0)) {
1849 			amdgpu_bo_unreserve(abo);
1850 			return -EINVAL;
1851 		}
1852 	}
1853 	fb_location = amdgpu_bo_gpu_offset(abo);
1854 
1855 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1856 	amdgpu_bo_unreserve(abo);
1857 
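	/*
	 * Translate the DRM fourcc into the GRPH_CONTROL depth/format bits
	 * and, on big-endian hosts, the required byte-swap mode.
	 */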
1858 	switch (target_fb->format->format) {
1859 	case DRM_FORMAT_C8:
1860 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
1861 			     GRPH_FORMAT(GRPH_FORMAT_INDEXED));
1862 		break;
1863 	case DRM_FORMAT_XRGB4444:
1864 	case DRM_FORMAT_ARGB4444:
1865 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1866 			     GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
1867 #ifdef __BIG_ENDIAN
1868 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1869 #endif
1870 		break;
1871 	case DRM_FORMAT_XRGB1555:
1872 	case DRM_FORMAT_ARGB1555:
1873 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1874 			     GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
1875 #ifdef __BIG_ENDIAN
1876 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1877 #endif
1878 		break;
1879 	case DRM_FORMAT_BGRX5551:
1880 	case DRM_FORMAT_BGRA5551:
1881 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1882 			     GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
1883 #ifdef __BIG_ENDIAN
1884 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1885 #endif
1886 		break;
1887 	case DRM_FORMAT_RGB565:
1888 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1889 			     GRPH_FORMAT(GRPH_FORMAT_ARGB565));
1890 #ifdef __BIG_ENDIAN
1891 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1892 #endif
1893 		break;
1894 	case DRM_FORMAT_XRGB8888:
1895 	case DRM_FORMAT_ARGB8888:
1896 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1897 			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1898 #ifdef __BIG_ENDIAN
1899 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1900 #endif
1901 		break;
1902 	case DRM_FORMAT_XRGB2101010:
1903 	case DRM_FORMAT_ARGB2101010:
1904 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1905 			     GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
1906 #ifdef __BIG_ENDIAN
1907 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1908 #endif
1909 		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1910 		bypass_lut = true;
1911 		break;
1912 	case DRM_FORMAT_BGRX1010102:
1913 	case DRM_FORMAT_BGRA1010102:
1914 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1915 			     GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
1916 #ifdef __BIG_ENDIAN
1917 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1918 #endif
1919 		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1920 		bypass_lut = true;
1921 		break;
1922 	case DRM_FORMAT_XBGR8888:
1923 	case DRM_FORMAT_ABGR8888:
1924 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1925 			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1926 		fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
1927 			   GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
1928 #ifdef __BIG_ENDIAN
1929 		fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1930 #endif
1931 		break;
1932 	default:
1933 		DRM_ERROR("Unsupported screen format %p4cc\n",
1934 			  &target_fb->format->format);
1935 		return -EINVAL;
1936 	}
1937 
1938 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1939 		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1940 
1941 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1942 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1943 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1944 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1945 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1946 
1947 		fb_format |= GRPH_NUM_BANKS(num_banks);
1948 		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
1949 		fb_format |= GRPH_TILE_SPLIT(tile_split);
1950 		fb_format |= GRPH_BANK_WIDTH(bankw);
1951 		fb_format |= GRPH_BANK_HEIGHT(bankh);
1952 		fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
1953 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1954 		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
1955 	}
1956 
1957 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1958 	fb_format |= GRPH_PIPE_CONFIG(pipe_config);
1959 
1960 	dce_v6_0_vga_enable(crtc, false);
1961 
1962 	/* Make sure surface address is updated at vertical blank rather than
1963 	 * horizontal blank
1964 	 */
1965 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1966 
1967 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1968 	       upper_32_bits(fb_location));
1969 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1970 	       upper_32_bits(fb_location));
1971 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1972 	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1973 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1974 	       (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1975 	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1976 	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1977 
1978 	/*
1979 	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
1980 	 * for > 8 bpc scanout to avoid truncating fb indices to their 8 MSBs and to
1981 	 * retain the full precision throughout the pipeline.
1982 	 */
1983 	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
1984 		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
1985 		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
1986 
1987 	if (bypass_lut)
1988 		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1989 
1990 	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1991 	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1992 	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1993 	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1994 	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1995 	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1996 
1997 	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1998 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1999 
2000 	dce_v6_0_grph_enable(crtc, true);
2001 
2002 	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2003 		       target_fb->height);
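	/* align the viewport start: x to a multiple of 4, y to a multiple of 2 */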
2004 	x &= ~3;
2005 	y &= ~1;
2006 	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2007 	       (x << 16) | y);
2008 	viewport_w = crtc->mode.hdisplay;
2009 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2010 
2011 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2012 	       (viewport_w << 16) | viewport_h);
2013 
2014 	/* set pageflip to happen anywhere in vblank interval */
2015 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2016 
2017 	if (!atomic && fb && fb != crtc->primary->fb) {
2018 		abo = gem_to_amdgpu_bo(fb->obj[0]);
2019 		r = amdgpu_bo_reserve(abo, true);
2020 		if (unlikely(r != 0))
2021 			return r;
2022 		amdgpu_bo_unpin(abo);
2023 		amdgpu_bo_unreserve(abo);
2024 	}
2025 
2026 	/* Bytes per pixel may have changed */
2027 	dce_v6_0_bandwidth_update(adev);
2028 
2029 	return 0;
2030 
2031 }
2032 
2033 static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
2034 				    struct drm_display_mode *mode)
2035 {
2036 	struct drm_device *dev = crtc->dev;
2037 	struct amdgpu_device *adev = drm_to_adev(dev);
2038 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2039 
2040 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2041 		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
2042 		       INTERLEAVE_EN);
2043 	else
2044 		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2045 }
2046 
2047 static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
2048 {
2049 
2050 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2051 	struct drm_device *dev = crtc->dev;
2052 	struct amdgpu_device *adev = drm_to_adev(dev);
2053 	u16 *r, *g, *b;
2054 	int i;
2055 
2056 	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2057 
2058 	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2059 	       ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2060 		(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2061 	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2062 	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2063 	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2064 	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2065 	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2066 	       ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2067 		(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2068 
2069 	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2070 
2071 	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2072 	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2073 	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2074 
2075 	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2076 	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2077 	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2078 
2079 	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2080 	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2081 
2082 	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2083 	r = crtc->gamma_store;
2084 	g = r + crtc->gamma_size;
2085 	b = g + crtc->gamma_size;
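	/*
	 * Pack each gamma entry into the 10:10:10 DC_LUT_30_COLOR format:
	 * red in bits 29:20, green in bits 19:10, blue in bits 9:0, using
	 * the top 10 bits of each 16-bit gamma value.
	 */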
2086 	for (i = 0; i < 256; i++) {
2087 		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2088 		       ((*r++ & 0xffc0) << 14) |
2089 		       ((*g++ & 0xffc0) << 4) |
2090 		       (*b++ >> 6));
2091 	}
2092 
2093 	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2094 	       ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2095 		(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2096 		ICON_DEGAMMA_MODE(0) |
2097 		(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2098 	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2099 	       ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2100 		(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2101 	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2102 	       ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2103 		(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2104 	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2105 	       ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2106 		(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2107 	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2108 	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2109 
2110 
2111 }
2112 
2113 static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
2114 {
2115 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2116 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2117 
2118 	switch (amdgpu_encoder->encoder_id) {
2119 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2120 		return dig->linkb ? 1 : 0;
2121 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2122 		return dig->linkb ? 3 : 2;
2123 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2124 		return dig->linkb ? 5 : 4;
2125 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2126 		return 6;
2127 	default:
2128 		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2129 		return 0;
2130 	}
2131 }
2132 
2133 /**
2134  * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
2135  *
2136  * @crtc: drm crtc
2137  *
2138  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2139  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2140  * monitors a dedicated PPLL must be used.  If a particular board has
2141  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2142  * as there is no need to program the PLL itself.  If we are not able to
2143  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2144  * avoid messing up an existing monitor.
2145  *
2147  */
2148 static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
2149 {
2150 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2151 	struct drm_device *dev = crtc->dev;
2152 	struct amdgpu_device *adev = drm_to_adev(dev);
2153 	u32 pll_in_use;
2154 	int pll;
2155 
2156 	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2157 		if (adev->clock.dp_extclk)
2158 			/* skip PPLL programming if using ext clock */
2159 			return ATOM_PPLL_INVALID;
2160 		else
2161 			return ATOM_PPLL0;
2162 	} else {
2163 		/* use the same PPLL for all monitors with the same clock */
2164 		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2165 		if (pll != ATOM_PPLL_INVALID)
2166 			return pll;
2167 	}
2168 
2169 	/* otherwise, pick whichever of PPLL2 and PPLL1 is free */
2170 	pll_in_use = amdgpu_pll_get_use_mask(crtc);
2171 	if (!(pll_in_use & (1 << ATOM_PPLL2)))
2172 		return ATOM_PPLL2;
2173 	if (!(pll_in_use & (1 << ATOM_PPLL1)))
2174 		return ATOM_PPLL1;
2175 	DRM_ERROR("unable to allocate a PPLL\n");
2176 	return ATOM_PPLL_INVALID;
2177 }
2178 
2179 static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2180 {
2181 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2182 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2183 	uint32_t cur_lock;
2184 
2185 	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2186 	if (lock)
2187 		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2188 	else
2189 		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2190 	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2191 }
2192 
2193 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
2194 {
2195 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2196 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2197 
2198 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2199 	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2200 	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2201 
2202 
2203 }
2204 
2205 static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
2206 {
2207 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2208 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2209 
2210 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2211 	       upper_32_bits(amdgpu_crtc->cursor_addr));
2212 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2213 	       lower_32_bits(amdgpu_crtc->cursor_addr));
2214 
2215 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2216 	       CUR_CONTROL__CURSOR_EN_MASK |
2217 	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2218 	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2219 
2220 }
2221 
2222 static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
2223 				       int x, int y)
2224 {
2225 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2226 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2227 	int xorigin = 0, yorigin = 0;
2228 
2229 	int w = amdgpu_crtc->cursor_width;
2230 
2231 	amdgpu_crtc->cursor_x = x;
2232 	amdgpu_crtc->cursor_y = y;
2233 
2234 	/* avivo cursors are offset into the total surface */
2235 	x += crtc->x;
2236 	y += crtc->y;
2237 	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2238 
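	/*
	 * When the cursor would start off-screen, clamp the position to 0
	 * and move the hotspot origin instead so the visible part of the
	 * cursor still lines up at the screen edge.
	 */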
2239 	if (x < 0) {
2240 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2241 		x = 0;
2242 	}
2243 	if (y < 0) {
2244 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2245 		y = 0;
2246 	}
2247 
2248 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2249 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2250 	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2251 	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2252 
2253 	return 0;
2254 }
2255 
2256 static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
2257 				     int x, int y)
2258 {
2259 	int ret;
2260 
2261 	dce_v6_0_lock_cursor(crtc, true);
2262 	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
2263 	dce_v6_0_lock_cursor(crtc, false);
2264 
2265 	return ret;
2266 }
2267 
2268 static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
2269 				     struct drm_file *file_priv,
2270 				     uint32_t handle,
2271 				     uint32_t width,
2272 				     uint32_t height,
2273 				     int32_t hot_x,
2274 				     int32_t hot_y)
2275 {
2276 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2277 	struct drm_gem_object *obj;
2278 	struct amdgpu_bo *aobj;
2279 	int ret;
2280 
2281 	if (!handle) {
2282 		/* turn off cursor */
2283 		dce_v6_0_hide_cursor(crtc);
2284 		obj = NULL;
2285 		goto unpin;
2286 	}
2287 
2288 	if ((width > amdgpu_crtc->max_cursor_width) ||
2289 	    (height > amdgpu_crtc->max_cursor_height)) {
2290 		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2291 		return -EINVAL;
2292 	}
2293 
2294 	obj = drm_gem_object_lookup(file_priv, handle);
2295 	if (!obj) {
2296 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2297 		return -ENOENT;
2298 	}
2299 
2300 	aobj = gem_to_amdgpu_bo(obj);
2301 	ret = amdgpu_bo_reserve(aobj, false);
2302 	if (ret != 0) {
2303 		drm_gem_object_put(obj);
2304 		return ret;
2305 	}
2306 
2307 	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2308 	amdgpu_bo_unreserve(aobj);
2309 	if (ret) {
2310 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2311 		drm_gem_object_put(obj);
2312 		return ret;
2313 	}
2314 	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2315 
2316 	dce_v6_0_lock_cursor(crtc, true);
2317 
2318 	if (width != amdgpu_crtc->cursor_width ||
2319 	    height != amdgpu_crtc->cursor_height ||
2320 	    hot_x != amdgpu_crtc->cursor_hot_x ||
2321 	    hot_y != amdgpu_crtc->cursor_hot_y) {
2322 		int x, y;
2323 
2324 		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2325 		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2326 
2327 		dce_v6_0_cursor_move_locked(crtc, x, y);
2328 
2329 		amdgpu_crtc->cursor_width = width;
2330 		amdgpu_crtc->cursor_height = height;
2331 		amdgpu_crtc->cursor_hot_x = hot_x;
2332 		amdgpu_crtc->cursor_hot_y = hot_y;
2333 	}
2334 
2335 	dce_v6_0_show_cursor(crtc);
2336 	dce_v6_0_lock_cursor(crtc, false);
2337 
2338 unpin:
2339 	if (amdgpu_crtc->cursor_bo) {
2340 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2341 		ret = amdgpu_bo_reserve(aobj, true);
2342 		if (likely(ret == 0)) {
2343 			amdgpu_bo_unpin(aobj);
2344 			amdgpu_bo_unreserve(aobj);
2345 		}
2346 		drm_gem_object_put(amdgpu_crtc->cursor_bo);
2347 	}
2348 
2349 	amdgpu_crtc->cursor_bo = obj;
2350 	return 0;
2351 }
2352 
2353 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
2354 {
2355 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2356 
2357 	if (amdgpu_crtc->cursor_bo) {
2358 		dce_v6_0_lock_cursor(crtc, true);
2359 
2360 		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2361 					    amdgpu_crtc->cursor_y);
2362 
2363 		dce_v6_0_show_cursor(crtc);
2364 		dce_v6_0_lock_cursor(crtc, false);
2365 	}
2366 }
2367 
2368 static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2369 				   u16 *blue, uint32_t size,
2370 				   struct drm_modeset_acquire_ctx *ctx)
2371 {
2372 	dce_v6_0_crtc_load_lut(crtc);
2373 
2374 	return 0;
2375 }
2376 
2377 static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2378 {
2379 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2380 
2381 	drm_crtc_cleanup(crtc);
2382 	kfree(amdgpu_crtc);
2383 }
2384 
2385 static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2386 	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
2387 	.cursor_move = dce_v6_0_crtc_cursor_move,
2388 	.gamma_set = dce_v6_0_crtc_gamma_set,
2389 	.set_config = amdgpu_display_crtc_set_config,
2390 	.destroy = dce_v6_0_crtc_destroy,
2391 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2392 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
2393 	.enable_vblank = amdgpu_enable_vblank_kms,
2394 	.disable_vblank = amdgpu_disable_vblank_kms,
2395 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2396 };
2397 
2398 static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2399 {
2400 	struct drm_device *dev = crtc->dev;
2401 	struct amdgpu_device *adev = drm_to_adev(dev);
2402 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2403 	unsigned type;
2404 
2405 	switch (mode) {
2406 	case DRM_MODE_DPMS_ON:
2407 		amdgpu_crtc->enabled = true;
2408 		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2409 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2410 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2411 		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2412 						amdgpu_crtc->crtc_id);
2413 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2414 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2415 		drm_crtc_vblank_on(crtc);
2416 		dce_v6_0_crtc_load_lut(crtc);
2417 		break;
2418 	case DRM_MODE_DPMS_STANDBY:
2419 	case DRM_MODE_DPMS_SUSPEND:
2420 	case DRM_MODE_DPMS_OFF:
2421 		drm_crtc_vblank_off(crtc);
2422 		if (amdgpu_crtc->enabled)
2423 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2424 		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2425 		amdgpu_crtc->enabled = false;
2426 		break;
2427 	}
2428 	/* adjust pm to dpms */
2429 	amdgpu_dpm_compute_clocks(adev);
2430 }
2431 
2432 static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2433 {
2434 	/* disable crtc pair power gating before programming */
2435 	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2436 	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2437 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2438 }
2439 
2440 static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2441 {
2442 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2443 	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2444 }
2445 
2446 static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2447 {
2448 
2449 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2450 	struct drm_device *dev = crtc->dev;
2451 	struct amdgpu_device *adev = drm_to_adev(dev);
2452 	struct amdgpu_atom_ss ss;
2453 	int i;
2454 
2455 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2456 	if (crtc->primary->fb) {
2457 		int r;
2458 		struct amdgpu_bo *abo;
2459 
2460 		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2461 		r = amdgpu_bo_reserve(abo, true);
2462 		if (unlikely(r))
2463 			DRM_ERROR("failed to reserve abo before unpin\n");
2464 		else {
2465 			amdgpu_bo_unpin(abo);
2466 			amdgpu_bo_unreserve(abo);
2467 		}
2468 	}
2469 	/* disable the GRPH */
2470 	dce_v6_0_grph_enable(crtc, false);
2471 
2472 	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2473 
2474 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2475 		if (adev->mode_info.crtcs[i] &&
2476 		    adev->mode_info.crtcs[i]->enabled &&
2477 		    i != amdgpu_crtc->crtc_id &&
2478 		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2479 			/* another crtc is still using this pll, so don't
2480 			 * turn it off
2481 			 */
2482 			goto done;
2483 		}
2484 	}
2485 
2486 	switch (amdgpu_crtc->pll_id) {
2487 	case ATOM_PPLL1:
2488 	case ATOM_PPLL2:
2489 		/* disable the ppll */
2490 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2491 						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2492 		break;
2493 	default:
2494 		break;
2495 	}
2496 done:
2497 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2498 	amdgpu_crtc->adjusted_clock = 0;
2499 	amdgpu_crtc->encoder = NULL;
2500 	amdgpu_crtc->connector = NULL;
2501 }
2502 
2503 static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2504 				  struct drm_display_mode *mode,
2505 				  struct drm_display_mode *adjusted_mode,
2506 				  int x, int y, struct drm_framebuffer *old_fb)
2507 {
2508 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2509 
2510 	if (!amdgpu_crtc->adjusted_clock)
2511 		return -EINVAL;
2512 
2513 	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2514 	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2515 	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2516 	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2517 	amdgpu_atombios_crtc_scaler_setup(crtc);
2518 	dce_v6_0_cursor_reset(crtc);
2519 	/* update the hw mode for dpm */
2520 	amdgpu_crtc->hw_mode = *adjusted_mode;
2521 
2522 	return 0;
2523 }
2524 
2525 static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2526 				     const struct drm_display_mode *mode,
2527 				     struct drm_display_mode *adjusted_mode)
2528 {
2529 
2530 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2531 	struct drm_device *dev = crtc->dev;
2532 	struct drm_encoder *encoder;
2533 
2534 	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2535 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2536 		if (encoder->crtc == crtc) {
2537 			amdgpu_crtc->encoder = encoder;
2538 			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2539 			break;
2540 		}
2541 	}
2542 	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2543 		amdgpu_crtc->encoder = NULL;
2544 		amdgpu_crtc->connector = NULL;
2545 		return false;
2546 	}
2547 	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2548 		return false;
2549 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2550 		return false;
2551 	/* pick pll */
2552 	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2553 	/* if we can't get a PPLL for a non-DP encoder, fail */
2554 	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2555 	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2556 		return false;
2557 
2558 	return true;
2559 }
2560 
2561 static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2562 				  struct drm_framebuffer *old_fb)
2563 {
2564 	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2565 }
2566 
2567 static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2568 					 struct drm_framebuffer *fb,
2569 					 int x, int y, enum mode_set_atomic state)
2570 {
2571 	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2572 }
2573 
2574 static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2575 	.dpms = dce_v6_0_crtc_dpms,
2576 	.mode_fixup = dce_v6_0_crtc_mode_fixup,
2577 	.mode_set = dce_v6_0_crtc_mode_set,
2578 	.mode_set_base = dce_v6_0_crtc_set_base,
2579 	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2580 	.prepare = dce_v6_0_crtc_prepare,
2581 	.commit = dce_v6_0_crtc_commit,
2582 	.disable = dce_v6_0_crtc_disable,
2583 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
2584 };
2585 
2586 static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2587 {
2588 	struct amdgpu_crtc *amdgpu_crtc;
2589 
2590 	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2591 			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2592 	if (amdgpu_crtc == NULL)
2593 		return -ENOMEM;
2594 
2595 	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2596 
2597 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2598 	amdgpu_crtc->crtc_id = index;
2599 	adev->mode_info.crtcs[index] = amdgpu_crtc;
2600 
2601 	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2602 	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2603 	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2604 	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2605 
2606 	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2607 
2608 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2609 	amdgpu_crtc->adjusted_clock = 0;
2610 	amdgpu_crtc->encoder = NULL;
2611 	amdgpu_crtc->connector = NULL;
2612 	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2613 
2614 	return 0;
2615 }
2616 
2617 static int dce_v6_0_early_init(void *handle)
2618 {
2619 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2620 
2621 	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2622 	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2623 
2624 	dce_v6_0_set_display_funcs(adev);
2625 
2626 	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
2627 
2628 	switch (adev->asic_type) {
2629 	case CHIP_TAHITI:
2630 	case CHIP_PITCAIRN:
2631 	case CHIP_VERDE:
2632 		adev->mode_info.num_hpd = 6;
2633 		adev->mode_info.num_dig = 6;
2634 		break;
2635 	case CHIP_OLAND:
2636 		adev->mode_info.num_hpd = 2;
2637 		adev->mode_info.num_dig = 2;
2638 		break;
2639 	default:
2640 		return -EINVAL;
2641 	}
2642 
2643 	dce_v6_0_set_irq_funcs(adev);
2644 
2645 	return 0;
2646 }
2647 
2648 static int dce_v6_0_sw_init(void *handle)
2649 {
2650 	int r, i;
2651 	bool ret;
2652 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2653 
2654 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2655 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2656 		if (r)
2657 			return r;
2658 	}
2659 
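	/* pageflip interrupt sources use ids 8..19, two per crtc */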
2660 	for (i = 8; i < 20; i += 2) {
2661 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2662 		if (r)
2663 			return r;
2664 	}
2665 
2666 	/* HPD hotplug */
2667 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2668 	if (r)
2669 		return r;
2670 
2671 	adev->mode_info.mode_config_initialized = true;
2672 
2673 	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2674 	adev_to_drm(adev)->mode_config.async_page_flip = true;
2675 	adev_to_drm(adev)->mode_config.max_width = 16384;
2676 	adev_to_drm(adev)->mode_config.max_height = 16384;
2677 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2678 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2679 	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2680 
2681 	r = amdgpu_display_modeset_create_props(adev);
2682 	if (r)
2683 		return r;
2684 
2685 	adev_to_drm(adev)->mode_config.max_width = 16384;
2686 	adev_to_drm(adev)->mode_config.max_height = 16384;
2687 
2688 	/* allocate crtcs */
2689 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2690 		r = dce_v6_0_crtc_init(adev, i);
2691 		if (r)
2692 			return r;
2693 	}
2694 
2695 	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2696 	if (ret)
2697 		amdgpu_display_print_display_setup(adev_to_drm(adev));
2698 	else
2699 		return -EINVAL;
2700 
2701 	/* setup afmt */
2702 	r = dce_v6_0_afmt_init(adev);
2703 	if (r)
2704 		return r;
2705 
2706 	r = dce_v6_0_audio_init(adev);
2707 	if (r)
2708 		return r;
2709 
2710 	/* Disable vblank IRQs aggressively for power-saving */
2711 	/* XXX: can this be enabled for DC? */
2712 	adev_to_drm(adev)->vblank_disable_immediate = true;
2713 
2714 	r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
2715 	if (r)
2716 		return r;
2717 
2718 	/* Pre-DCE11 */
2719 	INIT_DELAYED_WORK(&adev->hotplug_work,
2720 		  amdgpu_display_hotplug_work_func);
2721 
2722 	drm_kms_helper_poll_init(adev_to_drm(adev));
2723 
2724 	return r;
2725 }
2726 
2727 static int dce_v6_0_sw_fini(void *handle)
2728 {
2729 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2730 
2731 	kfree(adev->mode_info.bios_hardcoded_edid);
2732 
2733 	drm_kms_helper_poll_fini(adev_to_drm(adev));
2734 
2735 	dce_v6_0_audio_fini(adev);
2736 	dce_v6_0_afmt_fini(adev);
2737 
2738 	drm_mode_config_cleanup(adev_to_drm(adev));
2739 	adev->mode_info.mode_config_initialized = false;
2740 
2741 	return 0;
2742 }
2743 
2744 static int dce_v6_0_hw_init(void *handle)
2745 {
2746 	int i;
2747 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2748 
2749 	/* disable vga render */
2750 	dce_v6_0_set_vga_render_state(adev, false);
2751 	/* init dig PHYs, disp eng pll */
2752 	amdgpu_atombios_encoder_init_dig(adev);
2753 	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2754 
2755 	/* initialize hpd */
2756 	dce_v6_0_hpd_init(adev);
2757 
2758 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2759 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2760 	}
2761 
2762 	dce_v6_0_pageflip_interrupt_init(adev);
2763 
2764 	return 0;
2765 }
2766 
2767 static int dce_v6_0_hw_fini(void *handle)
2768 {
2769 	int i;
2770 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2771 
2772 	dce_v6_0_hpd_fini(adev);
2773 
2774 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2775 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2776 	}
2777 
2778 	dce_v6_0_pageflip_interrupt_fini(adev);
2779 
2780 	flush_delayed_work(&adev->hotplug_work);
2781 
2782 	return 0;
2783 }
2784 
2785 static int dce_v6_0_suspend(void *handle)
2786 {
2787 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2788 	int r;
2789 
2790 	r = amdgpu_display_suspend_helper(adev);
2791 	if (r)
2792 		return r;
2793 	adev->mode_info.bl_level =
2794 		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2795 
2796 	return dce_v6_0_hw_fini(handle);
2797 }
2798 
2799 static int dce_v6_0_resume(void *handle)
2800 {
2801 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2802 	int ret;
2803 
2804 	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2805 							   adev->mode_info.bl_level);
2806 
2807 	ret = dce_v6_0_hw_init(handle);
2808 
2809 	/* turn on the BL */
2810 	if (adev->mode_info.bl_encoder) {
2811 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2812 								  adev->mode_info.bl_encoder);
2813 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2814 						    bl_level);
2815 	}
2816 	if (ret)
2817 		return ret;
2818 
2819 	return amdgpu_display_resume_helper(adev);
2820 }
2821 
2822 static bool dce_v6_0_is_idle(void *handle)
2823 {
2824 	return true;
2825 }
2826 
2827 static int dce_v6_0_wait_for_idle(void *handle)
2828 {
2829 	return 0;
2830 }
2831 
2832 static int dce_v6_0_soft_reset(void *handle)
2833 {
2834 	DRM_INFO("dce_v6_0_soft_reset: not implemented\n");
2835 	return 0;
2836 }
2837 
2838 static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2839 						     int crtc,
2840 						     enum amdgpu_interrupt_state state)
2841 {
2842 	u32 reg_block, interrupt_mask;
2843 
2844 	if (crtc >= adev->mode_info.num_crtc) {
2845 		DRM_DEBUG("invalid crtc %d\n", crtc);
2846 		return;
2847 	}
2848 
2849 	switch (crtc) {
2850 	case 0:
2851 		reg_block = SI_CRTC0_REGISTER_OFFSET;
2852 		break;
2853 	case 1:
2854 		reg_block = SI_CRTC1_REGISTER_OFFSET;
2855 		break;
2856 	case 2:
2857 		reg_block = SI_CRTC2_REGISTER_OFFSET;
2858 		break;
2859 	case 3:
2860 		reg_block = SI_CRTC3_REGISTER_OFFSET;
2861 		break;
2862 	case 4:
2863 		reg_block = SI_CRTC4_REGISTER_OFFSET;
2864 		break;
2865 	case 5:
2866 		reg_block = SI_CRTC5_REGISTER_OFFSET;
2867 		break;
2868 	default:
2869 		DRM_DEBUG("invalid crtc %d\n", crtc);
2870 		return;
2871 	}
2872 
2873 	switch (state) {
2874 	case AMDGPU_IRQ_STATE_DISABLE:
2875 		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2876 		interrupt_mask &= ~VBLANK_INT_MASK;
2877 		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2878 		break;
2879 	case AMDGPU_IRQ_STATE_ENABLE:
2880 		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2881 		interrupt_mask |= VBLANK_INT_MASK;
2882 		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2883 		break;
2884 	default:
2885 		break;
2886 	}
2887 }
2888 
2889 static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2890 						    int crtc,
2891 						    enum amdgpu_interrupt_state state)
2892 {
2893 
2894 }
2895 
2896 static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2897 					    struct amdgpu_irq_src *src,
2898 					    unsigned type,
2899 					    enum amdgpu_interrupt_state state)
2900 {
2901 	u32 dc_hpd_int_cntl;
2902 
2903 	if (type >= adev->mode_info.num_hpd) {
2904 		DRM_DEBUG("invalid hpd %d\n", type);
2905 		return 0;
2906 	}
2907 
2908 	switch (state) {
2909 	case AMDGPU_IRQ_STATE_DISABLE:
2910 		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2911 		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
2912 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2913 		break;
2914 	case AMDGPU_IRQ_STATE_ENABLE:
2915 		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2916 		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
2917 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2918 		break;
2919 	default:
2920 		break;
2921 	}
2922 
2923 	return 0;
2924 }
2925 
2926 static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2927 					     struct amdgpu_irq_src *src,
2928 					     unsigned type,
2929 					     enum amdgpu_interrupt_state state)
2930 {
2931 	switch (type) {
2932 	case AMDGPU_CRTC_IRQ_VBLANK1:
2933 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2934 		break;
2935 	case AMDGPU_CRTC_IRQ_VBLANK2:
2936 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2937 		break;
2938 	case AMDGPU_CRTC_IRQ_VBLANK3:
2939 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2940 		break;
2941 	case AMDGPU_CRTC_IRQ_VBLANK4:
2942 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2943 		break;
2944 	case AMDGPU_CRTC_IRQ_VBLANK5:
2945 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2946 		break;
2947 	case AMDGPU_CRTC_IRQ_VBLANK6:
2948 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2949 		break;
2950 	case AMDGPU_CRTC_IRQ_VLINE1:
2951 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
2952 		break;
2953 	case AMDGPU_CRTC_IRQ_VLINE2:
2954 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
2955 		break;
2956 	case AMDGPU_CRTC_IRQ_VLINE3:
2957 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
2958 		break;
2959 	case AMDGPU_CRTC_IRQ_VLINE4:
2960 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
2961 		break;
2962 	case AMDGPU_CRTC_IRQ_VLINE5:
2963 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
2964 		break;
2965 	case AMDGPU_CRTC_IRQ_VLINE6:
2966 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
2967 		break;
2968 	default:
2969 		break;
2970 	}
2971 	return 0;
2972 }
2973 
2974 static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2975 			     struct amdgpu_irq_src *source,
2976 			     struct amdgpu_iv_entry *entry)
2977 {
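	/* crtc interrupt src_ids start at 1 (see sw_init), so src_id - 1 is the crtc index */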
2978 	unsigned crtc = entry->src_id - 1;
2979 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2980 	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
2981 								    crtc);
2982 
2983 	switch (entry->src_data[0]) {
2984 	case 0: /* vblank */
2985 		if (disp_int & interrupt_status_offsets[crtc].vblank)
2986 			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
2987 		else
2988 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2989 
2990 		if (amdgpu_irq_enabled(adev, source, irq_type)) {
2991 			drm_handle_vblank(adev_to_drm(adev), crtc);
2992 		}
2993 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
2994 		break;
2995 	case 1: /* vline */
2996 		if (disp_int & interrupt_status_offsets[crtc].vline)
2997 			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
2998 		else
2999 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3000 
3001 		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3002 		break;
3003 	default:
3004 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3005 		break;
3006 	}
3007 
3008 	return 0;
3009 }
3010 
3011 static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3012 						 struct amdgpu_irq_src *src,
3013 						 unsigned type,
3014 						 enum amdgpu_interrupt_state state)
3015 {
3016 	u32 reg;
3017 
3018 	if (type >= adev->mode_info.num_crtc) {
3019 		DRM_ERROR("invalid pageflip crtc %d\n", type);
3020 		return -EINVAL;
3021 	}
3022 
3023 	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3024 	if (state == AMDGPU_IRQ_STATE_DISABLE)
3025 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3026 		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3027 	else
3028 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3029 		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3030 
3031 	return 0;
3032 }
3033 
3034 static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
3035 				 struct amdgpu_irq_src *source,
3036 				 struct amdgpu_iv_entry *entry)
3037 {
3038 	unsigned long flags;
3039 	unsigned crtc_id;
3040 	struct amdgpu_crtc *amdgpu_crtc;
3041 	struct amdgpu_flip_work *works;
3042 
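	/* pageflip src_ids start at 8 with two ids per crtc (see sw_init) */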
3043 	crtc_id = (entry->src_id - 8) >> 1;
3044 	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3045 
3046 	if (crtc_id >= adev->mode_info.num_crtc) {
3047 		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3048 		return -EINVAL;
3049 	}
3050 
3051 	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3052 	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3053 		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3054 		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3055 
3056 	/* the IRQ can fire while the crtc is still being initialized */
3057 	if (amdgpu_crtc == NULL)
3058 		return 0;
3059 
3060 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3061 	works = amdgpu_crtc->pflip_works;
3062 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3063 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3064 						"AMDGPU_FLIP_SUBMITTED(%d)\n",
3065 						amdgpu_crtc->pflip_status,
3066 						AMDGPU_FLIP_SUBMITTED);
3067 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3068 		return 0;
3069 	}
3070 
3071 	/* page flip completed. clean up */
3072 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3073 	amdgpu_crtc->pflip_works = NULL;
3074 
3075 	/* wake up userspace */
3076 	if (works->event)
3077 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3078 
3079 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3080 
3081 	drm_crtc_vblank_put(&amdgpu_crtc->base);
3082 	schedule_work(&works->unpin_work);
3083 
3084 	return 0;
3085 }
3086 
3087 static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
3088 			    struct amdgpu_irq_src *source,
3089 			    struct amdgpu_iv_entry *entry)
3090 {
3091 	uint32_t disp_int, mask, tmp;
3092 	unsigned hpd;
3093 
3094 	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3095 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3096 		return 0;
3097 	}
3098 
3099 	hpd = entry->src_data[0];
3100 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3101 	mask = interrupt_status_offsets[hpd].hpd;
3102 
3103 	if (disp_int & mask) {
3104 		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3105 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3106 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3107 		schedule_delayed_work(&adev->hotplug_work, 0);
3108 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3109 	}
3110 
3111 	return 0;
3112 
3113 }
3114 
3115 static int dce_v6_0_set_clockgating_state(void *handle,
3116 					  enum amd_clockgating_state state)
3117 {
3118 	return 0;
3119 }
3120 
3121 static int dce_v6_0_set_powergating_state(void *handle,
3122 					  enum amd_powergating_state state)
3123 {
3124 	return 0;
3125 }
3126 
3127 static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
3128 	.name = "dce_v6_0",
3129 	.early_init = dce_v6_0_early_init,
3130 	.late_init = NULL,
3131 	.sw_init = dce_v6_0_sw_init,
3132 	.sw_fini = dce_v6_0_sw_fini,
3133 	.hw_init = dce_v6_0_hw_init,
3134 	.hw_fini = dce_v6_0_hw_fini,
3135 	.suspend = dce_v6_0_suspend,
3136 	.resume = dce_v6_0_resume,
3137 	.is_idle = dce_v6_0_is_idle,
3138 	.wait_for_idle = dce_v6_0_wait_for_idle,
3139 	.soft_reset = dce_v6_0_soft_reset,
3140 	.set_clockgating_state = dce_v6_0_set_clockgating_state,
3141 	.set_powergating_state = dce_v6_0_set_powergating_state,
3142 };
3143 
3144 static void
3145 dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
3146 			  struct drm_display_mode *mode,
3147 			  struct drm_display_mode *adjusted_mode)
3148 {
3149 
3150 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3151 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3152 
3153 	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3154 
3155 	/* need to call this here rather than in prepare() since we need some crtc info */
3156 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3157 
3158 	/* set scaler clears this on some chips */
3159 	/* the scaler setup clears this on some chips */
3160 
3161 	if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
3162 		dce_v6_0_afmt_enable(encoder, true);
3163 		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
3164 	}
3165 }
3166 
3167 static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
3168 {
3169 
3170 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
3171 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3172 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3173 
3174 	if ((amdgpu_encoder->active_device &
3175 	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3176 	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3177 	     ENCODER_OBJECT_ID_NONE)) {
3178 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3179 		if (dig) {
3180 			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
3181 			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3182 				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3183 		}
3184 	}
3185 
3186 	amdgpu_atombios_scratch_regs_lock(adev, true);
3187 
3188 	if (connector) {
3189 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3190 
3191 		/* select the clock/data port if it uses a router */
3192 		if (amdgpu_connector->router.cd_valid)
3193 			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3194 
3195 		/* turn eDP panel on for mode set */
3196 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3197 			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3198 							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3199 	}
3200 
3201 	/* this is needed for the pll/ss setup to work correctly in some cases */
3202 	amdgpu_atombios_encoder_set_crtc_source(encoder);
3203 	/* set up the FMT blocks */
3204 	dce_v6_0_program_fmt(encoder);
3205 }
3206 
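/*
 * Commit: turn the encoder back on now that the CRTC is set up, then release
 * the ATOM scratch-register lock taken in prepare().
 */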
3207 static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
3208 {
3209 
3210 	struct drm_device *dev = encoder->dev;
3211 	struct amdgpu_device *adev = drm_to_adev(dev);
3212 
3213 	/* need to call this here as we need the crtc set up */
3214 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3215 	amdgpu_atombios_scratch_regs_lock(adev, false);
3216 }
3217 
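/*
 * Disable an encoder: power it down, shut off AFMT for HDMI/DP and release
 * the DIG encoder assignment so it can be reused by another output.
 */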
3218 static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
3219 {
3220 
3221 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3222 	struct amdgpu_encoder_atom_dig *dig;
3223 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3224 
3225 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3226 
3227 	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3228 		if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
3229 			dce_v6_0_afmt_enable(encoder, false);
3230 		dig = amdgpu_encoder->enc_priv;
3231 		dig->dig_encoder = -1;
3232 	}
3233 	amdgpu_encoder->active_device = 0;
3234 }
3235 
3236 /* these are handled by the primary encoders */
3237 static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
3238 {
3239 
3240 }
3241 
3242 static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
3243 {
3244 
3245 }
3246 
3247 static void
3248 dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
3249 		      struct drm_display_mode *mode,
3250 		      struct drm_display_mode *adjusted_mode)
3251 {
3252 
3253 }
3254 
3255 static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
3256 {
3257 
3258 }
3259 
3260 static void
3261 dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
3262 {
3263 
3264 }
3265 
3266 static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
3267 				    const struct drm_display_mode *mode,
3268 				    struct drm_display_mode *adjusted_mode)
3269 {
3270 	return true;
3271 }
3272 
3273 static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
3274 	.dpms = dce_v6_0_ext_dpms,
3275 	.mode_fixup = dce_v6_0_ext_mode_fixup,
3276 	.prepare = dce_v6_0_ext_prepare,
3277 	.mode_set = dce_v6_0_ext_mode_set,
3278 	.commit = dce_v6_0_ext_commit,
3279 	.disable = dce_v6_0_ext_disable,
3280 	/* no detect for TMDS/LVDS yet */
3281 };
3282 
3283 static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
3284 	.dpms = amdgpu_atombios_encoder_dpms,
3285 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3286 	.prepare = dce_v6_0_encoder_prepare,
3287 	.mode_set = dce_v6_0_encoder_mode_set,
3288 	.commit = dce_v6_0_encoder_commit,
3289 	.disable = dce_v6_0_encoder_disable,
3290 	.detect = amdgpu_atombios_encoder_dig_detect,
3291 };
3292 
3293 static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
3294 	.dpms = amdgpu_atombios_encoder_dpms,
3295 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3296 	.prepare = dce_v6_0_encoder_prepare,
3297 	.mode_set = dce_v6_0_encoder_mode_set,
3298 	.commit = dce_v6_0_encoder_commit,
3299 	.detect = amdgpu_atombios_encoder_dac_detect,
3300 };
3301 
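/* Tear down an encoder: unregister the LCD backlight (if any) and free the
 * encoder private data and the encoder itself.
 */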
3302 static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
3303 {
3304 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3305 	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3306 		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3307 	kfree(amdgpu_encoder->enc_priv);
3308 	drm_encoder_cleanup(encoder);
3309 	kfree(amdgpu_encoder);
3310 }
3311 
3312 static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
3313 	.destroy = dce_v6_0_encoder_destroy,
3314 };
3315 
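/*
 * Register an encoder parsed from the ATOM object tables.  If an encoder with
 * the same enum was already added, just OR in the additional supported
 * devices; otherwise allocate a new amdgpu_encoder and bind the matching DAC,
 * DIG or external-bridge helper functions.
 */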
3316 static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
3317 				 uint32_t encoder_enum,
3318 				 uint32_t supported_device,
3319 				 u16 caps)
3320 {
3321 	struct drm_device *dev = adev_to_drm(adev);
3322 	struct drm_encoder *encoder;
3323 	struct amdgpu_encoder *amdgpu_encoder;
3324 
3325 	/* see if we already added it */
3326 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3327 		amdgpu_encoder = to_amdgpu_encoder(encoder);
3328 		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3329 			amdgpu_encoder->devices |= supported_device;
3330 			return;
3331 		}
3332 
3333 	}
3334 
3335 	/* add a new one */
3336 	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3337 	if (!amdgpu_encoder)
3338 		return;
3339 
3340 	encoder = &amdgpu_encoder->base;
3341 	switch (adev->mode_info.num_crtc) {
3342 	case 1:
3343 		encoder->possible_crtcs = 0x1;
3344 		break;
3345 	case 2:
3346 	default:
3347 		encoder->possible_crtcs = 0x3;
3348 		break;
3349 	case 4:
3350 		encoder->possible_crtcs = 0xf;
3351 		break;
3352 	case 6:
3353 		encoder->possible_crtcs = 0x3f;
3354 		break;
3355 	}
3356 
3357 	amdgpu_encoder->enc_priv = NULL;
3358 	amdgpu_encoder->encoder_enum = encoder_enum;
3359 	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3360 	amdgpu_encoder->devices = supported_device;
3361 	amdgpu_encoder->rmx_type = RMX_OFF;
3362 	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3363 	amdgpu_encoder->is_ext_encoder = false;
3364 	amdgpu_encoder->caps = caps;
3365 
3366 	switch (amdgpu_encoder->encoder_id) {
3367 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3368 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3369 		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3370 				 DRM_MODE_ENCODER_DAC, NULL);
3371 		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
3372 		break;
3373 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3374 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3375 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3376 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3377 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3378 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3379 			amdgpu_encoder->rmx_type = RMX_FULL;
3380 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3381 					 DRM_MODE_ENCODER_LVDS, NULL);
3382 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3383 		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3384 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3385 					 DRM_MODE_ENCODER_DAC, NULL);
3386 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3387 		} else {
3388 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3389 					 DRM_MODE_ENCODER_TMDS, NULL);
3390 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3391 		}
3392 		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
3393 		break;
3394 	case ENCODER_OBJECT_ID_SI170B:
3395 	case ENCODER_OBJECT_ID_CH7303:
3396 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3397 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3398 	case ENCODER_OBJECT_ID_TITFP513:
3399 	case ENCODER_OBJECT_ID_VT1623:
3400 	case ENCODER_OBJECT_ID_HDMI_SI1930:
3401 	case ENCODER_OBJECT_ID_TRAVIS:
3402 	case ENCODER_OBJECT_ID_NUTMEG:
3403 		/* these are handled by the primary encoders */
3404 		amdgpu_encoder->is_ext_encoder = true;
3405 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3406 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3407 					 DRM_MODE_ENCODER_LVDS, NULL);
3408 		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3409 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3410 					 DRM_MODE_ENCODER_DAC, NULL);
3411 		else
3412 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3413 					 DRM_MODE_ENCODER_TMDS, NULL);
3414 		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3415 		break;
3416 	}
3417 }
3418 
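/* Display callbacks exposed to the rest of amdgpu via adev->mode_info.funcs. */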
3419 static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3420 	.bandwidth_update = &dce_v6_0_bandwidth_update,
3421 	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
3422 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3423 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3424 	.hpd_sense = &dce_v6_0_hpd_sense,
3425 	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3426 	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3427 	.page_flip = &dce_v6_0_page_flip,
3428 	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3429 	.add_encoder = &dce_v6_0_encoder_add,
3430 	.add_connector = &amdgpu_connector_add,
3431 };
3432 
3433 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3434 {
3435 	adev->mode_info.funcs = &dce_v6_0_display_funcs;
3436 }
3437 
3438 static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3439 	.set = dce_v6_0_set_crtc_interrupt_state,
3440 	.process = dce_v6_0_crtc_irq,
3441 };
3442 
3443 static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3444 	.set = dce_v6_0_set_pageflip_interrupt_state,
3445 	.process = dce_v6_0_pageflip_irq,
3446 };
3447 
3448 static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3449 	.set = dce_v6_0_set_hpd_interrupt_state,
3450 	.process = dce_v6_0_hpd_irq,
3451 };
3452 
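/*
 * Hook up the CRTC (vblank/vline), page-flip and HPD interrupt sources.  The
 * number of interrupt types scales with the number of CRTCs and HPD pads.
 */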
3453 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3454 {
3455 	if (adev->mode_info.num_crtc > 0)
3456 		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3457 	else
3458 		adev->crtc_irq.num_types = 0;
3459 	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3460 
3461 	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3462 	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3463 
3464 	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3465 	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3466 }
3467 
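/* IP block descriptors: DCE 6.0 and DCE 6.4 share the same implementation. */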
3468 const struct amdgpu_ip_block_version dce_v6_0_ip_block =
3469 {
3470 	.type = AMD_IP_BLOCK_TYPE_DCE,
3471 	.major = 6,
3472 	.minor = 0,
3473 	.rev = 0,
3474 	.funcs = &dce_v6_0_ip_funcs,
3475 };
3476 
3477 const struct amdgpu_ip_block_version dce_v6_4_ip_block =
3478 {
3479 	.type = AMD_IP_BLOCK_TYPE_DCE,
3480 	.major = 6,
3481 	.minor = 4,
3482 	.rev = 0,
3483 	.funcs = &dce_v6_0_ip_funcs,
3484 };
3485