xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c (revision b830f94f)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/pci.h>
25 
26 #include <drm/drm_fourcc.h>
27 #include <drm/drm_vblank.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_pm.h"
31 #include "amdgpu_i2c.h"
32 #include "atom.h"
33 #include "amdgpu_atombios.h"
34 #include "atombios_crtc.h"
35 #include "atombios_encoders.h"
36 #include "amdgpu_pll.h"
37 #include "amdgpu_connectors.h"
38 #include "amdgpu_display.h"
39 
40 #include "bif/bif_3_0_d.h"
41 #include "bif/bif_3_0_sh_mask.h"
42 #include "oss/oss_1_0_d.h"
43 #include "oss/oss_1_0_sh_mask.h"
44 #include "gca/gfx_6_0_d.h"
45 #include "gca/gfx_6_0_sh_mask.h"
46 #include "gmc/gmc_6_0_d.h"
47 #include "gmc/gmc_6_0_sh_mask.h"
48 #include "dce/dce_6_0_d.h"
49 #include "dce/dce_6_0_sh_mask.h"
50 #include "gca/gfx_7_2_enum.h"
51 #include "dce_v6_0.h"
52 #include "si_enums.h"
53 
54 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
55 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
56 
57 static const u32 crtc_offsets[6] =
58 {
59 	SI_CRTC0_REGISTER_OFFSET,
60 	SI_CRTC1_REGISTER_OFFSET,
61 	SI_CRTC2_REGISTER_OFFSET,
62 	SI_CRTC3_REGISTER_OFFSET,
63 	SI_CRTC4_REGISTER_OFFSET,
64 	SI_CRTC5_REGISTER_OFFSET
65 };
66 
67 static const u32 hpd_offsets[] =
68 {
69 	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
70 	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
71 	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
72 	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
73 	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
74 	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
75 };
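
/*
 * The HPD register blocks are laid out at a fixed stride from the HPD1
 * block, so a pin's registers are addressed by adding its entry here to
 * the corresponding HPD1 register, e.g. (as dce_v6_0_hpd_sense() does
 * below):
 *
 *	RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd])
 */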
76 
77 static const uint32_t dig_offsets[] = {
78 	SI_CRTC0_REGISTER_OFFSET,
79 	SI_CRTC1_REGISTER_OFFSET,
80 	SI_CRTC2_REGISTER_OFFSET,
81 	SI_CRTC3_REGISTER_OFFSET,
82 	SI_CRTC4_REGISTER_OFFSET,
83 	SI_CRTC5_REGISTER_OFFSET,
84 	(0x13830 - 0x7030) >> 2,
85 };
86 
87 static const struct {
88 	uint32_t	reg;
89 	uint32_t	vblank;
90 	uint32_t	vline;
91 	uint32_t	hpd;
92 
93 } interrupt_status_offsets[6] = { {
94 	.reg = mmDISP_INTERRUPT_STATUS,
95 	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
96 	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
97 	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
98 }, {
99 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
100 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
101 	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
102 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
103 }, {
104 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
105 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
106 	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
107 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
108 }, {
109 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
110 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
111 	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
112 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
113 }, {
114 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
115 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
116 	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
117 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
118 }, {
119 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
120 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
121 	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
122 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
123 } };
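
/*
 * Illustrative sketch of how an entry of this table is consumed: the
 * interrupt handlers read the per-pipe status word once and then test
 * the decoded masks, roughly
 *
 *	u32 disp_int = RREG32(interrupt_status_offsets[crtc].reg);
 *
 *	if (disp_int & interrupt_status_offsets[crtc].vblank)
 *		... vblank fired on this crtc ...
 */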
124 
125 static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
126 				     u32 block_offset, u32 reg)
127 {
128 	unsigned long flags;
129 	u32 r;
130 
131 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
132 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
133 	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
134 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
135 
136 	return r;
137 }
138 
139 static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
140 				      u32 block_offset, u32 reg, u32 v)
141 {
142 	unsigned long flags;
143 
144 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
145 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
146 		reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
147 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
148 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
149 }
150 
151 static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
152 {
153 	if (crtc >= adev->mode_info.num_crtc)
154 		return 0;
155 	else
156 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
157 }
158 
159 static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
160 {
161 	unsigned i;
162 
163 	/* Enable pflip interrupts */
164 	for (i = 0; i < adev->mode_info.num_crtc; i++)
165 		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
166 }
167 
168 static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
169 {
170 	unsigned i;
171 
172 	/* Disable pflip interrupts */
173 	for (i = 0; i < adev->mode_info.num_crtc; i++)
174 		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
175 }
176 
/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to flip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address (evergreen+).
 */
190 static void dce_v6_0_page_flip(struct amdgpu_device *adev,
191 			       int crtc_id, u64 crtc_base, bool async)
192 {
193 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
194 
195 	/* flip at hsync for async, default is vsync */
196 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
197 	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
198 	/* update the scanout addresses */
199 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
200 	       upper_32_bits(crtc_base));
201 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
202 	       (u32)crtc_base);
203 
204 	/* post the write */
205 	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
206 }
207 
208 static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
209 					u32 *vbl, u32 *position)
210 {
211 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
212 		return -EINVAL;
213 	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
214 	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
215 
216 	return 0;
}
219 
220 /**
221  * dce_v6_0_hpd_sense - hpd sense callback.
222  *
223  * @adev: amdgpu_device pointer
224  * @hpd: hpd (hotplug detect) pin
225  *
226  * Checks if a digital monitor is connected (evergreen+).
227  * Returns true if connected, false if not connected.
228  */
229 static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
230 			       enum amdgpu_hpd_id hpd)
231 {
232 	bool connected = false;
233 
234 	if (hpd >= adev->mode_info.num_hpd)
235 		return connected;
236 
237 	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
238 		connected = true;
239 
240 	return connected;
241 }
242 
243 /**
244  * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
245  *
246  * @adev: amdgpu_device pointer
247  * @hpd: hpd (hotplug detect) pin
248  *
249  * Set the polarity of the hpd pin (evergreen+).
250  */
251 static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
252 				      enum amdgpu_hpd_id hpd)
253 {
254 	u32 tmp;
255 	bool connected = dce_v6_0_hpd_sense(adev, hpd);
256 
257 	if (hpd >= adev->mode_info.num_hpd)
258 		return;
259 
260 	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
261 	if (connected)
262 		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
263 	else
264 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
265 	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
266 }
267 
268 /**
269  * dce_v6_0_hpd_init - hpd setup callback.
270  *
271  * @adev: amdgpu_device pointer
272  *
273  * Setup the hpd pins used by the card (evergreen+).
274  * Enable the pin, set the polarity, and enable the hpd interrupts.
275  */
276 static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
277 {
278 	struct drm_device *dev = adev->ddev;
279 	struct drm_connector *connector;
280 	u32 tmp;
281 
282 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
283 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
284 
285 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
286 			continue;
287 
288 		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
289 		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
290 		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
291 
292 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
293 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid breaking
			 * the aux dp channel on imac and to help (but not completely
			 * fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also to avoid interrupt storms during dpms.
			 */
299 			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
300 			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
301 			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
302 			continue;
303 		}
304 
305 		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
306 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
307 	}
}
310 
311 /**
312  * dce_v6_0_hpd_fini - hpd tear down callback.
313  *
314  * @adev: amdgpu_device pointer
315  *
316  * Tear down the hpd pins used by the card (evergreen+).
317  * Disable the hpd interrupts.
318  */
319 static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
320 {
321 	struct drm_device *dev = adev->ddev;
322 	struct drm_connector *connector;
323 	u32 tmp;
324 
325 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
326 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
327 
328 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
329 			continue;
330 
331 		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
332 		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
334 
335 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
336 	}
337 }
338 
339 static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
340 {
341 	return mmDC_GPIO_HPD_A;
342 }
343 
344 static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
345 					  bool render)
346 {
347 	if (!render)
348 		WREG32(mmVGA_RENDER_CONTROL,
349 			RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
}
352 
353 static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
354 {
355 	switch (adev->asic_type) {
356 	case CHIP_TAHITI:
357 	case CHIP_PITCAIRN:
358 	case CHIP_VERDE:
359 		return 6;
360 	case CHIP_OLAND:
361 		return 2;
362 	default:
363 		return 0;
364 	}
365 }
366 
367 void dce_v6_0_disable_dce(struct amdgpu_device *adev)
368 {
	/* Disable VGA render and enabled CRTCs, if the ASIC has a DCE engine */
370 	if (amdgpu_atombios_has_dce_engine_info(adev)) {
371 		u32 tmp;
372 		int crtc_enabled, i;
373 
374 		dce_v6_0_set_vga_render_state(adev, false);
375 
		/* Disable CRTCs */
377 		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
378 			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
379 				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
380 			if (crtc_enabled) {
381 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
382 				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
383 				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
384 				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
385 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
386 			}
387 		}
388 	}
389 }
390 
391 static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
394 	struct drm_device *dev = encoder->dev;
395 	struct amdgpu_device *adev = dev->dev_private;
396 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
397 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
398 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
399 	int bpc = 0;
400 	u32 tmp = 0;
401 	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
402 
403 	if (connector) {
404 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
405 		bpc = amdgpu_connector_get_monitor_bpc(connector);
406 		dither = amdgpu_connector->dither;
407 	}
408 
409 	/* LVDS FMT is set up by atom */
410 	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
411 		return;
412 
413 	if (bpc == 0)
		return;

	switch (bpc) {
418 	case 6:
419 		if (dither == AMDGPU_FMT_DITHER_ENABLE)
420 			/* XXX sort out optimal dither settings */
421 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
422 				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
423 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
424 		else
425 			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
426 		break;
427 	case 8:
428 		if (dither == AMDGPU_FMT_DITHER_ENABLE)
429 			/* XXX sort out optimal dither settings */
430 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
431 				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
432 				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
433 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
434 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
435 		else
436 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
437 				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
438 		break;
439 	case 10:
440 	default:
441 		/* not needed */
442 		break;
443 	}
444 
445 	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
446 }
447 
448 /**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (SI).
454  * Used for display watermark bandwidth calculations
455  * Returns the number of dram channels
456  */
457 static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
458 {
459 	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
460 
461 	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
462 	case 0:
463 	default:
464 		return 1;
465 	case 1:
466 		return 2;
467 	case 2:
468 		return 4;
469 	case 3:
470 		return 8;
471 	case 4:
472 		return 3;
473 	case 5:
474 		return 6;
475 	case 6:
476 		return 10;
477 	case 7:
478 		return 12;
479 	case 8:
480 		return 16;
481 	}
482 }
483 
484 struct dce6_wm_params {
485 	u32 dram_channels; /* number of dram channels */
486 	u32 yclk;          /* bandwidth per dram data pin in kHz */
487 	u32 sclk;          /* engine clock in kHz */
488 	u32 disp_clk;      /* display clock in kHz */
489 	u32 src_width;     /* viewport width */
490 	u32 active_time;   /* active display time in ns */
491 	u32 blank_time;    /* blank time in ns */
492 	bool interlaced;    /* mode is interlaced */
493 	fixed20_12 vsc;    /* vertical scale ratio */
494 	u32 num_heads;     /* number of active crtcs */
495 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
496 	u32 lb_size;       /* line buffer allocated to pipe */
497 	u32 vtaps;         /* vertical scaler taps */
498 };
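
/*
 * The watermark math below uses the drm fixed20_12 helpers (20.12 fixed
 * point, see drm_fixed.h): dfixed_const(x) loads an integer, dfixed_mul()
 * and dfixed_div() multiply and divide, and dfixed_trunc() drops the
 * fractional part.  Fractions like 0.7 are therefore expressed as
 * dfixed_const(7) divided by dfixed_const(10).
 */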
499 
500 /**
501  * dce_v6_0_dram_bandwidth - get the dram bandwidth
502  *
503  * @wm: watermark calculation data
504  *
 * Calculate the raw dram bandwidth (SI).
506  * Used for display watermark bandwidth calculations
507  * Returns the dram bandwidth in MBytes/s
508  */
509 static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
510 {
511 	/* Calculate raw DRAM Bandwidth */
512 	fixed20_12 dram_efficiency; /* 0.7 */
513 	fixed20_12 yclk, dram_channels, bandwidth;
514 	fixed20_12 a;
515 
516 	a.full = dfixed_const(1000);
517 	yclk.full = dfixed_const(wm->yclk);
518 	yclk.full = dfixed_div(yclk, a);
519 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
520 	a.full = dfixed_const(10);
521 	dram_efficiency.full = dfixed_const(7);
522 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
523 	bandwidth.full = dfixed_mul(dram_channels, yclk);
524 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
525 
526 	return dfixed_trunc(bandwidth);
527 }
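
/*
 * Worked example with illustrative numbers (not taken from real
 * hardware): wm->yclk = 2000000 (a 2 GHz effective DRAM clock in kHz)
 * and 2 channels give (2000000 / 1000) * (2 * 4) * 0.7 = 11200 MB/s.
 */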
528 
529 /**
530  * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
531  *
532  * @wm: watermark calculation data
533  *
 * Calculate the dram bandwidth used for display (SI).
535  * Used for display watermark bandwidth calculations
536  * Returns the dram bandwidth for display in MBytes/s
537  */
538 static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
539 {
540 	/* Calculate DRAM Bandwidth and the part allocated to display. */
541 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
542 	fixed20_12 yclk, dram_channels, bandwidth;
543 	fixed20_12 a;
544 
545 	a.full = dfixed_const(1000);
546 	yclk.full = dfixed_const(wm->yclk);
547 	yclk.full = dfixed_div(yclk, a);
548 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
549 	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
551 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
552 	bandwidth.full = dfixed_mul(dram_channels, yclk);
553 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
554 
555 	return dfixed_trunc(bandwidth);
556 }
557 
558 /**
559  * dce_v6_0_data_return_bandwidth - get the data return bandwidth
560  *
561  * @wm: watermark calculation data
562  *
 * Calculate the data return bandwidth used for display (SI).
564  * Used for display watermark bandwidth calculations
565  * Returns the data return bandwidth in MBytes/s
566  */
567 static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
568 {
569 	/* Calculate the display Data return Bandwidth */
570 	fixed20_12 return_efficiency; /* 0.8 */
571 	fixed20_12 sclk, bandwidth;
572 	fixed20_12 a;
573 
574 	a.full = dfixed_const(1000);
575 	sclk.full = dfixed_const(wm->sclk);
576 	sclk.full = dfixed_div(sclk, a);
577 	a.full = dfixed_const(10);
578 	return_efficiency.full = dfixed_const(8);
579 	return_efficiency.full = dfixed_div(return_efficiency, a);
580 	a.full = dfixed_const(32);
581 	bandwidth.full = dfixed_mul(a, sclk);
582 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
583 
584 	return dfixed_trunc(bandwidth);
585 }
586 
587 /**
588  * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
589  *
590  * @wm: watermark calculation data
591  *
 * Calculate the dmif bandwidth used for display (SI).
593  * Used for display watermark bandwidth calculations
594  * Returns the dmif bandwidth in MBytes/s
595  */
596 static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
597 {
598 	/* Calculate the DMIF Request Bandwidth */
599 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
600 	fixed20_12 disp_clk, bandwidth;
601 	fixed20_12 a, b;
602 
603 	a.full = dfixed_const(1000);
604 	disp_clk.full = dfixed_const(wm->disp_clk);
605 	disp_clk.full = dfixed_div(disp_clk, a);
606 	a.full = dfixed_const(32);
607 	b.full = dfixed_mul(a, disp_clk);
608 
609 	a.full = dfixed_const(10);
610 	disp_clk_request_efficiency.full = dfixed_const(8);
611 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
612 
613 	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
614 
615 	return dfixed_trunc(bandwidth);
616 }
617 
618 /**
619  * dce_v6_0_available_bandwidth - get the min available bandwidth
620  *
621  * @wm: watermark calculation data
622  *
 * Calculate the min available bandwidth used for display (SI).
624  * Used for display watermark bandwidth calculations
625  * Returns the min available bandwidth in MBytes/s
626  */
627 static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
628 {
	/* Calculate the available bandwidth.  Display can use this temporarily but not on average. */
630 	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
631 	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
632 	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
633 
634 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
635 }
636 
637 /**
638  * dce_v6_0_average_bandwidth - get the average available bandwidth
639  *
640  * @wm: watermark calculation data
641  *
 * Calculate the average available bandwidth used for display (SI).
643  * Used for display watermark bandwidth calculations
644  * Returns the average available bandwidth in MBytes/s
645  */
646 static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
647 {
648 	/* Calculate the display mode Average Bandwidth
649 	 * DisplayMode should contain the source and destination dimensions,
650 	 * timing, etc.
651 	 */
652 	fixed20_12 bpp;
653 	fixed20_12 line_time;
654 	fixed20_12 src_width;
655 	fixed20_12 bandwidth;
656 	fixed20_12 a;
657 
658 	a.full = dfixed_const(1000);
659 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
660 	line_time.full = dfixed_div(line_time, a);
661 	bpp.full = dfixed_const(wm->bytes_per_pixel);
662 	src_width.full = dfixed_const(wm->src_width);
663 	bandwidth.full = dfixed_mul(src_width, bpp);
664 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
665 	bandwidth.full = dfixed_div(bandwidth, line_time);
666 
667 	return dfixed_trunc(bandwidth);
668 }
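
/*
 * Worked example (illustrative): a 1920 pixel wide source at 4 bytes per
 * pixel, vsc = 1 and a 12600 ns line (active + blank) averages
 * 1920 * 4 / 12.6 us ~= 609 MB/s.
 */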
669 
670 /**
671  * dce_v6_0_latency_watermark - get the latency watermark
672  *
673  * @wm: watermark calculation data
674  *
 * Calculate the latency watermark (SI).
676  * Used for display watermark bandwidth calculations
677  * Returns the latency watermark in ns
678  */
679 static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
680 {
681 	/* First calculate the latency in ns */
682 	u32 mc_latency = 2000; /* 2000 ns. */
683 	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
684 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
685 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
686 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
687 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
688 		(wm->num_heads * cursor_line_pair_return_time);
689 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
690 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
691 	u32 tmp, dmif_size = 12288;
692 	fixed20_12 a, b, c;
693 
694 	if (wm->num_heads == 0)
695 		return 0;
696 
697 	a.full = dfixed_const(2);
698 	b.full = dfixed_const(1);
699 	if ((wm->vsc.full > a.full) ||
700 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
701 	    (wm->vtaps >= 5) ||
702 	    ((wm->vsc.full >= a.full) && wm->interlaced))
703 		max_src_lines_per_dst_line = 4;
704 	else
705 		max_src_lines_per_dst_line = 2;
706 
707 	a.full = dfixed_const(available_bandwidth);
708 	b.full = dfixed_const(wm->num_heads);
709 	a.full = dfixed_div(a, b);
710 	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
711 	tmp = min(dfixed_trunc(a), tmp);
712 
713 	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
714 
715 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
716 	b.full = dfixed_const(1000);
717 	c.full = dfixed_const(lb_fill_bw);
718 	b.full = dfixed_div(c, b);
719 	a.full = dfixed_div(a, b);
720 	line_fill_time = dfixed_trunc(a);
721 
722 	if (line_fill_time < wm->active_time)
723 		return latency;
724 	else
725 		return latency + (line_fill_time - wm->active_time);
}
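
/*
 * Worked example (illustrative numbers): with 11200 MB/s available,
 * one head and a 148500 kHz display clock:
 *
 *	worst_chunk_return_time      = 512 * 8 * 1000 / 11200 ~= 365 ns
 *	cursor_line_pair_return_time = 128 * 4 * 1000 / 11200 ~= 45 ns
 *	dc_latency                   = 40000000 / 148500      ~= 269 ns
 *	latency ~= 2000 + (2 * 365 + 45) + 269 ~= 3044 ns
 *
 * plus a penalty if the line buffer cannot refill within the active time.
 */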
728 
729 /**
730  * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
731  * average and available dram bandwidth
732  *
733  * @wm: watermark calculation data
734  *
735  * Check if the display average bandwidth fits in the display
 * dram bandwidth (SI).
737  * Used for display watermark bandwidth calculations
738  * Returns true if the display fits, false if not.
739  */
740 static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
741 {
742 	if (dce_v6_0_average_bandwidth(wm) <=
743 	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
744 		return true;
745 	else
746 		return false;
747 }
748 
749 /**
750  * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
751  * average and available bandwidth
752  *
753  * @wm: watermark calculation data
754  *
755  * Check if the display average bandwidth fits in the display
 * available bandwidth (SI).
757  * Used for display watermark bandwidth calculations
758  * Returns true if the display fits, false if not.
759  */
760 static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
761 {
762 	if (dce_v6_0_average_bandwidth(wm) <=
763 	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
764 		return true;
765 	else
766 		return false;
767 }
768 
769 /**
770  * dce_v6_0_check_latency_hiding - check latency hiding
771  *
772  * @wm: watermark calculation data
773  *
 * Check latency hiding (SI).
775  * Used for display watermark bandwidth calculations
776  * Returns true if the display fits, false if not.
777  */
778 static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
779 {
780 	u32 lb_partitions = wm->lb_size / wm->src_width;
781 	u32 line_time = wm->active_time + wm->blank_time;
782 	u32 latency_tolerant_lines;
783 	u32 latency_hiding;
784 	fixed20_12 a;
785 
786 	a.full = dfixed_const(1);
787 	if (wm->vsc.full > a.full)
788 		latency_tolerant_lines = 1;
789 	else {
790 		if (lb_partitions <= (wm->vtaps + 1))
791 			latency_tolerant_lines = 1;
792 		else
793 			latency_tolerant_lines = 2;
794 	}
795 
796 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
797 
798 	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
799 		return true;
800 	else
801 		return false;
802 }
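
/*
 * E.g. (illustrative): lb_size = 8192 with a 1920 pixel source gives 4
 * line buffer partitions; with vsc <= 1 and vtaps = 1 that tolerates 2
 * lines, so latency_hiding = 2 * line_time + blank_time, comfortably
 * above a ~3 us latency watermark for typical modes.
 */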
803 
804 /**
805  * dce_v6_0_program_watermarks - program display watermarks
806  *
807  * @adev: amdgpu_device pointer
808  * @amdgpu_crtc: the selected display controller
809  * @lb_size: line buffer size
810  * @num_heads: number of display controllers in use
811  *
812  * Calculate and program the display watermarks for the
 * selected display controller (SI).
814  */
815 static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
816 					struct amdgpu_crtc *amdgpu_crtc,
817 					u32 lb_size, u32 num_heads)
818 {
819 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
820 	struct dce6_wm_params wm_low, wm_high;
821 	u32 dram_channels;
822 	u32 active_time;
823 	u32 line_time = 0;
824 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
825 	u32 priority_a_mark = 0, priority_b_mark = 0;
826 	u32 priority_a_cnt = PRIORITY_OFF;
827 	u32 priority_b_cnt = PRIORITY_OFF;
828 	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
829 	fixed20_12 a, b, c;
830 
831 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
832 		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
833 					    (u32)mode->clock);
834 		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
835 					  (u32)mode->clock);
836 		line_time = min(line_time, (u32)65535);
837 		priority_a_cnt = 0;
838 		priority_b_cnt = 0;
839 
840 		dram_channels = si_get_number_of_dram_channels(adev);
841 
842 		/* watermark for high clocks */
843 		if (adev->pm.dpm_enabled) {
844 			wm_high.yclk =
845 				amdgpu_dpm_get_mclk(adev, false) * 10;
846 			wm_high.sclk =
847 				amdgpu_dpm_get_sclk(adev, false) * 10;
848 		} else {
849 			wm_high.yclk = adev->pm.current_mclk * 10;
850 			wm_high.sclk = adev->pm.current_sclk * 10;
851 		}
852 
853 		wm_high.disp_clk = mode->clock;
854 		wm_high.src_width = mode->crtc_hdisplay;
855 		wm_high.active_time = active_time;
856 		wm_high.blank_time = line_time - wm_high.active_time;
857 		wm_high.interlaced = false;
858 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
859 			wm_high.interlaced = true;
860 		wm_high.vsc = amdgpu_crtc->vsc;
861 		wm_high.vtaps = 1;
862 		if (amdgpu_crtc->rmx_type != RMX_OFF)
863 			wm_high.vtaps = 2;
864 		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
865 		wm_high.lb_size = lb_size;
866 		wm_high.dram_channels = dram_channels;
867 		wm_high.num_heads = num_heads;
868 
		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
871 			wm_low.yclk =
872 				amdgpu_dpm_get_mclk(adev, true) * 10;
873 			wm_low.sclk =
874 				amdgpu_dpm_get_sclk(adev, true) * 10;
875 		} else {
876 			wm_low.yclk = adev->pm.current_mclk * 10;
877 			wm_low.sclk = adev->pm.current_sclk * 10;
878 		}
879 
880 		wm_low.disp_clk = mode->clock;
881 		wm_low.src_width = mode->crtc_hdisplay;
882 		wm_low.active_time = active_time;
883 		wm_low.blank_time = line_time - wm_low.active_time;
884 		wm_low.interlaced = false;
885 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
886 			wm_low.interlaced = true;
887 		wm_low.vsc = amdgpu_crtc->vsc;
888 		wm_low.vtaps = 1;
889 		if (amdgpu_crtc->rmx_type != RMX_OFF)
890 			wm_low.vtaps = 2;
891 		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
892 		wm_low.lb_size = lb_size;
893 		wm_low.dram_channels = dram_channels;
894 		wm_low.num_heads = num_heads;
895 
896 		/* set for high clocks */
897 		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
898 		/* set for low clocks */
899 		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
900 
901 		/* possibly force display priority to high */
902 		/* should really do this at mode validation time... */
903 		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
904 		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
905 		    !dce_v6_0_check_latency_hiding(&wm_high) ||
906 		    (adev->mode_info.disp_priority == 2)) {
907 			DRM_DEBUG_KMS("force priority to high\n");
908 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
909 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
910 		}
911 		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
912 		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
913 		    !dce_v6_0_check_latency_hiding(&wm_low) ||
914 		    (adev->mode_info.disp_priority == 2)) {
915 			DRM_DEBUG_KMS("force priority to high\n");
916 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
917 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
918 		}
919 
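		/*
		 * Convert the latency watermark from ns to a priority mark:
		 * latency * pixel clock (MHz) gives pixels in flight, scaled
		 * by the horizontal scale ratio and expressed in units of
		 * 16 pixels.
		 */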
920 		a.full = dfixed_const(1000);
921 		b.full = dfixed_const(mode->clock);
922 		b.full = dfixed_div(b, a);
923 		c.full = dfixed_const(latency_watermark_a);
924 		c.full = dfixed_mul(c, b);
925 		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
926 		c.full = dfixed_div(c, a);
927 		a.full = dfixed_const(16);
928 		c.full = dfixed_div(c, a);
929 		priority_a_mark = dfixed_trunc(c);
930 		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
931 
932 		a.full = dfixed_const(1000);
933 		b.full = dfixed_const(mode->clock);
934 		b.full = dfixed_div(b, a);
935 		c.full = dfixed_const(latency_watermark_b);
936 		c.full = dfixed_mul(c, b);
937 		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
938 		c.full = dfixed_div(c, a);
939 		a.full = dfixed_const(16);
940 		c.full = dfixed_div(c, a);
941 		priority_b_mark = dfixed_trunc(c);
942 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
943 
944 		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
945 	}
946 
947 	/* select wm A */
948 	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
949 	tmp = arb_control3;
950 	tmp &= ~LATENCY_WATERMARK_MASK(3);
951 	tmp |= LATENCY_WATERMARK_MASK(1);
952 	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
953 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
954 	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT)  |
955 		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
956 	/* select wm B */
957 	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
958 	tmp &= ~LATENCY_WATERMARK_MASK(3);
959 	tmp |= LATENCY_WATERMARK_MASK(2);
960 	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
961 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
962 	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
963 		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
964 	/* restore original selection */
965 	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
966 
967 	/* write the priority marks */
968 	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
969 	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
970 
971 	/* save values for DPM */
972 	amdgpu_crtc->line_time = line_time;
973 	amdgpu_crtc->wm_high = latency_watermark_a;
974 
975 	/* Save number of lines the linebuffer leads before the scanout */
976 	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
977 }
978 
979 /* watermark setup */
980 static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
981 				   struct amdgpu_crtc *amdgpu_crtc,
982 				   struct drm_display_mode *mode,
983 				   struct drm_display_mode *other_mode)
984 {
985 	u32 tmp, buffer_alloc, i;
986 	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
987 	/*
988 	 * Line Buffer Setup
989 	 * There are 3 line buffers, each one shared by 2 display controllers.
990 	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
992 	 * preset allocations specified in bits 21:20:
993 	 *  0 - half lb
994 	 *  2 - whole lb, other crtc must be disabled
995 	 */
996 	/* this can get tricky if we have two large displays on a paired group
997 	 * of crtcs.  Ideally for multiple large displays we'd assign them to
998 	 * non-linked crtcs for maximum line buffer allocation.
999 	 */
1000 	if (amdgpu_crtc->base.enabled && mode) {
1001 		if (other_mode) {
1002 			tmp = 0; /* 1/2 */
1003 			buffer_alloc = 1;
1004 		} else {
1005 			tmp = 2; /* whole */
1006 			buffer_alloc = 2;
1007 		}
1008 	} else {
1009 		tmp = 0;
1010 		buffer_alloc = 0;
1011 	}
1012 
1013 	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
1014 	       DC_LB_MEMORY_CONFIG(tmp));
1015 
1016 	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1017 	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
1018 	for (i = 0; i < adev->usec_timeout; i++) {
1019 		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1020 		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
1021 			break;
1022 		udelay(1);
1023 	}
1024 
1025 	if (amdgpu_crtc->base.enabled && mode) {
1026 		switch (tmp) {
1027 		case 0:
1028 		default:
1029 			return 4096 * 2;
1030 		case 2:
1031 			return 8192 * 2;
1032 		}
1033 	}
1034 
1035 	/* controller not enabled, so no lb used */
1036 	return 0;
1037 }
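
/*
 * E.g. (illustrative): when both crtcs of a pair drive displays, each
 * gets half a line buffer (4096 * 2 entries); a lone 1920 pixel wide
 * display on the pair gets the whole buffer, and the watermark code
 * then sees lb_vblank_lead_lines = DIV_ROUND_UP(8192 * 2, 1920) = 9.
 */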
1038 
1039 
/**
 * dce_v6_0_bandwidth_update - program display watermarks
1043  *
1044  * @adev: amdgpu_device pointer
1045  *
1046  * Calculate and program the display watermarks and line
 * buffer allocation (SI).
1048  */
1049 static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
1050 {
1051 	struct drm_display_mode *mode0 = NULL;
1052 	struct drm_display_mode *mode1 = NULL;
1053 	u32 num_heads = 0, lb_size;
1054 	int i;
1055 
1056 	if (!adev->mode_info.mode_config_initialized)
1057 		return;
1058 
1059 	amdgpu_display_update_priority(adev);
1060 
1061 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1062 		if (adev->mode_info.crtcs[i]->base.enabled)
1063 			num_heads++;
1064 	}
1065 	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
1066 		mode0 = &adev->mode_info.crtcs[i]->base.mode;
1067 		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
1068 		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
1069 		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
1070 		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
1071 		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
1072 	}
1073 }
1074 
1075 static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
1076 {
1077 	int i;
1078 	u32 tmp;
1079 
1080 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1081 		tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
1082 				ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1083 		if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
1084 					PORT_CONNECTIVITY))
1085 			adev->mode_info.audio.pin[i].connected = false;
1086 		else
1087 			adev->mode_info.audio.pin[i].connected = true;
1088 	}
}
1091 
1092 static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
1093 {
1094 	int i;
1095 
1096 	dce_v6_0_audio_get_connected_pins(adev);
1097 
1098 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1099 		if (adev->mode_info.audio.pin[i].connected)
1100 			return &adev->mode_info.audio.pin[i];
1101 	}
1102 	DRM_ERROR("No connected audio pins found!\n");
1103 	return NULL;
1104 }
1105 
1106 static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
1107 {
1108 	struct amdgpu_device *adev = encoder->dev->dev_private;
1109 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1110 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1111 
1112 	if (!dig || !dig->afmt || !dig->afmt->pin)
1113 		return;
1114 
1115 	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
1116 	       REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
1117 		             dig->afmt->pin->id));
1118 }
1119 
1120 static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
1121 						struct drm_display_mode *mode)
1122 {
1123 	struct amdgpu_device *adev = encoder->dev->dev_private;
1124 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1125 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1126 	struct drm_connector *connector;
1127 	struct amdgpu_connector *amdgpu_connector = NULL;
1128 	int interlace = 0;
1129 	u32 tmp;
1130 
1131 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1132 		if (connector->encoder == encoder) {
1133 			amdgpu_connector = to_amdgpu_connector(connector);
1134 			break;
1135 		}
1136 	}
1137 
1138 	if (!amdgpu_connector) {
1139 		DRM_ERROR("Couldn't find encoder's connector\n");
1140 		return;
1141 	}
1142 
1143 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1144 		interlace = 1;
1145 
1146 	if (connector->latency_present[interlace]) {
1147 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1148 				VIDEO_LIPSYNC, connector->video_latency[interlace]);
1149 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1150 				AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1151 	} else {
1152 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1153 				VIDEO_LIPSYNC, 0);
1154 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1155 				AUDIO_LIPSYNC, 0);
1156 	}
1157 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1158 			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1159 }
1160 
1161 static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1162 {
1163 	struct amdgpu_device *adev = encoder->dev->dev_private;
1164 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1165 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1166 	struct drm_connector *connector;
1167 	struct amdgpu_connector *amdgpu_connector = NULL;
1168 	u8 *sadb = NULL;
1169 	int sad_count;
1170 	u32 tmp;
1171 
1172 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1173 		if (connector->encoder == encoder) {
1174 			amdgpu_connector = to_amdgpu_connector(connector);
1175 			break;
1176 		}
1177 	}
1178 
1179 	if (!amdgpu_connector) {
1180 		DRM_ERROR("Couldn't find encoder's connector\n");
1181 		return;
1182 	}
1183 
1184 	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1185 	if (sad_count < 0) {
1186 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1187 		sad_count = 0;
1188 	}
1189 
1190 	/* program the speaker allocation */
1191 	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1192 			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1193 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1194 			HDMI_CONNECTION, 0);
1195 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1196 			DP_CONNECTION, 0);
1197 
1198 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
1199 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1200 				DP_CONNECTION, 1);
1201 	else
1202 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1203 				HDMI_CONNECTION, 1);
1204 
1205 	if (sad_count)
1206 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1207 				SPEAKER_ALLOCATION, sadb[0]);
1208 	else
1209 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1210 				SPEAKER_ALLOCATION, 5); /* stereo */
1211 
1212 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1213 			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1214 
1215 	kfree(sadb);
1216 }
1217 
1218 static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
1219 {
1220 	struct amdgpu_device *adev = encoder->dev->dev_private;
1221 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1222 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1223 	struct drm_connector *connector;
1224 	struct amdgpu_connector *amdgpu_connector = NULL;
1225 	struct cea_sad *sads;
1226 	int i, sad_count;
1227 
1228 	static const u16 eld_reg_to_type[][2] = {
1229 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1230 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1231 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1232 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1233 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1234 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1235 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1236 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1237 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1238 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1239 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1240 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1241 	};
1242 
1243 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1244 		if (connector->encoder == encoder) {
1245 			amdgpu_connector = to_amdgpu_connector(connector);
1246 			break;
1247 		}
1248 	}
1249 
1250 	if (!amdgpu_connector) {
1251 		DRM_ERROR("Couldn't find encoder's connector\n");
1252 		return;
1253 	}
1254 
	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count < 0)
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
	if (sad_count <= 0)
		return;
1260 
1261 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1262 		u32 tmp = 0;
1263 		u8 stereo_freqs = 0;
1264 		int max_channels = -1;
1265 		int j;
1266 
1267 		for (j = 0; j < sad_count; j++) {
1268 			struct cea_sad *sad = &sads[j];
1269 
1270 			if (sad->format == eld_reg_to_type[i][1]) {
1271 				if (sad->channels > max_channels) {
1272 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1273 							MAX_CHANNELS, sad->channels);
1274 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1275 							DESCRIPTOR_BYTE_2, sad->byte2);
1276 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1277 							SUPPORTED_FREQUENCIES, sad->freq);
1278 					max_channels = sad->channels;
1279 				}
1280 
1281 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1282 					stereo_freqs |= sad->freq;
1283 				else
1284 					break;
1285 			}
1286 		}
1287 
1288 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1289 				SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
1290 		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
1291 	}
1292 
1293 	kfree(sads);
}
1296 
1297 static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
1298 				  struct amdgpu_audio_pin *pin,
1299 				  bool enable)
1300 {
1301 	if (!pin)
1302 		return;
1303 
1304 	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1305 			enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1306 }
1307 
1308 static const u32 pin_offsets[7] =
1309 {
1310 	(0x1780 - 0x1780),
1311 	(0x1786 - 0x1780),
1312 	(0x178c - 0x1780),
1313 	(0x1792 - 0x1780),
1314 	(0x1798 - 0x1780),
1315 	(0x179d - 0x1780),
1316 	(0x17a4 - 0x1780),
1317 };
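
/*
 * These are dword deltas from the first Azalia pin's endpoint block;
 * the endpoint register helpers take them as the block offset, e.g.
 * WREG32_AUDIO_ENDPT(pin->offset, reg, val) as in dce_v6_0_audio_enable()
 * above.
 */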
1318 
1319 static int dce_v6_0_audio_init(struct amdgpu_device *adev)
1320 {
1321 	int i;
1322 
1323 	if (!amdgpu_audio)
1324 		return 0;
1325 
1326 	adev->mode_info.audio.enabled = true;
1327 
1328 	switch (adev->asic_type) {
1329 	case CHIP_TAHITI:
1330 	case CHIP_PITCAIRN:
1331 	case CHIP_VERDE:
1332 	default:
1333 		adev->mode_info.audio.num_pins = 6;
1334 		break;
1335 	case CHIP_OLAND:
1336 		adev->mode_info.audio.num_pins = 2;
1337 		break;
1338 	}
1339 
1340 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1341 		adev->mode_info.audio.pin[i].channels = -1;
1342 		adev->mode_info.audio.pin[i].rate = -1;
1343 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1344 		adev->mode_info.audio.pin[i].status_bits = 0;
1345 		adev->mode_info.audio.pin[i].category_code = 0;
1346 		adev->mode_info.audio.pin[i].connected = false;
1347 		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1348 		adev->mode_info.audio.pin[i].id = i;
1349 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1350 	}
1351 
1352 	return 0;
1353 }
1354 
1355 static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
1356 {
1357 	int i;
1358 
1359 	if (!amdgpu_audio)
1360 		return;
1361 
1362 	if (!adev->mode_info.audio.enabled)
1363 		return;
1364 
1365 	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1366 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1367 
1368 	adev->mode_info.audio.enabled = false;
1369 }
1370 
1371 static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
1372 {
1373 	struct drm_device *dev = encoder->dev;
1374 	struct amdgpu_device *adev = dev->dev_private;
1375 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1376 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1377 	u32 tmp;
1378 
1379 	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1380 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
1381 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
1382 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
1383 	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
1384 }
1385 
1386 static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
1387 				   uint32_t clock, int bpc)
1388 {
1389 	struct drm_device *dev = encoder->dev;
1390 	struct amdgpu_device *adev = dev->dev_private;
1391 	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1392 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1393 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1394 	u32 tmp;
1395 
1396 	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
1397 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
1398 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
1399 			bpc > 8 ? 0 : 1);
1400 	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
1401 
1402 	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
1403 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
1404 	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
1405 	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
1406 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
1407 	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
1408 
1409 	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
1410 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
1411 	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
1412 	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
1413 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
1414 	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
1415 
1416 	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
1417 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
1418 	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
1419 	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
1420 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
1421 	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
1422 }
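
/*
 * Background (HDMI spec, illustrative values): the sink regenerates the
 * audio clock as 128 * fs = TMDS clock * N / CTS.  For fs = 48 kHz on a
 * 148.5 MHz TMDS clock the expected values are N = 6144 and CTS = 148500,
 * since 148500000 * 6144 / 148500 = 128 * 48000.
 */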
1423 
1424 static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
1425 					       struct drm_display_mode *mode)
1426 {
1427 	struct drm_device *dev = encoder->dev;
1428 	struct amdgpu_device *adev = dev->dev_private;
1429 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1430 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1431 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1432 	struct hdmi_avi_infoframe frame;
1433 	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1434 	uint8_t *payload = buffer + 3;
1435 	uint8_t *header = buffer;
1436 	ssize_t err;
1437 	u32 tmp;
1438 
1439 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1440 	if (err < 0) {
1441 		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1442 		return;
1443 	}
1444 
1445 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1446 	if (err < 0) {
1447 		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1448 		return;
1449 	}
1450 
1451 	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
1452 	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
1453 	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
1454 	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
1455 	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
1456 	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
1457 	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
1458 	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));
1459 
1460 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1461 	/* anything other than 0 */
1462 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
1463 			HDMI_AUDIO_INFO_LINE, 2);
1464 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1465 }
1466 
1467 static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1468 {
1469 	struct drm_device *dev = encoder->dev;
1470 	struct amdgpu_device *adev = dev->dev_private;
1471 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1472 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1473 	u32 tmp;
1474 
1475 	/*
1476 	 * Two dtos: generally use dto0 for hdmi, dto1 for dp.
1477 	 * Express [24MHz / target pixel clock] as an exact rational
1478 	 * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
1479 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
1480 	 */
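	/*
	 * E.g. with a 148500 kHz pixel clock the HDMI path below programs
	 * PHASE = 24000 and MODULE = 148500, encoding the exact ratio
	 * 24 MHz / 148.5 MHz.
	 */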
1481 	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1482 	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1483 			DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
1484 	if (em == ATOM_ENCODER_MODE_HDMI) {
1485 		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1486 				DCCG_AUDIO_DTO_SEL, 0);
1487 	} else if (ENCODER_MODE_IS_DP(em)) {
1488 		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1489 				DCCG_AUDIO_DTO_SEL, 1);
1490 	}
1491 	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1492 	if (em == ATOM_ENCODER_MODE_HDMI) {
1493 		WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
1494 		WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
1495 	} else if (ENCODER_MODE_IS_DP(em)) {
1496 		WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
1497 		WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
1498 	}
1499 }
1500 
1501 static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
1502 {
1503 	struct drm_device *dev = encoder->dev;
1504 	struct amdgpu_device *adev = dev->dev_private;
1505 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1506 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1507 	u32 tmp;
1508 
1509 	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1510 	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1511 	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1512 
1513 	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1514 	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1515 	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1516 
1517 	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1518 	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1519 	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1520 
1521 	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1522 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1523 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1524 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1525 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1526 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1527 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1528 	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1529 
1530 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
1531 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
1532 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);
1533 
1534 	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1535 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1536 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1537 	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1538 
1539 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1540 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
1541 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1542 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1543 }
1544 
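/* Assert or deassert AVMUTE in the HDMI General Control packet. */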
1545 static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
1546 {
1547 	struct drm_device *dev = encoder->dev;
1548 	struct amdgpu_device *adev = dev->dev_private;
1549 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1550 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1551 	u32 tmp;
1552 
1553 	tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
1554 	tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
1555 	WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
1556 }
1557 
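/*
 * Enable or disable HDMI audio as a unit: AVI and audio infoframe
 * transmission plus audio sample sending are toggled together.
 */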
1558 static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
1559 {
1560 	struct drm_device *dev = encoder->dev;
1561 	struct amdgpu_device *adev = dev->dev_private;
1562 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1563 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1564 	u32 tmp;
1565 
1566 	if (enable) {
1567 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1568 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1569 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1570 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1571 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1572 		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1573 
1574 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1575 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1576 		WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1577 
1578 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1579 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1580 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1581 	} else {
1582 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1583 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
1584 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
1585 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
1586 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
1587 		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1588 
1589 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1590 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
1591 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1592 	}
1593 }
1594 
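/*
 * Enable or disable DP audio: audio sample sending plus the DP
 * secondary data packets (audio sample, timestamp and infoframe).
 */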
1595 static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
1596 {
1597 	struct drm_device *dev = encoder->dev;
1598 	struct amdgpu_device *adev = dev->dev_private;
1599 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1600 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1601 	u32 tmp;
1602 
1603 	if (enable) {
1604 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1605 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1606 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1607 
1608 		tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
1609 		tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
1610 		WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);
1611 
1612 		tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
1613 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
1614 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
1615 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
1616 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1617 		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
1618 	} else {
1619 		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
1620 	}
1621 }
1622 
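/*
 * Full audio setup for a mode set.  Audio is disabled while the DTO,
 * ACR, packet and infoframe state is reprogrammed, then re-enabled
 * once the hardware is consistent.
 */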
1623 static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1624 				  struct drm_display_mode *mode)
1625 {
1626 	struct drm_device *dev = encoder->dev;
1627 	struct amdgpu_device *adev = dev->dev_private;
1628 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1629 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1630 	struct drm_connector *connector;
1631 	struct amdgpu_connector *amdgpu_connector = NULL;
1632 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1633 	int bpc = 8;
1634 
1635 	if (!dig || !dig->afmt)
1636 		return;
1637 
1638 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1639 		if (connector->encoder == encoder) {
1640 			amdgpu_connector = to_amdgpu_connector(connector);
1641 			break;
1642 		}
1643 	}
1644 
1645 	if (!amdgpu_connector) {
1646 		DRM_ERROR("Couldn't find encoder's connector\n");
1647 		return;
1648 	}
1649 
1650 	if (!dig->afmt->enabled)
1651 		return;
1652 
1653 	dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
1654 	if (!dig->afmt->pin)
1655 		return;
1656 
1657 	if (encoder->crtc) {
1658 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1659 		bpc = amdgpu_crtc->bpc;
1660 	}
1661 
1662 	/* disable audio before setting up hw */
1663 	dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1664 
1665 	dce_v6_0_audio_set_mute(encoder, true);
1666 	dce_v6_0_audio_write_speaker_allocation(encoder);
1667 	dce_v6_0_audio_write_sad_regs(encoder);
1668 	dce_v6_0_audio_write_latency_fields(encoder, mode);
1669 	if (em == ATOM_ENCODER_MODE_HDMI) {
1670 		dce_v6_0_audio_set_dto(encoder, mode->clock);
1671 		dce_v6_0_audio_set_vbi_packet(encoder);
1672 		dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
1673 	} else if (ENCODER_MODE_IS_DP(em)) {
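		/* dispclk from atombios is in 10 kHz units; the DTO is programmed in kHz */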
1674 		dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
1675 	}
1676 	dce_v6_0_audio_set_packet(encoder);
1677 	dce_v6_0_audio_select_pin(encoder);
1678 	dce_v6_0_audio_set_avi_infoframe(encoder, mode);
1679 	dce_v6_0_audio_set_mute(encoder, false);
	if (em == ATOM_ENCODER_MODE_HDMI)
		dce_v6_0_audio_hdmi_enable(encoder, true);
	else if (ENCODER_MODE_IS_DP(em))
		dce_v6_0_audio_dp_enable(encoder, true);
1685 
1686 	/* enable audio after setting up hw */
1687 	dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
1688 }
1689 
1690 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1691 {
1692 	struct drm_device *dev = encoder->dev;
1693 	struct amdgpu_device *adev = dev->dev_private;
1694 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1695 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1696 
1697 	if (!dig || !dig->afmt)
1698 		return;
1699 
	/* Silently ignore redundant enable/disable requests */
1701 	if (enable && dig->afmt->enabled)
1702 		return;
1703 
1704 	if (!enable && !dig->afmt->enabled)
1705 		return;
1706 
1707 	if (!enable && dig->afmt->pin) {
1708 		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1709 		dig->afmt->pin = NULL;
1710 	}
1711 
1712 	dig->afmt->enabled = enable;
1713 
1714 	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1715 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1716 }
1717 
1718 static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1719 {
1720 	int i, j;
1721 
1722 	for (i = 0; i < adev->mode_info.num_dig; i++)
1723 		adev->mode_info.afmt[i] = NULL;
1724 
1725 	/* DCE6 has audio blocks tied to DIG encoders */
1726 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1727 		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1728 		if (adev->mode_info.afmt[i]) {
1729 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1730 			adev->mode_info.afmt[i]->id = i;
1731 		} else {
1732 			for (j = 0; j < i; j++) {
1733 				kfree(adev->mode_info.afmt[j]);
1734 				adev->mode_info.afmt[j] = NULL;
1735 			}
1736 			DRM_ERROR("Out of memory allocating afmt table\n");
1737 			return -ENOMEM;
1738 		}
1739 	}
1740 	return 0;
1741 }
1742 
1743 static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1744 {
1745 	int i;
1746 
1747 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1748 		kfree(adev->mode_info.afmt[i]);
1749 		adev->mode_info.afmt[i] = NULL;
1750 	}
1751 }
1752 
1753 static const u32 vga_control_regs[6] =
1754 {
1755 	mmD1VGA_CONTROL,
1756 	mmD2VGA_CONTROL,
1757 	mmD3VGA_CONTROL,
1758 	mmD4VGA_CONTROL,
1759 	mmD5VGA_CONTROL,
1760 	mmD6VGA_CONTROL,
1761 };
1762 
1763 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1764 {
1765 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1766 	struct drm_device *dev = crtc->dev;
1767 	struct amdgpu_device *adev = dev->dev_private;
1768 	u32 vga_control;
1769 
1770 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1771 	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1772 }
1773 
1774 static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1775 {
1776 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1777 	struct drm_device *dev = crtc->dev;
1778 	struct amdgpu_device *adev = dev->dev_private;
1779 
1780 	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1781 }
1782 
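/*
 * Program the primary surface for a crtc: pin the fb (in the
 * non-atomic path), translate the drm format and tiling flags into
 * GRPH_* register bits, write the surface address, pitch and
 * viewport, and unpin any previously bound fb.
 */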
1783 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1784 				     struct drm_framebuffer *fb,
1785 				     int x, int y, int atomic)
1786 {
1787 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1788 	struct drm_device *dev = crtc->dev;
1789 	struct amdgpu_device *adev = dev->dev_private;
1790 	struct drm_framebuffer *target_fb;
1791 	struct drm_gem_object *obj;
1792 	struct amdgpu_bo *abo;
1793 	uint64_t fb_location, tiling_flags;
1794 	uint32_t fb_format, fb_pitch_pixels, pipe_config;
1795 	u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
1796 	u32 viewport_w, viewport_h;
1797 	int r;
1798 	bool bypass_lut = false;
1799 	struct drm_format_name_buf format_name;
1800 
1801 	/* no fb bound */
1802 	if (!atomic && !crtc->primary->fb) {
1803 		DRM_DEBUG_KMS("No FB bound\n");
1804 		return 0;
1805 	}
1806 
1807 	if (atomic)
1808 		target_fb = fb;
1809 	else
1810 		target_fb = crtc->primary->fb;
1811 
1812 	/* If atomic, assume fb object is pinned & idle & fenced and
1813 	 * just update base pointers
1814 	 */
1815 	obj = target_fb->obj[0];
1816 	abo = gem_to_amdgpu_bo(obj);
1817 	r = amdgpu_bo_reserve(abo, false);
1818 	if (unlikely(r != 0))
1819 		return r;
1820 
1821 	if (!atomic) {
1822 		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1823 		if (unlikely(r != 0)) {
1824 			amdgpu_bo_unreserve(abo);
1825 			return -EINVAL;
1826 		}
1827 	}
1828 	fb_location = amdgpu_bo_gpu_offset(abo);
1829 
1830 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1831 	amdgpu_bo_unreserve(abo);
1832 
1833 	switch (target_fb->format->format) {
1834 	case DRM_FORMAT_C8:
1835 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
1836 			     GRPH_FORMAT(GRPH_FORMAT_INDEXED));
1837 		break;
1838 	case DRM_FORMAT_XRGB4444:
1839 	case DRM_FORMAT_ARGB4444:
1840 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1841 			     GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
1842 #ifdef __BIG_ENDIAN
1843 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1844 #endif
1845 		break;
1846 	case DRM_FORMAT_XRGB1555:
1847 	case DRM_FORMAT_ARGB1555:
1848 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1849 			     GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
1850 #ifdef __BIG_ENDIAN
1851 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1852 #endif
1853 		break;
1854 	case DRM_FORMAT_BGRX5551:
1855 	case DRM_FORMAT_BGRA5551:
1856 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1857 			     GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
1858 #ifdef __BIG_ENDIAN
1859 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1860 #endif
1861 		break;
1862 	case DRM_FORMAT_RGB565:
1863 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1864 			     GRPH_FORMAT(GRPH_FORMAT_ARGB565));
1865 #ifdef __BIG_ENDIAN
1866 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1867 #endif
1868 		break;
1869 	case DRM_FORMAT_XRGB8888:
1870 	case DRM_FORMAT_ARGB8888:
1871 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1872 			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1873 #ifdef __BIG_ENDIAN
1874 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1875 #endif
1876 		break;
1877 	case DRM_FORMAT_XRGB2101010:
1878 	case DRM_FORMAT_ARGB2101010:
1879 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1880 			     GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
1881 #ifdef __BIG_ENDIAN
1882 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1883 #endif
		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1885 		bypass_lut = true;
1886 		break;
1887 	case DRM_FORMAT_BGRX1010102:
1888 	case DRM_FORMAT_BGRA1010102:
1889 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1890 			     GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
1891 #ifdef __BIG_ENDIAN
1892 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1893 #endif
		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
1895 		bypass_lut = true;
1896 		break;
1897 	case DRM_FORMAT_XBGR8888:
1898 	case DRM_FORMAT_ABGR8888:
1899 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1900 			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1901 		fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
1902 			   GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
1903 #ifdef __BIG_ENDIAN
1904 		fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1905 #endif
1906 		break;
1907 	default:
1908 		DRM_ERROR("Unsupported screen format %s\n",
1909 		          drm_get_format_name(target_fb->format->format, &format_name));
1910 		return -EINVAL;
1911 	}
1912 
1913 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1914 		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1915 
1916 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1917 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1918 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1919 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1920 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1921 
1922 		fb_format |= GRPH_NUM_BANKS(num_banks);
1923 		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
1924 		fb_format |= GRPH_TILE_SPLIT(tile_split);
1925 		fb_format |= GRPH_BANK_WIDTH(bankw);
1926 		fb_format |= GRPH_BANK_HEIGHT(bankh);
1927 		fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
1928 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1929 		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
1930 	}
1931 
1932 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1933 	fb_format |= GRPH_PIPE_CONFIG(pipe_config);
1934 
1935 	dce_v6_0_vga_enable(crtc, false);
1936 
1937 	/* Make sure surface address is updated at vertical blank rather than
1938 	 * horizontal blank
1939 	 */
1940 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1941 
1942 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1943 	       upper_32_bits(fb_location));
1944 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1945 	       upper_32_bits(fb_location));
1946 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1947 	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1948 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1949 	       (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1950 	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1951 	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1952 
1953 	/*
	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the
	 * LUT for > 8 bpc scanout to avoid truncating fb indices to their 8
	 * MSBs and to retain full precision throughout the pipeline.
1957 	 */
1958 	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
1959 		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
1960 		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
1961 
1962 	if (bypass_lut)
1963 		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1964 
1965 	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1966 	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1967 	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1968 	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1969 	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1970 	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1971 
1972 	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1973 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1974 
1975 	dce_v6_0_grph_enable(crtc, true);
1976 
1977 	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1978 		       target_fb->height);
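	/* viewport start is aligned down: x to 4 pixels, y to 2 lines */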
1979 	x &= ~3;
1980 	y &= ~1;
1981 	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1982 	       (x << 16) | y);
1983 	viewport_w = crtc->mode.hdisplay;
1984 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1985 
1986 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1987 	       (viewport_w << 16) | viewport_h);
1988 
1989 	/* set pageflip to happen anywhere in vblank interval */
1990 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1991 
1992 	if (!atomic && fb && fb != crtc->primary->fb) {
1993 		abo = gem_to_amdgpu_bo(fb->obj[0]);
1994 		r = amdgpu_bo_reserve(abo, true);
1995 		if (unlikely(r != 0))
1996 			return r;
1997 		amdgpu_bo_unpin(abo);
1998 		amdgpu_bo_unreserve(abo);
1999 	}
2000 
2001 	/* Bytes per pixel may have changed */
2002 	dce_v6_0_bandwidth_update(adev);
2003 
2004 	return 0;
2005 
2006 }
2007 
2008 static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
2009 				    struct drm_display_mode *mode)
2010 {
2011 	struct drm_device *dev = crtc->dev;
2012 	struct amdgpu_device *adev = dev->dev_private;
2013 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2014 
2015 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2016 		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
2017 		       INTERLEAVE_EN);
2018 	else
2019 		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2020 }
2021 
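/*
 * Upload the 256-entry legacy gamma LUT.  Each DC_LUT_30_COLOR word
 * packs 10 bits per component (red 29:20, green 19:10, blue 9:0)
 * taken from the top bits of the 16-bit gamma_store values; the
 * CSC, prescale and de/regamma stages around the LUT are left in
 * bypass.
 */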
2022 static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
2023 {
2025 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2026 	struct drm_device *dev = crtc->dev;
2027 	struct amdgpu_device *adev = dev->dev_private;
2028 	u16 *r, *g, *b;
2029 	int i;
2030 
2031 	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2032 
2033 	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2034 	       ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2035 		(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2036 	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2037 	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2038 	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2039 	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2040 	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2041 	       ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2042 		(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2043 
2044 	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2045 
2046 	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2047 	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2048 	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2049 
2050 	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2051 	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2052 	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2053 
2054 	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2055 	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2056 
2057 	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2058 	r = crtc->gamma_store;
2059 	g = r + crtc->gamma_size;
2060 	b = g + crtc->gamma_size;
2061 	for (i = 0; i < 256; i++) {
2062 		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2063 		       ((*r++ & 0xffc0) << 14) |
2064 		       ((*g++ & 0xffc0) << 4) |
2065 		       (*b++ >> 6));
2066 	}
2067 
2068 	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2069 	       ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2070 		(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2071 		ICON_DEGAMMA_MODE(0) |
2072 		(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2073 	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2074 	       ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2075 		(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2076 	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2077 	       ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2078 		(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2079 	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2080 	       ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2081 		(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2082 	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2083 	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2086 }
2087 
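/*
 * Map a UNIPHY encoder to a DIG block: each UNIPHY instance drives
 * two links (A/B) which map onto DIG0-5, and UNIPHY3 uses DIG6.
 */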
2088 static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
2089 {
2090 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2091 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2092 
2093 	switch (amdgpu_encoder->encoder_id) {
2094 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2095 		return dig->linkb ? 1 : 0;
2096 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2097 		return dig->linkb ? 3 : 2;
2098 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2099 		return dig->linkb ? 5 : 4;
2100 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2101 		return 6;
2102 	default:
2103 		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2104 		return 0;
2105 	}
2106 }
2107 
2108 /**
2109  * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
2110  *
2111  * @crtc: drm crtc
2112  *
2113  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2114  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2115  * monitors a dedicated PPLL must be used.  If a particular board has
2116  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2117  * as there is no need to program the PLL itself.  If we are not able to
2118  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2119  * avoid messing up an existing monitor.
 */
2123 static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
2124 {
2125 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2126 	struct drm_device *dev = crtc->dev;
2127 	struct amdgpu_device *adev = dev->dev_private;
2128 	u32 pll_in_use;
2129 	int pll;
2130 
2131 	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2132 		if (adev->clock.dp_extclk)
2133 			/* skip PPLL programming if using ext clock */
2134 			return ATOM_PPLL_INVALID;
2135 		else
2136 			return ATOM_PPLL0;
2137 	} else {
2138 		/* use the same PPLL for all monitors with the same clock */
2139 		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2140 		if (pll != ATOM_PPLL_INVALID)
2141 			return pll;
2142 	}
2143 
	/* PPLL1 and PPLL2 */
2145 	pll_in_use = amdgpu_pll_get_use_mask(crtc);
2146 	if (!(pll_in_use & (1 << ATOM_PPLL2)))
2147 		return ATOM_PPLL2;
2148 	if (!(pll_in_use & (1 << ATOM_PPLL1)))
2149 		return ATOM_PPLL1;
2150 	DRM_ERROR("unable to allocate a PPLL\n");
2151 	return ATOM_PPLL_INVALID;
2152 }
2153 
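/*
 * Lock or unlock cursor register updates so that position, hotspot
 * and size changes are latched together.
 */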
2154 static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2155 {
2156 	struct amdgpu_device *adev = crtc->dev->dev_private;
2157 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2158 	uint32_t cur_lock;
2159 
2160 	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2161 	if (lock)
2162 		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2163 	else
2164 		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2165 	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2166 }
2167 
2168 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
2169 {
2170 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2171 	struct amdgpu_device *adev = crtc->dev->dev_private;
2172 
2173 	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2174 		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2175 		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2178 }
2179 
2180 static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
2181 {
2182 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2183 	struct amdgpu_device *adev = crtc->dev->dev_private;
2184 
2185 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2186 	       upper_32_bits(amdgpu_crtc->cursor_addr));
2187 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2188 	       lower_32_bits(amdgpu_crtc->cursor_addr));
2189 
2190 	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2191 		   CUR_CONTROL__CURSOR_EN_MASK |
2192 		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2193 		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2195 }
2196 
2197 static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
2198 				       int x, int y)
2199 {
2200 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2201 	struct amdgpu_device *adev = crtc->dev->dev_private;
2202 	int xorigin = 0, yorigin = 0;
2204 	int w = amdgpu_crtc->cursor_width;
2205 
2206 	amdgpu_crtc->cursor_x = x;
2207 	amdgpu_crtc->cursor_y = y;
2208 
	/* avivo cursors are offset into the total surface */
2210 	x += crtc->x;
2211 	y += crtc->y;
2212 	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2213 
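	/*
	 * For a cursor partially off the top/left edge, clamp the
	 * position to 0 and offset into the cursor image via the
	 * hotspot registers instead.
	 */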
2214 	if (x < 0) {
2215 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2216 		x = 0;
2217 	}
2218 	if (y < 0) {
2219 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2220 		y = 0;
2221 	}
2222 
2223 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2224 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2225 	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2226 	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2227 
2228 	return 0;
2229 }
2230 
2231 static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
2232 				     int x, int y)
2233 {
2234 	int ret;
2235 
2236 	dce_v6_0_lock_cursor(crtc, true);
2237 	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
2238 	dce_v6_0_lock_cursor(crtc, false);
2239 
2240 	return ret;
2241 }
2242 
2243 static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
2244 				     struct drm_file *file_priv,
2245 				     uint32_t handle,
2246 				     uint32_t width,
2247 				     uint32_t height,
2248 				     int32_t hot_x,
2249 				     int32_t hot_y)
2250 {
2251 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2252 	struct drm_gem_object *obj;
2253 	struct amdgpu_bo *aobj;
2254 	int ret;
2255 
2256 	if (!handle) {
2257 		/* turn off cursor */
2258 		dce_v6_0_hide_cursor(crtc);
2259 		obj = NULL;
2260 		goto unpin;
2261 	}
2262 
2263 	if ((width > amdgpu_crtc->max_cursor_width) ||
2264 	    (height > amdgpu_crtc->max_cursor_height)) {
2265 		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2266 		return -EINVAL;
2267 	}
2268 
2269 	obj = drm_gem_object_lookup(file_priv, handle);
2270 	if (!obj) {
2271 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2272 		return -ENOENT;
2273 	}
2274 
2275 	aobj = gem_to_amdgpu_bo(obj);
2276 	ret = amdgpu_bo_reserve(aobj, false);
2277 	if (ret != 0) {
2278 		drm_gem_object_put_unlocked(obj);
2279 		return ret;
2280 	}
2281 
2282 	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2283 	amdgpu_bo_unreserve(aobj);
2284 	if (ret) {
2285 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2286 		drm_gem_object_put_unlocked(obj);
2287 		return ret;
2288 	}
2289 	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2290 
2291 	dce_v6_0_lock_cursor(crtc, true);
2292 
2293 	if (width != amdgpu_crtc->cursor_width ||
2294 	    height != amdgpu_crtc->cursor_height ||
2295 	    hot_x != amdgpu_crtc->cursor_hot_x ||
2296 	    hot_y != amdgpu_crtc->cursor_hot_y) {
2297 		int x, y;
2298 
2299 		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2300 		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2301 
2302 		dce_v6_0_cursor_move_locked(crtc, x, y);
2303 
2304 		amdgpu_crtc->cursor_width = width;
2305 		amdgpu_crtc->cursor_height = height;
2306 		amdgpu_crtc->cursor_hot_x = hot_x;
2307 		amdgpu_crtc->cursor_hot_y = hot_y;
2308 	}
2309 
2310 	dce_v6_0_show_cursor(crtc);
2311 	dce_v6_0_lock_cursor(crtc, false);
2312 
2313 unpin:
2314 	if (amdgpu_crtc->cursor_bo) {
2315 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2316 		ret = amdgpu_bo_reserve(aobj, true);
2317 		if (likely(ret == 0)) {
2318 			amdgpu_bo_unpin(aobj);
2319 			amdgpu_bo_unreserve(aobj);
2320 		}
2321 		drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
2322 	}
2323 
2324 	amdgpu_crtc->cursor_bo = obj;
2325 	return 0;
2326 }
2327 
2328 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
2329 {
2330 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2331 
2332 	if (amdgpu_crtc->cursor_bo) {
2333 		dce_v6_0_lock_cursor(crtc, true);
2334 
2335 		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2336 					    amdgpu_crtc->cursor_y);
2337 
2338 		dce_v6_0_show_cursor(crtc);
2339 		dce_v6_0_lock_cursor(crtc, false);
2340 	}
2341 }
2342 
2343 static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2344 				   u16 *blue, uint32_t size,
2345 				   struct drm_modeset_acquire_ctx *ctx)
2346 {
2347 	dce_v6_0_crtc_load_lut(crtc);
2348 
2349 	return 0;
2350 }
2351 
2352 static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2353 {
2354 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2355 
2356 	drm_crtc_cleanup(crtc);
2357 	kfree(amdgpu_crtc);
2358 }
2359 
2360 static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2361 	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
2362 	.cursor_move = dce_v6_0_crtc_cursor_move,
2363 	.gamma_set = dce_v6_0_crtc_gamma_set,
2364 	.set_config = amdgpu_display_crtc_set_config,
2365 	.destroy = dce_v6_0_crtc_destroy,
2366 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2367 };
2368 
2369 static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2370 {
2371 	struct drm_device *dev = crtc->dev;
2372 	struct amdgpu_device *adev = dev->dev_private;
2373 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2374 	unsigned type;
2375 
2376 	switch (mode) {
2377 	case DRM_MODE_DPMS_ON:
2378 		amdgpu_crtc->enabled = true;
2379 		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2380 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2381 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2382 		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2383 						amdgpu_crtc->crtc_id);
2384 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2385 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2386 		drm_crtc_vblank_on(crtc);
2387 		dce_v6_0_crtc_load_lut(crtc);
2388 		break;
2389 	case DRM_MODE_DPMS_STANDBY:
2390 	case DRM_MODE_DPMS_SUSPEND:
2391 	case DRM_MODE_DPMS_OFF:
2392 		drm_crtc_vblank_off(crtc);
2393 		if (amdgpu_crtc->enabled)
2394 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2395 		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2396 		amdgpu_crtc->enabled = false;
2397 		break;
2398 	}
2399 	/* adjust pm to dpms */
2400 	amdgpu_pm_compute_clocks(adev);
2401 }
2402 
2403 static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2404 {
2405 	/* disable crtc pair power gating before programming */
2406 	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2407 	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2408 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2409 }
2410 
2411 static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2412 {
2413 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2414 	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2415 }
2416 
2417 static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2418 {
2420 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2421 	struct drm_device *dev = crtc->dev;
2422 	struct amdgpu_device *adev = dev->dev_private;
2423 	struct amdgpu_atom_ss ss;
2424 	int i;
2425 
2426 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2427 	if (crtc->primary->fb) {
2428 		int r;
2429 		struct amdgpu_bo *abo;
2430 
2431 		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2432 		r = amdgpu_bo_reserve(abo, true);
2433 		if (unlikely(r))
2434 			DRM_ERROR("failed to reserve abo before unpin\n");
2435 		else {
2436 			amdgpu_bo_unpin(abo);
2437 			amdgpu_bo_unreserve(abo);
2438 		}
2439 	}
2440 	/* disable the GRPH */
2441 	dce_v6_0_grph_enable(crtc, false);
2442 
2443 	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2444 
2445 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2446 		if (adev->mode_info.crtcs[i] &&
2447 		    adev->mode_info.crtcs[i]->enabled &&
2448 		    i != amdgpu_crtc->crtc_id &&
2449 		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* another crtc is using this pll; don't turn
			 * off the pll
			 */
2453 			goto done;
2454 		}
2455 	}
2456 
2457 	switch (amdgpu_crtc->pll_id) {
2458 	case ATOM_PPLL1:
2459 	case ATOM_PPLL2:
2460 		/* disable the ppll */
2461 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2462 						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2463 		break;
2464 	default:
2465 		break;
2466 	}
2467 done:
2468 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2469 	amdgpu_crtc->adjusted_clock = 0;
2470 	amdgpu_crtc->encoder = NULL;
2471 	amdgpu_crtc->connector = NULL;
2472 }
2473 
2474 static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2475 				  struct drm_display_mode *mode,
2476 				  struct drm_display_mode *adjusted_mode,
2477 				  int x, int y, struct drm_framebuffer *old_fb)
2478 {
2479 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2480 
2481 	if (!amdgpu_crtc->adjusted_clock)
2482 		return -EINVAL;
2483 
2484 	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2485 	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2486 	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2487 	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2488 	amdgpu_atombios_crtc_scaler_setup(crtc);
2489 	dce_v6_0_cursor_reset(crtc);
	/* update the hw version for dpm */
2491 	amdgpu_crtc->hw_mode = *adjusted_mode;
2492 
2493 	return 0;
2494 }
2495 
2496 static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2497 				     const struct drm_display_mode *mode,
2498 				     struct drm_display_mode *adjusted_mode)
2499 {
2501 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2502 	struct drm_device *dev = crtc->dev;
2503 	struct drm_encoder *encoder;
2504 
2505 	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2506 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2507 		if (encoder->crtc == crtc) {
2508 			amdgpu_crtc->encoder = encoder;
2509 			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2510 			break;
2511 		}
2512 	}
2513 	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2514 		amdgpu_crtc->encoder = NULL;
2515 		amdgpu_crtc->connector = NULL;
2516 		return false;
2517 	}
2518 	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2519 		return false;
2520 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2521 		return false;
2522 	/* pick pll */
2523 	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2524 	/* if we can't get a PPLL for a non-DP encoder, fail */
2525 	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2526 	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2527 		return false;
2528 
2529 	return true;
2530 }
2531 
2532 static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2533 				  struct drm_framebuffer *old_fb)
2534 {
2535 	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2536 }
2537 
2538 static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2539 					 struct drm_framebuffer *fb,
2540 					 int x, int y, enum mode_set_atomic state)
2541 {
	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2543 }
2544 
2545 static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2546 	.dpms = dce_v6_0_crtc_dpms,
2547 	.mode_fixup = dce_v6_0_crtc_mode_fixup,
2548 	.mode_set = dce_v6_0_crtc_mode_set,
2549 	.mode_set_base = dce_v6_0_crtc_set_base,
2550 	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2551 	.prepare = dce_v6_0_crtc_prepare,
2552 	.commit = dce_v6_0_crtc_commit,
2553 	.disable = dce_v6_0_crtc_disable,
2554 };
2555 
2556 static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2557 {
2558 	struct amdgpu_crtc *amdgpu_crtc;
2559 
2560 	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2561 			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2562 	if (amdgpu_crtc == NULL)
2563 		return -ENOMEM;
2564 
2565 	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2566 
2567 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2568 	amdgpu_crtc->crtc_id = index;
2569 	adev->mode_info.crtcs[index] = amdgpu_crtc;
2570 
2571 	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2572 	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2573 	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2574 	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2575 
2576 	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2577 
2578 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2579 	amdgpu_crtc->adjusted_clock = 0;
2580 	amdgpu_crtc->encoder = NULL;
2581 	amdgpu_crtc->connector = NULL;
2582 	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2583 
2584 	return 0;
2585 }
2586 
2587 static int dce_v6_0_early_init(void *handle)
2588 {
2589 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2590 
2591 	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2592 	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2593 
2594 	dce_v6_0_set_display_funcs(adev);
2595 
2596 	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
2597 
2598 	switch (adev->asic_type) {
2599 	case CHIP_TAHITI:
2600 	case CHIP_PITCAIRN:
2601 	case CHIP_VERDE:
2602 		adev->mode_info.num_hpd = 6;
2603 		adev->mode_info.num_dig = 6;
2604 		break;
2605 	case CHIP_OLAND:
2606 		adev->mode_info.num_hpd = 2;
2607 		adev->mode_info.num_dig = 2;
2608 		break;
2609 	default:
2610 		return -EINVAL;
2611 	}
2612 
2613 	dce_v6_0_set_irq_funcs(adev);
2614 
2615 	return 0;
2616 }
2617 
2618 static int dce_v6_0_sw_init(void *handle)
2619 {
2620 	int r, i;
2621 	bool ret;
2622 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2623 
2624 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2625 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2626 		if (r)
2627 			return r;
2628 	}
2629 
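	/* pageflip interrupts use src_ids 8, 10, ..., 18: one per crtc,
	 * decoded back to a crtc index in dce_v6_0_pageflip_irq()
	 */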
2630 	for (i = 8; i < 20; i += 2) {
2631 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2632 		if (r)
2633 			return r;
2634 	}
2635 
2636 	/* HPD hotplug */
2637 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2638 	if (r)
2639 		return r;
2640 
2641 	adev->mode_info.mode_config_initialized = true;
2642 
2643 	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2644 	adev->ddev->mode_config.async_page_flip = true;
2645 	adev->ddev->mode_config.max_width = 16384;
2646 	adev->ddev->mode_config.max_height = 16384;
2647 	adev->ddev->mode_config.preferred_depth = 24;
2648 	adev->ddev->mode_config.prefer_shadow = 1;
2649 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2650 
2651 	r = amdgpu_display_modeset_create_props(adev);
2652 	if (r)
2653 		return r;
2654 
2658 	/* allocate crtcs */
2659 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2660 		r = dce_v6_0_crtc_init(adev, i);
2661 		if (r)
2662 			return r;
2663 	}
2664 
2665 	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2666 	if (ret)
2667 		amdgpu_display_print_display_setup(adev->ddev);
2668 	else
2669 		return -EINVAL;
2670 
2671 	/* setup afmt */
2672 	r = dce_v6_0_afmt_init(adev);
2673 	if (r)
2674 		return r;
2675 
2676 	r = dce_v6_0_audio_init(adev);
2677 	if (r)
2678 		return r;
2679 
2680 	drm_kms_helper_poll_init(adev->ddev);
2681 
2682 	return r;
2683 }
2684 
2685 static int dce_v6_0_sw_fini(void *handle)
2686 {
2687 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2688 
2689 	kfree(adev->mode_info.bios_hardcoded_edid);
2690 
2691 	drm_kms_helper_poll_fini(adev->ddev);
2692 
2693 	dce_v6_0_audio_fini(adev);
2694 	dce_v6_0_afmt_fini(adev);
2695 
2696 	drm_mode_config_cleanup(adev->ddev);
2697 	adev->mode_info.mode_config_initialized = false;
2698 
2699 	return 0;
2700 }
2701 
2702 static int dce_v6_0_hw_init(void *handle)
2703 {
2704 	int i;
2705 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2706 
2707 	/* disable vga render */
2708 	dce_v6_0_set_vga_render_state(adev, false);
2709 	/* init dig PHYs, disp eng pll */
2710 	amdgpu_atombios_encoder_init_dig(adev);
2711 	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2712 
2713 	/* initialize hpd */
2714 	dce_v6_0_hpd_init(adev);
2715 
	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2719 
2720 	dce_v6_0_pageflip_interrupt_init(adev);
2721 
2722 	return 0;
2723 }
2724 
2725 static int dce_v6_0_hw_fini(void *handle)
2726 {
2727 	int i;
2728 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2729 
2730 	dce_v6_0_hpd_fini(adev);
2731 
	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2735 
2736 	dce_v6_0_pageflip_interrupt_fini(adev);
2737 
2738 	return 0;
2739 }
2740 
2741 static int dce_v6_0_suspend(void *handle)
2742 {
2743 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2744 
2745 	adev->mode_info.bl_level =
2746 		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2747 
2748 	return dce_v6_0_hw_fini(handle);
2749 }
2750 
2751 static int dce_v6_0_resume(void *handle)
2752 {
2753 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2754 	int ret;
2755 
2756 	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2757 							   adev->mode_info.bl_level);
2758 
2759 	ret = dce_v6_0_hw_init(handle);
2760 
2761 	/* turn on the BL */
2762 	if (adev->mode_info.bl_encoder) {
2763 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2764 								  adev->mode_info.bl_encoder);
2765 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2766 						    bl_level);
2767 	}
2768 
2769 	return ret;
2770 }
2771 
2772 static bool dce_v6_0_is_idle(void *handle)
2773 {
2774 	return true;
2775 }
2776 
2777 static int dce_v6_0_wait_for_idle(void *handle)
2778 {
2779 	return 0;
2780 }
2781 
2782 static int dce_v6_0_soft_reset(void *handle)
2783 {
	DRM_INFO("dce_v6_0_soft_reset: not implemented\n");
2785 	return 0;
2786 }
2787 
2788 static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2789 						     int crtc,
2790 						     enum amdgpu_interrupt_state state)
2791 {
2792 	u32 reg_block, interrupt_mask;
2793 
2794 	if (crtc >= adev->mode_info.num_crtc) {
2795 		DRM_DEBUG("invalid crtc %d\n", crtc);
2796 		return;
2797 	}
2798 
2799 	switch (crtc) {
2800 	case 0:
2801 		reg_block = SI_CRTC0_REGISTER_OFFSET;
2802 		break;
2803 	case 1:
2804 		reg_block = SI_CRTC1_REGISTER_OFFSET;
2805 		break;
2806 	case 2:
2807 		reg_block = SI_CRTC2_REGISTER_OFFSET;
2808 		break;
2809 	case 3:
2810 		reg_block = SI_CRTC3_REGISTER_OFFSET;
2811 		break;
2812 	case 4:
2813 		reg_block = SI_CRTC4_REGISTER_OFFSET;
2814 		break;
2815 	case 5:
2816 		reg_block = SI_CRTC5_REGISTER_OFFSET;
2817 		break;
2818 	default:
2819 		DRM_DEBUG("invalid crtc %d\n", crtc);
2820 		return;
2821 	}
2822 
2823 	switch (state) {
2824 	case AMDGPU_IRQ_STATE_DISABLE:
2825 		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2826 		interrupt_mask &= ~VBLANK_INT_MASK;
2827 		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2828 		break;
2829 	case AMDGPU_IRQ_STATE_ENABLE:
2830 		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2831 		interrupt_mask |= VBLANK_INT_MASK;
2832 		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2833 		break;
2834 	default:
2835 		break;
2836 	}
2837 }
2838 
2839 static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2840 						    int crtc,
2841 						    enum amdgpu_interrupt_state state)
2842 {
2843 
2844 }
2845 
2846 static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2847 					    struct amdgpu_irq_src *src,
2848 					    unsigned type,
2849 					    enum amdgpu_interrupt_state state)
2850 {
2851 	u32 dc_hpd_int_cntl;
2852 
2853 	if (type >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", type);
2855 		return 0;
2856 	}
2857 
2858 	switch (state) {
2859 	case AMDGPU_IRQ_STATE_DISABLE:
2860 		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2861 		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
2862 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2863 		break;
2864 	case AMDGPU_IRQ_STATE_ENABLE:
2865 		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2866 		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
2867 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2868 		break;
2869 	default:
2870 		break;
2871 	}
2872 
2873 	return 0;
2874 }
2875 
2876 static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2877 					     struct amdgpu_irq_src *src,
2878 					     unsigned type,
2879 					     enum amdgpu_interrupt_state state)
2880 {
2881 	switch (type) {
2882 	case AMDGPU_CRTC_IRQ_VBLANK1:
2883 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2884 		break;
2885 	case AMDGPU_CRTC_IRQ_VBLANK2:
2886 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2887 		break;
2888 	case AMDGPU_CRTC_IRQ_VBLANK3:
2889 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2890 		break;
2891 	case AMDGPU_CRTC_IRQ_VBLANK4:
2892 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2893 		break;
2894 	case AMDGPU_CRTC_IRQ_VBLANK5:
2895 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2896 		break;
2897 	case AMDGPU_CRTC_IRQ_VBLANK6:
2898 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2899 		break;
2900 	case AMDGPU_CRTC_IRQ_VLINE1:
2901 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
2902 		break;
2903 	case AMDGPU_CRTC_IRQ_VLINE2:
2904 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
2905 		break;
2906 	case AMDGPU_CRTC_IRQ_VLINE3:
2907 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
2908 		break;
2909 	case AMDGPU_CRTC_IRQ_VLINE4:
2910 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
2911 		break;
2912 	case AMDGPU_CRTC_IRQ_VLINE5:
2913 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
2914 		break;
2915 	case AMDGPU_CRTC_IRQ_VLINE6:
2916 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
2917 		break;
2918 	default:
2919 		break;
2920 	}
2921 	return 0;
2922 }
2923 
2924 static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2925 			     struct amdgpu_irq_src *source,
2926 			     struct amdgpu_iv_entry *entry)
2927 {
2928 	unsigned crtc = entry->src_id - 1;
2929 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2930 	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
2931 								    crtc);
2932 
2933 	switch (entry->src_data[0]) {
2934 	case 0: /* vblank */
2935 		if (disp_int & interrupt_status_offsets[crtc].vblank)
2936 			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
2937 		else
2938 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2939 
		if (amdgpu_irq_enabled(adev, source, irq_type))
			drm_handle_vblank(adev->ddev, crtc);
2943 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
2944 		break;
2945 	case 1: /* vline */
2946 		if (disp_int & interrupt_status_offsets[crtc].vline)
2947 			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
2948 		else
2949 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2950 
2951 		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
2952 		break;
2953 	default:
2954 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
2955 		break;
2956 	}
2957 
2958 	return 0;
2959 }
2960 
2961 static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
2962 						 struct amdgpu_irq_src *src,
2963 						 unsigned type,
2964 						 enum amdgpu_interrupt_state state)
2965 {
2966 	u32 reg;
2967 
2968 	if (type >= adev->mode_info.num_crtc) {
2969 		DRM_ERROR("invalid pageflip crtc %d\n", type);
2970 		return -EINVAL;
2971 	}
2972 
2973 	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
2974 	if (state == AMDGPU_IRQ_STATE_DISABLE)
2975 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
2976 		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2977 	else
2978 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
2979 		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
2980 
2981 	return 0;
2982 }
2983 
2984 static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
2985 				 struct amdgpu_irq_src *source,
2986 				 struct amdgpu_iv_entry *entry)
2987 {
2988 	unsigned long flags;
2989 	unsigned crtc_id;
2990 	struct amdgpu_crtc *amdgpu_crtc;
2991 	struct amdgpu_flip_work *works;
2992 
	crtc_id = (entry->src_id - 8) >> 1;

	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3000 
3001 	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3002 	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3003 		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3004 		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3005 
	/* the IRQ can fire during early bring-up, before the crtc is set up */
3007 	if (amdgpu_crtc == NULL)
3008 		return 0;
3009 
3010 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
3011 	works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3013 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3014 						"AMDGPU_FLIP_SUBMITTED(%d)\n",
3015 						amdgpu_crtc->pflip_status,
3016 						AMDGPU_FLIP_SUBMITTED);
3017 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3018 		return 0;
3019 	}
3020 
3021 	/* page flip completed. clean up */
3022 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3023 	amdgpu_crtc->pflip_works = NULL;
3024 
	/* wake up userspace */
3026 	if (works->event)
3027 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3028 
3029 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3030 
3031 	drm_crtc_vblank_put(&amdgpu_crtc->base);
3032 	schedule_work(&works->unpin_work);
3033 
3034 	return 0;
3035 }
3036 
3037 static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
3038 			    struct amdgpu_irq_src *source,
3039 			    struct amdgpu_iv_entry *entry)
3040 {
3041 	uint32_t disp_int, mask, tmp;
3042 	unsigned hpd;
3043 
3044 	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3045 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3046 		return 0;
3047 	}
3048 
3049 	hpd = entry->src_data[0];
3050 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3051 	mask = interrupt_status_offsets[hpd].hpd;
3052 
3053 	if (disp_int & mask) {
3054 		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3055 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3056 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3057 		schedule_work(&adev->hotplug_work);
3058 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3059 	}
3060 
3061 	return 0;
3063 }
3064 
3065 static int dce_v6_0_set_clockgating_state(void *handle,
3066 					  enum amd_clockgating_state state)
3067 {
3068 	return 0;
3069 }
3070 
3071 static int dce_v6_0_set_powergating_state(void *handle,
3072 					  enum amd_powergating_state state)
3073 {
3074 	return 0;
3075 }
3076 
3077 static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
3078 	.name = "dce_v6_0",
3079 	.early_init = dce_v6_0_early_init,
3080 	.late_init = NULL,
3081 	.sw_init = dce_v6_0_sw_init,
3082 	.sw_fini = dce_v6_0_sw_fini,
3083 	.hw_init = dce_v6_0_hw_init,
3084 	.hw_fini = dce_v6_0_hw_fini,
3085 	.suspend = dce_v6_0_suspend,
3086 	.resume = dce_v6_0_resume,
3087 	.is_idle = dce_v6_0_is_idle,
3088 	.wait_for_idle = dce_v6_0_wait_for_idle,
3089 	.soft_reset = dce_v6_0_soft_reset,
3090 	.set_clockgating_state = dce_v6_0_set_clockgating_state,
3091 	.set_powergating_state = dce_v6_0_set_powergating_state,
3092 };
3093 
3094 static void
3095 dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
3096 			  struct drm_display_mode *mode,
3097 			  struct drm_display_mode *adjusted_mode)
3098 {
3100 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3101 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3102 
3103 	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3104 
3105 	/* need to call this here rather than in prepare() since we need some crtc info */
3106 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3107 
3108 	/* set scaler clears this on some chips */
3109 	dce_v6_0_set_interleave(encoder->crtc, mode);
3110 
3111 	if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
3112 		dce_v6_0_afmt_enable(encoder, true);
3113 		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
3114 	}
3115 }
3116 
3117 static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
3118 {
3120 	struct amdgpu_device *adev = encoder->dev->dev_private;
3121 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3122 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3123 
3124 	if ((amdgpu_encoder->active_device &
3125 	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3126 	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3127 	     ENCODER_OBJECT_ID_NONE)) {
3128 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3129 		if (dig) {
3130 			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
3131 			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3132 				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3133 		}
3134 	}
3135 
3136 	amdgpu_atombios_scratch_regs_lock(adev, true);
3137 
3138 	if (connector) {
3139 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3140 
3141 		/* select the clock/data port if it uses a router */
3142 		if (amdgpu_connector->router.cd_valid)
3143 			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3144 
3145 		/* turn eDP panel on for mode set */
3146 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3147 			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3148 							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3149 	}
3150 
3151 	/* this is needed for the pll/ss setup to work correctly in some cases */
3152 	amdgpu_atombios_encoder_set_crtc_source(encoder);
3153 	/* set up the FMT blocks */
3154 	dce_v6_0_program_fmt(encoder);
3155 }
3156 
3157 static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
3158 {
3159 
3161 	struct amdgpu_device *adev = dev->dev_private;
3162 
3163 	/* need to call this here as we need the crtc set up */
3164 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3165 	amdgpu_atombios_scratch_regs_lock(adev, false);
3166 }
3167 
3168 static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
3169 {
3171 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3172 	struct amdgpu_encoder_atom_dig *dig;
3173 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3174 
3175 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3176 
3177 	if (amdgpu_atombios_encoder_is_digital(encoder)) {
3178 		if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
3179 			dce_v6_0_afmt_enable(encoder, false);
3180 		dig = amdgpu_encoder->enc_priv;
3181 		dig->dig_encoder = -1;
3182 	}
3183 	amdgpu_encoder->active_device = 0;
3184 }
3185 
/* these are handled by the primary encoders */
static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
{
}

static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
{
}

static void
dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{
}

static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
{
}

static void
dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
}

static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	return true;
}

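/*
 * drm_encoder_helper_funcs vtables.  External encoders only need the
 * no-op stubs above since the primary encoders do the real work; the
 * internal DIG and DAC encoders share the prepare/mode_set/commit path
 * and differ only in their detect callbacks (the DAC table also has no
 * disable hook).
 */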
static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
	.dpms = dce_v6_0_ext_dpms,
	.mode_fixup = dce_v6_0_ext_mode_fixup,
	.prepare = dce_v6_0_ext_prepare,
	.mode_set = dce_v6_0_ext_mode_set,
	.commit = dce_v6_0_ext_commit,
	.disable = dce_v6_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v6_0_encoder_prepare,
	.mode_set = dce_v6_0_encoder_mode_set,
	.commit = dce_v6_0_encoder_commit,
	.disable = dce_v6_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v6_0_encoder_prepare,
	.mode_set = dce_v6_0_encoder_mode_set,
	.commit = dce_v6_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

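/**
 * dce_v6_0_encoder_destroy - free an encoder
 *
 * Tears down the backlight for LCD panels and frees the encoder private
 * data along with the encoder itself.
 */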
static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
	.destroy = dce_v6_0_encoder_destroy,
};

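/**
 * dce_v6_0_encoder_add - register an encoder described by the BIOS
 * @adev: amdgpu device pointer
 * @encoder_enum: ATOM encoder object enum
 * @supported_device: ATOM device support bitmask
 * @caps: encoder capability flags
 *
 * Merges the device bitmask into an already registered encoder when the
 * enum matches, otherwise allocates a new amdgpu_encoder and hooks up
 * the DRM encoder type and helper vtable that match the encoder object
 * id.
 */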
static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
				 uint32_t encoder_enum,
				 uint32_t supported_device,
				 u16 caps)
{
	struct drm_device *dev = adev->ddev;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
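	/* possible_crtcs is a bitmask with one bit per CRTC the encoder can use */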
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;
	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC, NULL);
		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
		else
			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
		break;
	}
}

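/*
 * Hardware-specific display callbacks consumed by the common amdgpu
 * display code; everything here is backed either by the DCE 6.x
 * implementations in this file or by the shared atombios encoder
 * helpers.
 */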
static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
	.bandwidth_update = &dce_v6_0_bandwidth_update,
	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v6_0_hpd_sense,
	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
	.page_flip = &dce_v6_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v6_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
};

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
{
	adev->mode_info.funcs = &dce_v6_0_display_funcs;
}

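/* interrupt source callbacks for the CRTC, page flip and HPD interrupts */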
static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
	.set = dce_v6_0_set_crtc_interrupt_state,
	.process = dce_v6_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
	.set = dce_v6_0_set_pageflip_interrupt_state,
	.process = dce_v6_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
	.set = dce_v6_0_set_hpd_interrupt_state,
	.process = dce_v6_0_hpd_irq,
};

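/**
 * dce_v6_0_set_irq_funcs - wire up the display interrupt sources
 *
 * Sizes each interrupt source from the number of CRTCs/HPD pins
 * discovered for this chip and points it at the handlers above.
 */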
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.num_crtc > 0)
		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
	else
		adev->crtc_irq.num_types = 0;
	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}

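/*
 * IP block descriptors exported to the SoC-level setup code; si.c picks
 * the one matching the chip's DCE revision and registers it via
 * amdgpu_device_ip_block_add().
 */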
const struct amdgpu_ip_block_version dce_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v6_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 6,
	.minor = 4,
	.rev = 0,
	.funcs = &dce_v6_0_ip_funcs,
};