xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c (revision 973ad627)
1e2cdf640SKen Wang /*
2e2cdf640SKen Wang  * Copyright 2015 Advanced Micro Devices, Inc.
3e2cdf640SKen Wang  *
4e2cdf640SKen Wang  * Permission is hereby granted, free of charge, to any person obtaining a
5e2cdf640SKen Wang  * copy of this software and associated documentation files (the "Software"),
6e2cdf640SKen Wang  * to deal in the Software without restriction, including without limitation
7e2cdf640SKen Wang  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e2cdf640SKen Wang  * and/or sell copies of the Software, and to permit persons to whom the
9e2cdf640SKen Wang  * Software is furnished to do so, subject to the following conditions:
10e2cdf640SKen Wang  *
11e2cdf640SKen Wang  * The above copyright notice and this permission notice shall be included in
12e2cdf640SKen Wang  * all copies or substantial portions of the Software.
13e2cdf640SKen Wang  *
14e2cdf640SKen Wang  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e2cdf640SKen Wang  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e2cdf640SKen Wang  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e2cdf640SKen Wang  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e2cdf640SKen Wang  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e2cdf640SKen Wang  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e2cdf640SKen Wang  * OTHER DEALINGS IN THE SOFTWARE.
21e2cdf640SKen Wang  *
22e2cdf640SKen Wang  */
2347b757fbSSam Ravnborg 
2447b757fbSSam Ravnborg #include <linux/pci.h>
2547b757fbSSam Ravnborg 
2647b757fbSSam Ravnborg #include <drm/drm_fourcc.h>
27*973ad627SThomas Zimmermann #include <drm/drm_modeset_helper.h>
28*973ad627SThomas Zimmermann #include <drm/drm_modeset_helper_vtables.h>
2947b757fbSSam Ravnborg #include <drm/drm_vblank.h>
3047b757fbSSam Ravnborg 
31e2cdf640SKen Wang #include "amdgpu.h"
32e2cdf640SKen Wang #include "amdgpu_pm.h"
33e2cdf640SKen Wang #include "amdgpu_i2c.h"
34e2cdf640SKen Wang #include "atom.h"
35e2cdf640SKen Wang #include "amdgpu_atombios.h"
36e2cdf640SKen Wang #include "atombios_crtc.h"
37e2cdf640SKen Wang #include "atombios_encoders.h"
38e2cdf640SKen Wang #include "amdgpu_pll.h"
39e2cdf640SKen Wang #include "amdgpu_connectors.h"
405df58525SHuang Rui #include "amdgpu_display.h"
41b00861b9STom St Denis 
42b00861b9STom St Denis #include "bif/bif_3_0_d.h"
43b00861b9STom St Denis #include "bif/bif_3_0_sh_mask.h"
44b00861b9STom St Denis #include "oss/oss_1_0_d.h"
45b00861b9STom St Denis #include "oss/oss_1_0_sh_mask.h"
46b00861b9STom St Denis #include "gca/gfx_6_0_d.h"
47b00861b9STom St Denis #include "gca/gfx_6_0_sh_mask.h"
48b00861b9STom St Denis #include "gmc/gmc_6_0_d.h"
49b00861b9STom St Denis #include "gmc/gmc_6_0_sh_mask.h"
50b00861b9STom St Denis #include "dce/dce_6_0_d.h"
51b00861b9STom St Denis #include "dce/dce_6_0_sh_mask.h"
52b00861b9STom St Denis #include "gca/gfx_7_2_enum.h"
53e4c1d1a9SJean Delvare #include "dce_v6_0.h"
54b00861b9STom St Denis #include "si_enums.h"
55e2cdf640SKen Wang 
56e2cdf640SKen Wang static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
57e2cdf640SKen Wang static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
58e2cdf640SKen Wang 
59e2cdf640SKen Wang static const u32 crtc_offsets[6] =
60e2cdf640SKen Wang {
61e2cdf640SKen Wang 	SI_CRTC0_REGISTER_OFFSET,
62e2cdf640SKen Wang 	SI_CRTC1_REGISTER_OFFSET,
63e2cdf640SKen Wang 	SI_CRTC2_REGISTER_OFFSET,
64e2cdf640SKen Wang 	SI_CRTC3_REGISTER_OFFSET,
65e2cdf640SKen Wang 	SI_CRTC4_REGISTER_OFFSET,
66e2cdf640SKen Wang 	SI_CRTC5_REGISTER_OFFSET
67e2cdf640SKen Wang };
68e2cdf640SKen Wang 
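/* HPD pin register offsets, expressed relative to the HPD1 block so the
 * mmDC_HPD1_* register names can be indexed by amdgpu_hpd_id below.
 */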
6934386043SAlex Deucher static const u32 hpd_offsets[] =
7034386043SAlex Deucher {
71b00861b9STom St Denis 	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
72b00861b9STom St Denis 	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
73b00861b9STom St Denis 	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
74b00861b9STom St Denis 	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
75b00861b9STom St Denis 	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
76b00861b9STom St Denis 	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
7734386043SAlex Deucher };
7834386043SAlex Deucher 
79e2cdf640SKen Wang static const uint32_t dig_offsets[] = {
80e2cdf640SKen Wang 	SI_CRTC0_REGISTER_OFFSET,
81e2cdf640SKen Wang 	SI_CRTC1_REGISTER_OFFSET,
82e2cdf640SKen Wang 	SI_CRTC2_REGISTER_OFFSET,
83e2cdf640SKen Wang 	SI_CRTC3_REGISTER_OFFSET,
84e2cdf640SKen Wang 	SI_CRTC4_REGISTER_OFFSET,
85e2cdf640SKen Wang 	SI_CRTC5_REGISTER_OFFSET,
86e2cdf640SKen Wang 	(0x13830 - 0x7030) >> 2,
87e2cdf640SKen Wang };
88e2cdf640SKen Wang 
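/* Per-CRTC display interrupt status register and the vblank, vline and hpd
 * mask bits within it.
 */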
89e2cdf640SKen Wang static const struct {
90e2cdf640SKen Wang 	uint32_t	reg;
91e2cdf640SKen Wang 	uint32_t	vblank;
92e2cdf640SKen Wang 	uint32_t	vline;
93e2cdf640SKen Wang 	uint32_t	hpd;
94e2cdf640SKen Wang 
95e2cdf640SKen Wang } interrupt_status_offsets[6] = { {
96b00861b9STom St Denis 	.reg = mmDISP_INTERRUPT_STATUS,
97e2cdf640SKen Wang 	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
98e2cdf640SKen Wang 	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
99e2cdf640SKen Wang 	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
100e2cdf640SKen Wang }, {
101b00861b9STom St Denis 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
102e2cdf640SKen Wang 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
103e2cdf640SKen Wang 	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
104e2cdf640SKen Wang 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
105e2cdf640SKen Wang }, {
106b00861b9STom St Denis 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
107e2cdf640SKen Wang 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
108e2cdf640SKen Wang 	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
109e2cdf640SKen Wang 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
110e2cdf640SKen Wang }, {
111b00861b9STom St Denis 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
112e2cdf640SKen Wang 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
113e2cdf640SKen Wang 	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
114e2cdf640SKen Wang 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
115e2cdf640SKen Wang }, {
116b00861b9STom St Denis 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
117e2cdf640SKen Wang 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
118e2cdf640SKen Wang 	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
119e2cdf640SKen Wang 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
120e2cdf640SKen Wang }, {
121b00861b9STom St Denis 	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
122e2cdf640SKen Wang 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
123e2cdf640SKen Wang 	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
124e2cdf640SKen Wang 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
125e2cdf640SKen Wang } };
126e2cdf640SKen Wang 
127e2cdf640SKen Wang static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
128e2cdf640SKen Wang 				     u32 block_offset, u32 reg)
129e2cdf640SKen Wang {
1304caca706SXiaojie Yuan 	unsigned long flags;
1314caca706SXiaojie Yuan 	u32 r;
1324caca706SXiaojie Yuan 
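	/* The Azalia endpoint registers sit behind an index/data pair, so the
	 * two-step access is serialized with the audio endpoint index lock.
	 */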
1334caca706SXiaojie Yuan 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
1344caca706SXiaojie Yuan 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
1354caca706SXiaojie Yuan 	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
1364caca706SXiaojie Yuan 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
1374caca706SXiaojie Yuan 
1384caca706SXiaojie Yuan 	return r;
139e2cdf640SKen Wang }
140e2cdf640SKen Wang 
141e2cdf640SKen Wang static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
142e2cdf640SKen Wang 				      u32 block_offset, u32 reg, u32 v)
143e2cdf640SKen Wang {
1444caca706SXiaojie Yuan 	unsigned long flags;
1454caca706SXiaojie Yuan 
1464caca706SXiaojie Yuan 	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
1474caca706SXiaojie Yuan 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
1484caca706SXiaojie Yuan 		reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
1494caca706SXiaojie Yuan 	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
1504caca706SXiaojie Yuan 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
151e2cdf640SKen Wang }
152e2cdf640SKen Wang 
153e2cdf640SKen Wang static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
154e2cdf640SKen Wang {
155e2cdf640SKen Wang 	if (crtc >= adev->mode_info.num_crtc)
156e2cdf640SKen Wang 		return 0;
157e2cdf640SKen Wang 	else
158b00861b9STom St Denis 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
159e2cdf640SKen Wang }
160e2cdf640SKen Wang 
161e2cdf640SKen Wang static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
162e2cdf640SKen Wang {
163e2cdf640SKen Wang 	unsigned i;
164e2cdf640SKen Wang 
165e2cdf640SKen Wang 	/* Enable pflip interrupts */
16602124a03SAlex Deucher 	for (i = 0; i < adev->mode_info.num_crtc; i++)
167e2cdf640SKen Wang 		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
168e2cdf640SKen Wang }
169e2cdf640SKen Wang 
170e2cdf640SKen Wang static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
171e2cdf640SKen Wang {
172e2cdf640SKen Wang 	unsigned i;
173e2cdf640SKen Wang 
174e2cdf640SKen Wang 	/* Disable pflip interrupts */
17502124a03SAlex Deucher 	for (i = 0; i < adev->mode_info.num_crtc; i++)
176e2cdf640SKen Wang 		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
177e2cdf640SKen Wang }
178e2cdf640SKen Wang 
179e2cdf640SKen Wang /**
180e2cdf640SKen Wang  * dce_v6_0_page_flip - pageflip callback.
181e2cdf640SKen Wang  *
182e2cdf640SKen Wang  * @adev: amdgpu_device pointer
183e2cdf640SKen Wang  * @crtc_id: crtc to cleanup pageflip on
184e2cdf640SKen Wang  * @crtc_base: new address of the crtc (GPU MC address)
1858a149a9dSLee Jones  * @async: asynchronous flip
186e2cdf640SKen Wang  *
187e2cdf640SKen Wang  * Does the actual pageflip (evergreen+).
188e2cdf640SKen Wang  * During vblank we take the crtc lock and wait for the update_pending
189e2cdf640SKen Wang  * bit to go high, when it does, we release the lock, and allow the
190e2cdf640SKen Wang  * double buffered update to take place.
191e2cdf640SKen Wang  * Returns the current update pending status.
192e2cdf640SKen Wang  */
193e2cdf640SKen Wang static void dce_v6_0_page_flip(struct amdgpu_device *adev,
194e2cdf640SKen Wang 			       int crtc_id, u64 crtc_base, bool async)
195e2cdf640SKen Wang {
196e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
197965ebe3dSMichel Dänzer 	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
198e2cdf640SKen Wang 
199e2cdf640SKen Wang 	/* flip at hsync for async, default is vsync */
200b00861b9STom St Denis 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
201b00861b9STom St Denis 	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
202965ebe3dSMichel Dänzer 	/* update pitch */
203965ebe3dSMichel Dänzer 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
204965ebe3dSMichel Dänzer 	       fb->pitches[0] / fb->format->cpp[0]);
205e2cdf640SKen Wang 	/* update the scanout addresses */
206b00861b9STom St Denis 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
207e2cdf640SKen Wang 	       upper_32_bits(crtc_base));
208b00861b9STom St Denis 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
209e2cdf640SKen Wang 	       (u32)crtc_base);
210e2cdf640SKen Wang 
211e2cdf640SKen Wang 	/* post the write */
212b00861b9STom St Denis 	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
213e2cdf640SKen Wang }
214e2cdf640SKen Wang 
215e2cdf640SKen Wang static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
216e2cdf640SKen Wang 					u32 *vbl, u32 *position)
217e2cdf640SKen Wang {
218e2cdf640SKen Wang 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
219e2cdf640SKen Wang 		return -EINVAL;
220b00861b9STom St Denis 	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
221b00861b9STom St Denis 	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
222e2cdf640SKen Wang 
223e2cdf640SKen Wang 	return 0;
224e2cdf640SKen Wang 
225e2cdf640SKen Wang }
226e2cdf640SKen Wang 
227e2cdf640SKen Wang /**
228e2cdf640SKen Wang  * dce_v6_0_hpd_sense - hpd sense callback.
229e2cdf640SKen Wang  *
230e2cdf640SKen Wang  * @adev: amdgpu_device pointer
231e2cdf640SKen Wang  * @hpd: hpd (hotplug detect) pin
232e2cdf640SKen Wang  *
233e2cdf640SKen Wang  * Checks if a digital monitor is connected (evergreen+).
234e2cdf640SKen Wang  * Returns true if connected, false if not connected.
235e2cdf640SKen Wang  */
236e2cdf640SKen Wang static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
237e2cdf640SKen Wang 			       enum amdgpu_hpd_id hpd)
238e2cdf640SKen Wang {
239e2cdf640SKen Wang 	bool connected = false;
240e2cdf640SKen Wang 
24134386043SAlex Deucher 	if (hpd >= adev->mode_info.num_hpd)
24234386043SAlex Deucher 		return connected;
24334386043SAlex Deucher 
244b00861b9STom St Denis 	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
245e2cdf640SKen Wang 		connected = true;
246e2cdf640SKen Wang 
247e2cdf640SKen Wang 	return connected;
248e2cdf640SKen Wang }
249e2cdf640SKen Wang 
250e2cdf640SKen Wang /**
251e2cdf640SKen Wang  * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
252e2cdf640SKen Wang  *
253e2cdf640SKen Wang  * @adev: amdgpu_device pointer
254e2cdf640SKen Wang  * @hpd: hpd (hotplug detect) pin
255e2cdf640SKen Wang  *
256e2cdf640SKen Wang  * Set the polarity of the hpd pin (evergreen+).
257e2cdf640SKen Wang  */
258e2cdf640SKen Wang static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
259e2cdf640SKen Wang 				      enum amdgpu_hpd_id hpd)
260e2cdf640SKen Wang {
261e2cdf640SKen Wang 	u32 tmp;
262e2cdf640SKen Wang 	bool connected = dce_v6_0_hpd_sense(adev, hpd);
263e2cdf640SKen Wang 
26434386043SAlex Deucher 	if (hpd >= adev->mode_info.num_hpd)
26534386043SAlex Deucher 		return;
26634386043SAlex Deucher 
267b00861b9STom St Denis 	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
268e2cdf640SKen Wang 	if (connected)
269b00861b9STom St Denis 		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
270e2cdf640SKen Wang 	else
271b00861b9STom St Denis 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
272b00861b9STom St Denis 	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
273e2cdf640SKen Wang }
274e2cdf640SKen Wang 
275e2cdf640SKen Wang /**
276e2cdf640SKen Wang  * dce_v6_0_hpd_init - hpd setup callback.
277e2cdf640SKen Wang  *
278e2cdf640SKen Wang  * @adev: amdgpu_device pointer
279e2cdf640SKen Wang  *
280e2cdf640SKen Wang  * Setup the hpd pins used by the card (evergreen+).
281e2cdf640SKen Wang  * Enable the pin, set the polarity, and enable the hpd interrupts.
282e2cdf640SKen Wang  */
283e2cdf640SKen Wang static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
284e2cdf640SKen Wang {
2854a580877SLuben Tuikov 	struct drm_device *dev = adev_to_drm(adev);
286e2cdf640SKen Wang 	struct drm_connector *connector;
287f8d2d39eSLyude Paul 	struct drm_connector_list_iter iter;
288079ea190SAlex Deucher 	u32 tmp;
289e2cdf640SKen Wang 
290f8d2d39eSLyude Paul 	drm_connector_list_iter_begin(dev, &iter);
291f8d2d39eSLyude Paul 	drm_for_each_connector_iter(connector, &iter) {
292e2cdf640SKen Wang 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
293e2cdf640SKen Wang 
29434386043SAlex Deucher 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
29534386043SAlex Deucher 			continue;
29634386043SAlex Deucher 
297b00861b9STom St Denis 		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
298b00861b9STom St Denis 		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
299b00861b9STom St Denis 		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
3002744b647SAlex Deucher 
3012744b647SAlex Deucher 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
3022744b647SAlex Deucher 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
3032744b647SAlex Deucher 			/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
3042744b647SAlex Deucher 			 * aux dp channel on iMacs; this helps (but does not completely fix)
3052744b647SAlex Deucher 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
3062744b647SAlex Deucher 			 * and also avoids interrupt storms during dpms.
3072744b647SAlex Deucher 			 */
308b00861b9STom St Denis 			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
309b00861b9STom St Denis 			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
310b00861b9STom St Denis 			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
3112744b647SAlex Deucher 			continue;
3122744b647SAlex Deucher 		}
3132744b647SAlex Deucher 
314e2cdf640SKen Wang 		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
315e2cdf640SKen Wang 		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
316e2cdf640SKen Wang 	}
317f8d2d39eSLyude Paul 	drm_connector_list_iter_end(&iter);
318e2cdf640SKen Wang }
319e2cdf640SKen Wang 
320e2cdf640SKen Wang /**
321e2cdf640SKen Wang  * dce_v6_0_hpd_fini - hpd tear down callback.
322e2cdf640SKen Wang  *
323e2cdf640SKen Wang  * @adev: amdgpu_device pointer
324e2cdf640SKen Wang  *
325e2cdf640SKen Wang  * Tear down the hpd pins used by the card (evergreen+).
326e2cdf640SKen Wang  * Disable the hpd interrupts.
327e2cdf640SKen Wang  */
328e2cdf640SKen Wang static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
329e2cdf640SKen Wang {
3304a580877SLuben Tuikov 	struct drm_device *dev = adev_to_drm(adev);
331e2cdf640SKen Wang 	struct drm_connector *connector;
332f8d2d39eSLyude Paul 	struct drm_connector_list_iter iter;
333079ea190SAlex Deucher 	u32 tmp;
334e2cdf640SKen Wang 
335f8d2d39eSLyude Paul 	drm_connector_list_iter_begin(dev, &iter);
336f8d2d39eSLyude Paul 	drm_for_each_connector_iter(connector, &iter) {
337e2cdf640SKen Wang 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
338e2cdf640SKen Wang 
33934386043SAlex Deucher 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
34034386043SAlex Deucher 			continue;
34134386043SAlex Deucher 
342b00861b9STom St Denis 		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
343b00861b9STom St Denis 		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
34440835624SMaíra Canal 		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
34534386043SAlex Deucher 
346e2cdf640SKen Wang 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
347e2cdf640SKen Wang 	}
348f8d2d39eSLyude Paul 	drm_connector_list_iter_end(&iter);
349e2cdf640SKen Wang }
350e2cdf640SKen Wang 
351e2cdf640SKen Wang static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
352e2cdf640SKen Wang {
353b00861b9STom St Denis 	return mmDC_GPIO_HPD_A;
354e2cdf640SKen Wang }
355e2cdf640SKen Wang 
356e2cdf640SKen Wang static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
357e2cdf640SKen Wang 					  bool render)
358e2cdf640SKen Wang {
359e2cdf640SKen Wang 	if (!render)
360b00861b9STom St Denis 		WREG32(mmVGA_RENDER_CONTROL,
361b00861b9STom St Denis 			RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
362e2cdf640SKen Wang 
363e2cdf640SKen Wang }
364e2cdf640SKen Wang 
3651d160f43SAlex Deucher static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
3661d160f43SAlex Deucher {
3671d160f43SAlex Deucher 	switch (adev->asic_type) {
3681d160f43SAlex Deucher 	case CHIP_TAHITI:
3691d160f43SAlex Deucher 	case CHIP_PITCAIRN:
3701d160f43SAlex Deucher 	case CHIP_VERDE:
371c990b718STom St Denis 		return 6;
3721d160f43SAlex Deucher 	case CHIP_OLAND:
373c990b718STom St Denis 		return 2;
3741d160f43SAlex Deucher 	default:
375c990b718STom St Denis 		return 0;
3761d160f43SAlex Deucher 	}
3771d160f43SAlex Deucher }
3781d160f43SAlex Deucher 
3791d160f43SAlex Deucher void dce_v6_0_disable_dce(struct amdgpu_device *adev)
3801d160f43SAlex Deucher {
3811d160f43SAlex Deucher 	/* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
3821d160f43SAlex Deucher 	if (amdgpu_atombios_has_dce_engine_info(adev)) {
3831d160f43SAlex Deucher 		u32 tmp;
3841d160f43SAlex Deucher 		int crtc_enabled, i;
3851d160f43SAlex Deucher 
3861d160f43SAlex Deucher 		dce_v6_0_set_vga_render_state(adev, false);
3871d160f43SAlex Deucher 
3881d160f43SAlex Deucher 		/* Disable CRTCs */
3891d160f43SAlex Deucher 		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
390b00861b9STom St Denis 			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
391b00861b9STom St Denis 				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
3921d160f43SAlex Deucher 			if (crtc_enabled) {
393b00861b9STom St Denis 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
394b00861b9STom St Denis 				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
395b00861b9STom St Denis 				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
396b00861b9STom St Denis 				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
397b00861b9STom St Denis 				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
3981d160f43SAlex Deucher 			}
3991d160f43SAlex Deucher 		}
4001d160f43SAlex Deucher 	}
4011d160f43SAlex Deucher }
4021d160f43SAlex Deucher 
403e2cdf640SKen Wang static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
404e2cdf640SKen Wang {
405e2cdf640SKen Wang 
406e2cdf640SKen Wang 	struct drm_device *dev = encoder->dev;
4071348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
408e2cdf640SKen Wang 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
409e2cdf640SKen Wang 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
410e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
411e2cdf640SKen Wang 	int bpc = 0;
412e2cdf640SKen Wang 	u32 tmp = 0;
413e2cdf640SKen Wang 	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
414e2cdf640SKen Wang 
415e2cdf640SKen Wang 	if (connector) {
416e2cdf640SKen Wang 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
417e2cdf640SKen Wang 		bpc = amdgpu_connector_get_monitor_bpc(connector);
418e2cdf640SKen Wang 		dither = amdgpu_connector->dither;
419e2cdf640SKen Wang 	}
420e2cdf640SKen Wang 
421e2cdf640SKen Wang 	/* LVDS FMT is set up by atom */
422e2cdf640SKen Wang 	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
423e2cdf640SKen Wang 		return;
424e2cdf640SKen Wang 
425e2cdf640SKen Wang 	if (bpc == 0)
426e2cdf640SKen Wang 		return;
427e2cdf640SKen Wang 
428e2cdf640SKen Wang 
429e2cdf640SKen Wang 	switch (bpc) {
430e2cdf640SKen Wang 	case 6:
431e2cdf640SKen Wang 		if (dither == AMDGPU_FMT_DITHER_ENABLE)
432e2cdf640SKen Wang 			/* XXX sort out optimal dither settings */
433b00861b9STom St Denis 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
434b00861b9STom St Denis 				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
435b00861b9STom St Denis 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
436e2cdf640SKen Wang 		else
437b00861b9STom St Denis 			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
438e2cdf640SKen Wang 		break;
439e2cdf640SKen Wang 	case 8:
440e2cdf640SKen Wang 		if (dither == AMDGPU_FMT_DITHER_ENABLE)
441e2cdf640SKen Wang 			/* XXX sort out optimal dither settings */
442b00861b9STom St Denis 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
443b00861b9STom St Denis 				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
444b00861b9STom St Denis 				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
445b00861b9STom St Denis 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
446b00861b9STom St Denis 				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
447e2cdf640SKen Wang 		else
448b00861b9STom St Denis 			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
449b00861b9STom St Denis 				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
450e2cdf640SKen Wang 		break;
451e2cdf640SKen Wang 	case 10:
452e2cdf640SKen Wang 	default:
453e2cdf640SKen Wang 		/* not needed */
454e2cdf640SKen Wang 		break;
455e2cdf640SKen Wang 	}
456e2cdf640SKen Wang 
457b00861b9STom St Denis 	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
458e2cdf640SKen Wang }
459e2cdf640SKen Wang 
460e2cdf640SKen Wang /**
461f72c26fcSLee Jones  * si_get_number_of_dram_channels - get the number of dram channels
462e2cdf640SKen Wang  *
463e2cdf640SKen Wang  * @adev: amdgpu_device pointer
464e2cdf640SKen Wang  *
465e2cdf640SKen Wang  * Look up the number of video ram channels (CIK).
466e2cdf640SKen Wang  * Used for display watermark bandwidth calculations
467e2cdf640SKen Wang  * Returns the number of dram channels
468e2cdf640SKen Wang  */
469e2cdf640SKen Wang static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
470e2cdf640SKen Wang {
471b00861b9STom St Denis 	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
472e2cdf640SKen Wang 
473e2cdf640SKen Wang 	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
474e2cdf640SKen Wang 	case 0:
475e2cdf640SKen Wang 	default:
476e2cdf640SKen Wang 		return 1;
477e2cdf640SKen Wang 	case 1:
478e2cdf640SKen Wang 		return 2;
479e2cdf640SKen Wang 	case 2:
480e2cdf640SKen Wang 		return 4;
481e2cdf640SKen Wang 	case 3:
482e2cdf640SKen Wang 		return 8;
483e2cdf640SKen Wang 	case 4:
484e2cdf640SKen Wang 		return 3;
485e2cdf640SKen Wang 	case 5:
486e2cdf640SKen Wang 		return 6;
487e2cdf640SKen Wang 	case 6:
488e2cdf640SKen Wang 		return 10;
489e2cdf640SKen Wang 	case 7:
490e2cdf640SKen Wang 		return 12;
491e2cdf640SKen Wang 	case 8:
492e2cdf640SKen Wang 		return 16;
493e2cdf640SKen Wang 	}
494e2cdf640SKen Wang }
495e2cdf640SKen Wang 
496e2cdf640SKen Wang struct dce6_wm_params {
497e2cdf640SKen Wang 	u32 dram_channels; /* number of dram channels */
498e2cdf640SKen Wang 	u32 yclk;          /* bandwidth per dram data pin in kHz */
499e2cdf640SKen Wang 	u32 sclk;          /* engine clock in kHz */
500e2cdf640SKen Wang 	u32 disp_clk;      /* display clock in kHz */
501e2cdf640SKen Wang 	u32 src_width;     /* viewport width */
502e2cdf640SKen Wang 	u32 active_time;   /* active display time in ns */
503e2cdf640SKen Wang 	u32 blank_time;    /* blank time in ns */
504e2cdf640SKen Wang 	bool interlaced;    /* mode is interlaced */
505e2cdf640SKen Wang 	fixed20_12 vsc;    /* vertical scale ratio */
506e2cdf640SKen Wang 	u32 num_heads;     /* number of active crtcs */
507e2cdf640SKen Wang 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
508e2cdf640SKen Wang 	u32 lb_size;       /* line buffer allocated to pipe */
509e2cdf640SKen Wang 	u32 vtaps;         /* vertical scaler taps */
510e2cdf640SKen Wang };
511e2cdf640SKen Wang 
512e2cdf640SKen Wang /**
513e2cdf640SKen Wang  * dce_v6_0_dram_bandwidth - get the dram bandwidth
514e2cdf640SKen Wang  *
515e2cdf640SKen Wang  * @wm: watermark calculation data
516e2cdf640SKen Wang  *
517e2cdf640SKen Wang  * Calculate the raw dram bandwidth (CIK).
518e2cdf640SKen Wang  * Used for display watermark bandwidth calculations
519e2cdf640SKen Wang  * Returns the dram bandwidth in MBytes/s
520e2cdf640SKen Wang  */
521e2cdf640SKen Wang static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
522e2cdf640SKen Wang {
523e2cdf640SKen Wang 	/* Calculate raw DRAM Bandwidth */
524e2cdf640SKen Wang 	fixed20_12 dram_efficiency; /* 0.7 */
525e2cdf640SKen Wang 	fixed20_12 yclk, dram_channels, bandwidth;
526e2cdf640SKen Wang 	fixed20_12 a;
527e2cdf640SKen Wang 
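	/* Fixed-point (20.12) evaluation of:
	 * bandwidth = (yclk / 1000) * (dram_channels * 4) * 0.7
	 */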
528e2cdf640SKen Wang 	a.full = dfixed_const(1000);
529e2cdf640SKen Wang 	yclk.full = dfixed_const(wm->yclk);
530e2cdf640SKen Wang 	yclk.full = dfixed_div(yclk, a);
531e2cdf640SKen Wang 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
532e2cdf640SKen Wang 	a.full = dfixed_const(10);
533e2cdf640SKen Wang 	dram_efficiency.full = dfixed_const(7);
534e2cdf640SKen Wang 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
535e2cdf640SKen Wang 	bandwidth.full = dfixed_mul(dram_channels, yclk);
536e2cdf640SKen Wang 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
537e2cdf640SKen Wang 
538e2cdf640SKen Wang 	return dfixed_trunc(bandwidth);
539e2cdf640SKen Wang }
540e2cdf640SKen Wang 
541e2cdf640SKen Wang /**
542e2cdf640SKen Wang  * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
543e2cdf640SKen Wang  *
544e2cdf640SKen Wang  * @wm: watermark calculation data
545e2cdf640SKen Wang  *
546e2cdf640SKen Wang  * Calculate the dram bandwidth used for display (CIK).
547e2cdf640SKen Wang  * Used for display watermark bandwidth calculations
548e2cdf640SKen Wang  * Returns the dram bandwidth for display in MBytes/s
549e2cdf640SKen Wang  */
550e2cdf640SKen Wang static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
551e2cdf640SKen Wang {
552e2cdf640SKen Wang 	/* Calculate DRAM Bandwidth and the part allocated to display. */
553e2cdf640SKen Wang 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
554e2cdf640SKen Wang 	fixed20_12 yclk, dram_channels, bandwidth;
555e2cdf640SKen Wang 	fixed20_12 a;
556e2cdf640SKen Wang 
557e2cdf640SKen Wang 	a.full = dfixed_const(1000);
558e2cdf640SKen Wang 	yclk.full = dfixed_const(wm->yclk);
559e2cdf640SKen Wang 	yclk.full = dfixed_div(yclk, a);
560e2cdf640SKen Wang 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
561e2cdf640SKen Wang 	a.full = dfixed_const(10);
562e2cdf640SKen Wang 	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
563e2cdf640SKen Wang 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
564e2cdf640SKen Wang 	bandwidth.full = dfixed_mul(dram_channels, yclk);
565e2cdf640SKen Wang 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
566e2cdf640SKen Wang 
567e2cdf640SKen Wang 	return dfixed_trunc(bandwidth);
568e2cdf640SKen Wang }
569e2cdf640SKen Wang 
570e2cdf640SKen Wang /**
571e2cdf640SKen Wang  * dce_v6_0_data_return_bandwidth - get the data return bandwidth
572e2cdf640SKen Wang  *
573e2cdf640SKen Wang  * @wm: watermark calculation data
574e2cdf640SKen Wang  *
575e2cdf640SKen Wang  * Calculate the data return bandwidth used for display (CIK).
576e2cdf640SKen Wang  * Used for display watermark bandwidth calculations
577e2cdf640SKen Wang  * Returns the data return bandwidth in MBytes/s
578e2cdf640SKen Wang  */
579e2cdf640SKen Wang static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
580e2cdf640SKen Wang {
581e2cdf640SKen Wang 	/* Calculate the display Data return Bandwidth */
582e2cdf640SKen Wang 	fixed20_12 return_efficiency; /* 0.8 */
583e2cdf640SKen Wang 	fixed20_12 sclk, bandwidth;
584e2cdf640SKen Wang 	fixed20_12 a;
585e2cdf640SKen Wang 
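	/* Fixed-point evaluation of: bandwidth = (sclk / 1000) * 32 * 0.8 */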
586e2cdf640SKen Wang 	a.full = dfixed_const(1000);
587e2cdf640SKen Wang 	sclk.full = dfixed_const(wm->sclk);
588e2cdf640SKen Wang 	sclk.full = dfixed_div(sclk, a);
589e2cdf640SKen Wang 	a.full = dfixed_const(10);
590e2cdf640SKen Wang 	return_efficiency.full = dfixed_const(8);
591e2cdf640SKen Wang 	return_efficiency.full = dfixed_div(return_efficiency, a);
592e2cdf640SKen Wang 	a.full = dfixed_const(32);
593e2cdf640SKen Wang 	bandwidth.full = dfixed_mul(a, sclk);
594e2cdf640SKen Wang 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
595e2cdf640SKen Wang 
596e2cdf640SKen Wang 	return dfixed_trunc(bandwidth);
597e2cdf640SKen Wang }
598e2cdf640SKen Wang 
599e2cdf640SKen Wang /**
600e2cdf640SKen Wang  * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
601e2cdf640SKen Wang  *
602e2cdf640SKen Wang  * @wm: watermark calculation data
603e2cdf640SKen Wang  *
604e2cdf640SKen Wang  * Calculate the dmif bandwidth used for display (CIK).
605e2cdf640SKen Wang  * Used for display watermark bandwidth calculations
606e2cdf640SKen Wang  * Returns the dmif bandwidth in MBytes/s
607e2cdf640SKen Wang  */
608e2cdf640SKen Wang static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
609e2cdf640SKen Wang {
610e2cdf640SKen Wang 	/* Calculate the DMIF Request Bandwidth */
611e2cdf640SKen Wang 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
612e2cdf640SKen Wang 	fixed20_12 disp_clk, bandwidth;
613e2cdf640SKen Wang 	fixed20_12 a, b;
614e2cdf640SKen Wang 
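	/* Fixed-point evaluation of: bandwidth = (disp_clk / 1000) * 32 * 0.8 */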
615e2cdf640SKen Wang 	a.full = dfixed_const(1000);
616e2cdf640SKen Wang 	disp_clk.full = dfixed_const(wm->disp_clk);
617e2cdf640SKen Wang 	disp_clk.full = dfixed_div(disp_clk, a);
618e2cdf640SKen Wang 	a.full = dfixed_const(32);
619e2cdf640SKen Wang 	b.full = dfixed_mul(a, disp_clk);
620e2cdf640SKen Wang 
621e2cdf640SKen Wang 	a.full = dfixed_const(10);
622e2cdf640SKen Wang 	disp_clk_request_efficiency.full = dfixed_const(8);
623e2cdf640SKen Wang 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
624e2cdf640SKen Wang 
625e2cdf640SKen Wang 	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
626e2cdf640SKen Wang 
627e2cdf640SKen Wang 	return dfixed_trunc(bandwidth);
628e2cdf640SKen Wang }
629e2cdf640SKen Wang 
630e2cdf640SKen Wang /**
631e2cdf640SKen Wang  * dce_v6_0_available_bandwidth - get the min available bandwidth
632e2cdf640SKen Wang  *
633e2cdf640SKen Wang  * @wm: watermark calculation data
634e2cdf640SKen Wang  *
635e2cdf640SKen Wang  * Calculate the min available bandwidth used for display (CIK).
636e2cdf640SKen Wang  * Used for display watermark bandwidth calculations
637e2cdf640SKen Wang  * Returns the min available bandwidth in MBytes/s
638e2cdf640SKen Wang  */
639e2cdf640SKen Wang static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
640e2cdf640SKen Wang {
641e2cdf640SKen Wang 	/* Calculate the available bandwidth. Display can use this temporarily but not on average. */
642e2cdf640SKen Wang 	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
643e2cdf640SKen Wang 	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
644e2cdf640SKen Wang 	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
645e2cdf640SKen Wang 
646e2cdf640SKen Wang 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
647e2cdf640SKen Wang }
648e2cdf640SKen Wang 
649e2cdf640SKen Wang /**
650e2cdf640SKen Wang  * dce_v6_0_average_bandwidth - get the average available bandwidth
651e2cdf640SKen Wang  *
652e2cdf640SKen Wang  * @wm: watermark calculation data
653e2cdf640SKen Wang  *
654e2cdf640SKen Wang  * Calculate the average available bandwidth used for display (CIK).
655e2cdf640SKen Wang  * Used for display watermark bandwidth calculations
656e2cdf640SKen Wang  * Returns the average available bandwidth in MBytes/s
657e2cdf640SKen Wang  */
658e2cdf640SKen Wang static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
659e2cdf640SKen Wang {
660e2cdf640SKen Wang 	/* Calculate the display mode Average Bandwidth
661e2cdf640SKen Wang 	 * DisplayMode should contain the source and destination dimensions,
662e2cdf640SKen Wang 	 * timing, etc.
663e2cdf640SKen Wang 	 */
664e2cdf640SKen Wang 	fixed20_12 bpp;
665e2cdf640SKen Wang 	fixed20_12 line_time;
666e2cdf640SKen Wang 	fixed20_12 src_width;
667e2cdf640SKen Wang 	fixed20_12 bandwidth;
668e2cdf640SKen Wang 	fixed20_12 a;
669e2cdf640SKen Wang 
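	/* Fixed-point evaluation of:
	 * bandwidth = src_width * bytes_per_pixel * vsc / ((active_time + blank_time) / 1000)
	 */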
670e2cdf640SKen Wang 	a.full = dfixed_const(1000);
671e2cdf640SKen Wang 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
672e2cdf640SKen Wang 	line_time.full = dfixed_div(line_time, a);
673e2cdf640SKen Wang 	bpp.full = dfixed_const(wm->bytes_per_pixel);
674e2cdf640SKen Wang 	src_width.full = dfixed_const(wm->src_width);
675e2cdf640SKen Wang 	bandwidth.full = dfixed_mul(src_width, bpp);
676e2cdf640SKen Wang 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
677e2cdf640SKen Wang 	bandwidth.full = dfixed_div(bandwidth, line_time);
678e2cdf640SKen Wang 
679e2cdf640SKen Wang 	return dfixed_trunc(bandwidth);
680e2cdf640SKen Wang }
681e2cdf640SKen Wang 
682e2cdf640SKen Wang /**
683e2cdf640SKen Wang  * dce_v6_0_latency_watermark - get the latency watermark
684e2cdf640SKen Wang  *
685e2cdf640SKen Wang  * @wm: watermark calculation data
686e2cdf640SKen Wang  *
687e2cdf640SKen Wang  * Calculate the latency watermark (CIK).
688e2cdf640SKen Wang  * Used for display watermark bandwidth calculations
689e2cdf640SKen Wang  * Returns the latency watermark in ns
690e2cdf640SKen Wang  */
691e2cdf640SKen Wang static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
692e2cdf640SKen Wang {
693e2cdf640SKen Wang 	/* First calculate the latency in ns */
694e2cdf640SKen Wang 	u32 mc_latency = 2000; /* 2000 ns. */
695e2cdf640SKen Wang 	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
696e2cdf640SKen Wang 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
697e2cdf640SKen Wang 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
698e2cdf640SKen Wang 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
699e2cdf640SKen Wang 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
700e2cdf640SKen Wang 		(wm->num_heads * cursor_line_pair_return_time);
701e2cdf640SKen Wang 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
702e2cdf640SKen Wang 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
703e2cdf640SKen Wang 	u32 tmp, dmif_size = 12288;
704e2cdf640SKen Wang 	fixed20_12 a, b, c;
705e2cdf640SKen Wang 
706e2cdf640SKen Wang 	if (wm->num_heads == 0)
707e2cdf640SKen Wang 		return 0;
708e2cdf640SKen Wang 
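	/* Vertical scaling above 2:1, 3+ taps while downscaling, 5+ taps, or
	 * interlaced output at 2:1 or more needs up to 4 source lines per
	 * destination line; everything else gets by with 2.
	 */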
709e2cdf640SKen Wang 	a.full = dfixed_const(2);
710e2cdf640SKen Wang 	b.full = dfixed_const(1);
711e2cdf640SKen Wang 	if ((wm->vsc.full > a.full) ||
712e2cdf640SKen Wang 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
713e2cdf640SKen Wang 	    (wm->vtaps >= 5) ||
714e2cdf640SKen Wang 	    ((wm->vsc.full >= a.full) && wm->interlaced))
715e2cdf640SKen Wang 		max_src_lines_per_dst_line = 4;
716e2cdf640SKen Wang 	else
717e2cdf640SKen Wang 		max_src_lines_per_dst_line = 2;
718e2cdf640SKen Wang 
719e2cdf640SKen Wang 	a.full = dfixed_const(available_bandwidth);
720e2cdf640SKen Wang 	b.full = dfixed_const(wm->num_heads);
721e2cdf640SKen Wang 	a.full = dfixed_div(a, b);
722e190ed1eSMario Kleiner 	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
723e190ed1eSMario Kleiner 	tmp = min(dfixed_trunc(a), tmp);
724e2cdf640SKen Wang 
725e190ed1eSMario Kleiner 	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
726e2cdf640SKen Wang 
727e2cdf640SKen Wang 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
728e2cdf640SKen Wang 	b.full = dfixed_const(1000);
729e2cdf640SKen Wang 	c.full = dfixed_const(lb_fill_bw);
730e2cdf640SKen Wang 	b.full = dfixed_div(c, b);
731e2cdf640SKen Wang 	a.full = dfixed_div(a, b);
732e2cdf640SKen Wang 	line_fill_time = dfixed_trunc(a);
733e2cdf640SKen Wang 
734e2cdf640SKen Wang 	if (line_fill_time < wm->active_time)
735e2cdf640SKen Wang 		return latency;
736e2cdf640SKen Wang 	else
737e2cdf640SKen Wang 		return latency + (line_fill_time - wm->active_time);
738e2cdf640SKen Wang 
739e2cdf640SKen Wang }
740e2cdf640SKen Wang 
741e2cdf640SKen Wang /**
742e2cdf640SKen Wang  * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
743e2cdf640SKen Wang  * average and available dram bandwidth
744e2cdf640SKen Wang  *
745e2cdf640SKen Wang  * @wm: watermark calculation data
746e2cdf640SKen Wang  *
747e2cdf640SKen Wang  * Check if the display average bandwidth fits in the display
748e2cdf640SKen Wang  * dram bandwidth (CIK).
749e2cdf640SKen Wang  * Used for display watermark bandwidth calculations
750e2cdf640SKen Wang  * Returns true if the display fits, false if not.
751e2cdf640SKen Wang  */
752e2cdf640SKen Wang static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
753e2cdf640SKen Wang {
754e2cdf640SKen Wang 	if (dce_v6_0_average_bandwidth(wm) <=
755e2cdf640SKen Wang 	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
756e2cdf640SKen Wang 		return true;
757e2cdf640SKen Wang 	else
758e2cdf640SKen Wang 		return false;
759e2cdf640SKen Wang }
760e2cdf640SKen Wang 
761e2cdf640SKen Wang /**
762e2cdf640SKen Wang  * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
763e2cdf640SKen Wang  * average and available bandwidth
764e2cdf640SKen Wang  *
765e2cdf640SKen Wang  * @wm: watermark calculation data
766e2cdf640SKen Wang  *
767e2cdf640SKen Wang  * Check if the display average bandwidth fits in the display
768e2cdf640SKen Wang  * available bandwidth (CIK).
769e2cdf640SKen Wang  * Used for display watermark bandwidth calculations
770e2cdf640SKen Wang  * Returns true if the display fits, false if not.
771e2cdf640SKen Wang  */
772e2cdf640SKen Wang static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
773e2cdf640SKen Wang {
774e2cdf640SKen Wang 	if (dce_v6_0_average_bandwidth(wm) <=
775e2cdf640SKen Wang 	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
776e2cdf640SKen Wang 		return true;
777e2cdf640SKen Wang 	else
778e2cdf640SKen Wang 		return false;
779e2cdf640SKen Wang }
780e2cdf640SKen Wang 
781e2cdf640SKen Wang /**
782e2cdf640SKen Wang  * dce_v6_0_check_latency_hiding - check latency hiding
783e2cdf640SKen Wang  *
784e2cdf640SKen Wang  * @wm: watermark calculation data
785e2cdf640SKen Wang  *
786e2cdf640SKen Wang  * Check latency hiding (CIK).
787e2cdf640SKen Wang  * Used for display watermark bandwidth calculations
788e2cdf640SKen Wang  * Returns true if the display fits, false if not.
789e2cdf640SKen Wang  */
790e2cdf640SKen Wang static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
791e2cdf640SKen Wang {
792e2cdf640SKen Wang 	u32 lb_partitions = wm->lb_size / wm->src_width;
793e2cdf640SKen Wang 	u32 line_time = wm->active_time + wm->blank_time;
794e2cdf640SKen Wang 	u32 latency_tolerant_lines;
795e2cdf640SKen Wang 	u32 latency_hiding;
796e2cdf640SKen Wang 	fixed20_12 a;
797e2cdf640SKen Wang 
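	/* Without vertical downscaling the line buffer can tolerate two lines
	 * of latency if it holds enough partitions for the scaler taps;
	 * otherwise only one line.
	 */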
798e2cdf640SKen Wang 	a.full = dfixed_const(1);
799e2cdf640SKen Wang 	if (wm->vsc.full > a.full)
800e2cdf640SKen Wang 		latency_tolerant_lines = 1;
801e2cdf640SKen Wang 	else {
802e2cdf640SKen Wang 		if (lb_partitions <= (wm->vtaps + 1))
803e2cdf640SKen Wang 			latency_tolerant_lines = 1;
804e2cdf640SKen Wang 		else
805e2cdf640SKen Wang 			latency_tolerant_lines = 2;
806e2cdf640SKen Wang 	}
807e2cdf640SKen Wang 
808e2cdf640SKen Wang 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
809e2cdf640SKen Wang 
810e2cdf640SKen Wang 	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
811e2cdf640SKen Wang 		return true;
812e2cdf640SKen Wang 	else
813e2cdf640SKen Wang 		return false;
814e2cdf640SKen Wang }
815e2cdf640SKen Wang 
816e2cdf640SKen Wang /**
817e2cdf640SKen Wang  * dce_v6_0_program_watermarks - program display watermarks
818e2cdf640SKen Wang  *
819e2cdf640SKen Wang  * @adev: amdgpu_device pointer
820e2cdf640SKen Wang  * @amdgpu_crtc: the selected display controller
821e2cdf640SKen Wang  * @lb_size: line buffer size
822e2cdf640SKen Wang  * @num_heads: number of display controllers in use
823e2cdf640SKen Wang  *
824e2cdf640SKen Wang  * Calculate and program the display watermarks for the
825e2cdf640SKen Wang  * selected display controller (CIK).
826e2cdf640SKen Wang  */
827e2cdf640SKen Wang static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
828e2cdf640SKen Wang 					struct amdgpu_crtc *amdgpu_crtc,
829e2cdf640SKen Wang 					u32 lb_size, u32 num_heads)
830e2cdf640SKen Wang {
831e2cdf640SKen Wang 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
832e2cdf640SKen Wang 	struct dce6_wm_params wm_low, wm_high;
833e2cdf640SKen Wang 	u32 dram_channels;
834d63c277dSMario Kleiner 	u32 active_time;
835e2cdf640SKen Wang 	u32 line_time = 0;
836e2cdf640SKen Wang 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
837e2cdf640SKen Wang 	u32 priority_a_mark = 0, priority_b_mark = 0;
838e2cdf640SKen Wang 	u32 priority_a_cnt = PRIORITY_OFF;
839e2cdf640SKen Wang 	u32 priority_b_cnt = PRIORITY_OFF;
840effaf848SMario Kleiner 	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
841e2cdf640SKen Wang 	fixed20_12 a, b, c;
842e2cdf640SKen Wang 
843e2cdf640SKen Wang 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
844bea10413SMario Kleiner 		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
845bea10413SMario Kleiner 					    (u32)mode->clock);
846bea10413SMario Kleiner 		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
847bea10413SMario Kleiner 					  (u32)mode->clock);
848bea10413SMario Kleiner 		line_time = min(line_time, (u32)65535);
849e2cdf640SKen Wang 		priority_a_cnt = 0;
850e2cdf640SKen Wang 		priority_b_cnt = 0;
851e2cdf640SKen Wang 
852e2cdf640SKen Wang 		dram_channels = si_get_number_of_dram_channels(adev);
853e2cdf640SKen Wang 
854e2cdf640SKen Wang 		/* watermark for high clocks */
855e2cdf640SKen Wang 		if (adev->pm.dpm_enabled) {
856e2cdf640SKen Wang 			wm_high.yclk =
857e2cdf640SKen Wang 				amdgpu_dpm_get_mclk(adev, false) * 10;
858e2cdf640SKen Wang 			wm_high.sclk =
859e2cdf640SKen Wang 				amdgpu_dpm_get_sclk(adev, false) * 10;
860e2cdf640SKen Wang 		} else {
861e2cdf640SKen Wang 			wm_high.yclk = adev->pm.current_mclk * 10;
862e2cdf640SKen Wang 			wm_high.sclk = adev->pm.current_sclk * 10;
863e2cdf640SKen Wang 		}
864e2cdf640SKen Wang 
865e2cdf640SKen Wang 		wm_high.disp_clk = mode->clock;
866e2cdf640SKen Wang 		wm_high.src_width = mode->crtc_hdisplay;
867d63c277dSMario Kleiner 		wm_high.active_time = active_time;
868e2cdf640SKen Wang 		wm_high.blank_time = line_time - wm_high.active_time;
869e2cdf640SKen Wang 		wm_high.interlaced = false;
870e2cdf640SKen Wang 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
871e2cdf640SKen Wang 			wm_high.interlaced = true;
872e2cdf640SKen Wang 		wm_high.vsc = amdgpu_crtc->vsc;
873e2cdf640SKen Wang 		wm_high.vtaps = 1;
874e2cdf640SKen Wang 		if (amdgpu_crtc->rmx_type != RMX_OFF)
875e2cdf640SKen Wang 			wm_high.vtaps = 2;
876e2cdf640SKen Wang 		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
877e2cdf640SKen Wang 		wm_high.lb_size = lb_size;
878e2cdf640SKen Wang 		wm_high.dram_channels = dram_channels;
879e2cdf640SKen Wang 		wm_high.num_heads = num_heads;
880e2cdf640SKen Wang 
881e2cdf640SKen Wang 		if (adev->pm.dpm_enabled) {
882e2cdf640SKen Wang 		/* watermark for low clocks */
883e2cdf640SKen Wang 			wm_low.yclk =
884e2cdf640SKen Wang 				amdgpu_dpm_get_mclk(adev, true) * 10;
885e2cdf640SKen Wang 			wm_low.sclk =
886e2cdf640SKen Wang 				amdgpu_dpm_get_sclk(adev, true) * 10;
887e2cdf640SKen Wang 		} else {
888e2cdf640SKen Wang 			wm_low.yclk = adev->pm.current_mclk * 10;
889e2cdf640SKen Wang 			wm_low.sclk = adev->pm.current_sclk * 10;
890e2cdf640SKen Wang 		}
891e2cdf640SKen Wang 
892e2cdf640SKen Wang 		wm_low.disp_clk = mode->clock;
893e2cdf640SKen Wang 		wm_low.src_width = mode->crtc_hdisplay;
894d63c277dSMario Kleiner 		wm_low.active_time = active_time;
895e2cdf640SKen Wang 		wm_low.blank_time = line_time - wm_low.active_time;
896e2cdf640SKen Wang 		wm_low.interlaced = false;
897e2cdf640SKen Wang 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
898e2cdf640SKen Wang 			wm_low.interlaced = true;
899e2cdf640SKen Wang 		wm_low.vsc = amdgpu_crtc->vsc;
900e2cdf640SKen Wang 		wm_low.vtaps = 1;
901e2cdf640SKen Wang 		if (amdgpu_crtc->rmx_type != RMX_OFF)
902e2cdf640SKen Wang 			wm_low.vtaps = 2;
903e2cdf640SKen Wang 		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
904e2cdf640SKen Wang 		wm_low.lb_size = lb_size;
905e2cdf640SKen Wang 		wm_low.dram_channels = dram_channels;
906e2cdf640SKen Wang 		wm_low.num_heads = num_heads;
907e2cdf640SKen Wang 
908e2cdf640SKen Wang 		/* set for high clocks */
909e2cdf640SKen Wang 		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
910e2cdf640SKen Wang 		/* set for low clocks */
911e2cdf640SKen Wang 		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
912e2cdf640SKen Wang 
913e2cdf640SKen Wang 		/* possibly force display priority to high */
914e2cdf640SKen Wang 		/* should really do this at mode validation time... */
915e2cdf640SKen Wang 		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
916e2cdf640SKen Wang 		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
917e2cdf640SKen Wang 		    !dce_v6_0_check_latency_hiding(&wm_high) ||
918e2cdf640SKen Wang 		    (adev->mode_info.disp_priority == 2)) {
919e2cdf640SKen Wang 			DRM_DEBUG_KMS("force priority to high\n");
920e2cdf640SKen Wang 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
921e2cdf640SKen Wang 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
922e2cdf640SKen Wang 		}
923e2cdf640SKen Wang 		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
924e2cdf640SKen Wang 		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
925e2cdf640SKen Wang 		    !dce_v6_0_check_latency_hiding(&wm_low) ||
926e2cdf640SKen Wang 		    (adev->mode_info.disp_priority == 2)) {
927e2cdf640SKen Wang 			DRM_DEBUG_KMS("force priority to high\n");
928e2cdf640SKen Wang 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
929e2cdf640SKen Wang 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
930e2cdf640SKen Wang 		}
931e2cdf640SKen Wang 
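		/* Convert each latency watermark into a pixel count at the
		 * current pixel clock (scaled by the horizontal scale ratio)
		 * and express it in units of 16 pixels for the priority marks.
		 */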
932e2cdf640SKen Wang 		a.full = dfixed_const(1000);
933e2cdf640SKen Wang 		b.full = dfixed_const(mode->clock);
934e2cdf640SKen Wang 		b.full = dfixed_div(b, a);
935e2cdf640SKen Wang 		c.full = dfixed_const(latency_watermark_a);
936e2cdf640SKen Wang 		c.full = dfixed_mul(c, b);
937e2cdf640SKen Wang 		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
938e2cdf640SKen Wang 		c.full = dfixed_div(c, a);
939e2cdf640SKen Wang 		a.full = dfixed_const(16);
940e2cdf640SKen Wang 		c.full = dfixed_div(c, a);
941e2cdf640SKen Wang 		priority_a_mark = dfixed_trunc(c);
942e2cdf640SKen Wang 		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
943e2cdf640SKen Wang 
944e2cdf640SKen Wang 		a.full = dfixed_const(1000);
945e2cdf640SKen Wang 		b.full = dfixed_const(mode->clock);
946e2cdf640SKen Wang 		b.full = dfixed_div(b, a);
947e2cdf640SKen Wang 		c.full = dfixed_const(latency_watermark_b);
948e2cdf640SKen Wang 		c.full = dfixed_mul(c, b);
949e2cdf640SKen Wang 		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
950e2cdf640SKen Wang 		c.full = dfixed_div(c, a);
951e2cdf640SKen Wang 		a.full = dfixed_const(16);
952e2cdf640SKen Wang 		c.full = dfixed_div(c, a);
953e2cdf640SKen Wang 		priority_b_mark = dfixed_trunc(c);
954e2cdf640SKen Wang 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
955effaf848SMario Kleiner 
956effaf848SMario Kleiner 		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
957e2cdf640SKen Wang 	}
958e2cdf640SKen Wang 
959e2cdf640SKen Wang 	/* select wm A */
960b00861b9STom St Denis 	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
961e2cdf640SKen Wang 	tmp = arb_control3;
962e2cdf640SKen Wang 	tmp &= ~LATENCY_WATERMARK_MASK(3);
963e2cdf640SKen Wang 	tmp |= LATENCY_WATERMARK_MASK(1);
964b00861b9STom St Denis 	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
965b00861b9STom St Denis 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
966b00861b9STom St Denis 	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT)  |
967b00861b9STom St Denis 		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
968e2cdf640SKen Wang 	/* select wm B */
969b00861b9STom St Denis 	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
970e2cdf640SKen Wang 	tmp &= ~LATENCY_WATERMARK_MASK(3);
971e2cdf640SKen Wang 	tmp |= LATENCY_WATERMARK_MASK(2);
972b00861b9STom St Denis 	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
973b00861b9STom St Denis 	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
974b00861b9STom St Denis 	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
975b00861b9STom St Denis 		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
976e2cdf640SKen Wang 	/* restore original selection */
977b00861b9STom St Denis 	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
978e2cdf640SKen Wang 
979e2cdf640SKen Wang 	/* write the priority marks */
980b00861b9STom St Denis 	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
981b00861b9STom St Denis 	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
982e2cdf640SKen Wang 
983e2cdf640SKen Wang 	/* save values for DPM */
984e2cdf640SKen Wang 	amdgpu_crtc->line_time = line_time;
985e2cdf640SKen Wang 	amdgpu_crtc->wm_high = latency_watermark_a;
986effaf848SMario Kleiner 
987effaf848SMario Kleiner 	/* Save number of lines the linebuffer leads before the scanout */
988effaf848SMario Kleiner 	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
989e2cdf640SKen Wang }
990e2cdf640SKen Wang 
991e2cdf640SKen Wang /* watermark setup */
992e2cdf640SKen Wang static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
993e2cdf640SKen Wang 				   struct amdgpu_crtc *amdgpu_crtc,
994e2cdf640SKen Wang 				   struct drm_display_mode *mode,
995e2cdf640SKen Wang 				   struct drm_display_mode *other_mode)
996e2cdf640SKen Wang {
997e2cdf640SKen Wang 	u32 tmp, buffer_alloc, i;
998e2cdf640SKen Wang 	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
999e2cdf640SKen Wang 	/*
1000e2cdf640SKen Wang 	 * Line Buffer Setup
1001e2cdf640SKen Wang 	 * There are 3 line buffers, each one shared by 2 display controllers.
1002b00861b9STom St Denis 	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1003e2cdf640SKen Wang 	 * the display controllers.  The partitioning is done via one of four
1004e2cdf640SKen Wang 	 * preset allocations specified in bits 21:20:
1005e2cdf640SKen Wang 	 *  0 - half lb
1006e2cdf640SKen Wang 	 *  2 - whole lb, other crtc must be disabled
1007e2cdf640SKen Wang 	 */
1008e2cdf640SKen Wang 	/* this can get tricky if we have two large displays on a paired group
1009e2cdf640SKen Wang 	 * of crtcs.  Ideally for multiple large displays we'd assign them to
1010e2cdf640SKen Wang 	 * non-linked crtcs for maximum line buffer allocation.
1011e2cdf640SKen Wang 	 */
1012e2cdf640SKen Wang 	if (amdgpu_crtc->base.enabled && mode) {
1013e2cdf640SKen Wang 		if (other_mode) {
1014e2cdf640SKen Wang 			tmp = 0; /* 1/2 */
1015e2cdf640SKen Wang 			buffer_alloc = 1;
1016e2cdf640SKen Wang 		} else {
1017e2cdf640SKen Wang 			tmp = 2; /* whole */
1018e2cdf640SKen Wang 			buffer_alloc = 2;
1019e2cdf640SKen Wang 		}
1020e2cdf640SKen Wang 	} else {
1021e2cdf640SKen Wang 		tmp = 0;
1022e2cdf640SKen Wang 		buffer_alloc = 0;
1023e2cdf640SKen Wang 	}
1024e2cdf640SKen Wang 
1025b00861b9STom St Denis 	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
1026e2cdf640SKen Wang 	       DC_LB_MEMORY_CONFIG(tmp));
1027e2cdf640SKen Wang 
1028b00861b9STom St Denis 	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1029b00861b9STom St Denis 	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
1030e2cdf640SKen Wang 	for (i = 0; i < adev->usec_timeout; i++) {
1031b00861b9STom St Denis 		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1032b00861b9STom St Denis 		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
1033e2cdf640SKen Wang 			break;
1034e2cdf640SKen Wang 		udelay(1);
1035e2cdf640SKen Wang 	}
1036e2cdf640SKen Wang 
1037e2cdf640SKen Wang 	if (amdgpu_crtc->base.enabled && mode) {
1038e2cdf640SKen Wang 		switch (tmp) {
1039e2cdf640SKen Wang 		case 0:
1040e2cdf640SKen Wang 		default:
1041e2cdf640SKen Wang 			return 4096 * 2;
1042e2cdf640SKen Wang 		case 2:
1043e2cdf640SKen Wang 			return 8192 * 2;
1044e2cdf640SKen Wang 		}
1045e2cdf640SKen Wang 	}
1046e2cdf640SKen Wang 
1047e2cdf640SKen Wang 	/* controller not enabled, so no lb used */
1048e2cdf640SKen Wang 	return 0;
1049e2cdf640SKen Wang }
1050e2cdf640SKen Wang 
1051e2cdf640SKen Wang 
1052e2cdf640SKen Wang /**
1053e2cdf640SKen Wang  * dce_v6_0_bandwidth_update - program display watermarks
1054e2cdf640SKen Wang  *
1055e2cdf640SKen Wang  * @adev: amdgpu_device pointer
1056e2cdf640SKen Wang  *
1057e2cdf640SKen Wang  * Calculate and program the display watermarks and line
1058e2cdf640SKen Wang  * buffer allocation (DCE6).
1059e2cdf640SKen Wang  */
1060e2cdf640SKen Wang static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
1061e2cdf640SKen Wang {
1062e2cdf640SKen Wang 	struct drm_display_mode *mode0 = NULL;
1063e2cdf640SKen Wang 	struct drm_display_mode *mode1 = NULL;
1064e2cdf640SKen Wang 	u32 num_heads = 0, lb_size;
1065e2cdf640SKen Wang 	int i;
1066e2cdf640SKen Wang 
1067e2cdf640SKen Wang 	if (!adev->mode_info.mode_config_initialized)
1068e2cdf640SKen Wang 		return;
1069e2cdf640SKen Wang 
1070166140fbSSamuel Li 	amdgpu_display_update_priority(adev);
1071e2cdf640SKen Wang 
1072e2cdf640SKen Wang 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
1073e2cdf640SKen Wang 		if (adev->mode_info.crtcs[i]->base.enabled)
1074e2cdf640SKen Wang 			num_heads++;
1075e2cdf640SKen Wang 	}
1076e2cdf640SKen Wang 	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
1077e2cdf640SKen Wang 		mode0 = &adev->mode_info.crtcs[i]->base.mode;
1078e2cdf640SKen Wang 		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
1079e2cdf640SKen Wang 		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
1080e2cdf640SKen Wang 		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
1081e2cdf640SKen Wang 		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
1082e2cdf640SKen Wang 		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
1083e2cdf640SKen Wang 	}
1084e2cdf640SKen Wang }
10854caca706SXiaojie Yuan 
1086e2cdf640SKen Wang static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
1087e2cdf640SKen Wang {
1088e2cdf640SKen Wang 	int i;
10894caca706SXiaojie Yuan 	u32 tmp;
1090e2cdf640SKen Wang 
1091e2cdf640SKen Wang 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
10924caca706SXiaojie Yuan 		tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
10934caca706SXiaojie Yuan 				ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
10944caca706SXiaojie Yuan 		if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
10954caca706SXiaojie Yuan 					PORT_CONNECTIVITY))
1096e2cdf640SKen Wang 			adev->mode_info.audio.pin[i].connected = false;
1097e2cdf640SKen Wang 		else
1098e2cdf640SKen Wang 			adev->mode_info.audio.pin[i].connected = true;
1099e2cdf640SKen Wang 	}
1100e2cdf640SKen Wang 
1101e2cdf640SKen Wang }
1102e2cdf640SKen Wang 
1103e2cdf640SKen Wang static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
1104e2cdf640SKen Wang {
1105e2cdf640SKen Wang 	int i;
1106e2cdf640SKen Wang 
1107e2cdf640SKen Wang 	dce_v6_0_audio_get_connected_pins(adev);
1108e2cdf640SKen Wang 
1109e2cdf640SKen Wang 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1110e2cdf640SKen Wang 		if (adev->mode_info.audio.pin[i].connected)
1111e2cdf640SKen Wang 			return &adev->mode_info.audio.pin[i];
1112e2cdf640SKen Wang 	}
1113e2cdf640SKen Wang 	DRM_ERROR("No connected audio pins found!\n");
1114e2cdf640SKen Wang 	return NULL;
1115e2cdf640SKen Wang }
1116e2cdf640SKen Wang 
11174caca706SXiaojie Yuan static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
1118e2cdf640SKen Wang {
11191348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
1120e2cdf640SKen Wang 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1121e2cdf640SKen Wang 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1122e2cdf640SKen Wang 
1123e2cdf640SKen Wang 	if (!dig || !dig->afmt || !dig->afmt->pin)
1124e2cdf640SKen Wang 		return;
1125e2cdf640SKen Wang 
11264caca706SXiaojie Yuan 	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
11274caca706SXiaojie Yuan 	       REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
11284caca706SXiaojie Yuan 		             dig->afmt->pin->id));
1129e2cdf640SKen Wang }
1130e2cdf640SKen Wang 
1131e2cdf640SKen Wang static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
1132e2cdf640SKen Wang 						struct drm_display_mode *mode)
1133e2cdf640SKen Wang {
1134f8d2d39eSLyude Paul 	struct drm_device *dev = encoder->dev;
11351348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
11364caca706SXiaojie Yuan 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
11374caca706SXiaojie Yuan 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
11384caca706SXiaojie Yuan 	struct drm_connector *connector;
1139f8d2d39eSLyude Paul 	struct drm_connector_list_iter iter;
11404caca706SXiaojie Yuan 	struct amdgpu_connector *amdgpu_connector = NULL;
11414caca706SXiaojie Yuan 	int interlace = 0;
11424caca706SXiaojie Yuan 	u32 tmp;
11434caca706SXiaojie Yuan 
1144f8d2d39eSLyude Paul 	drm_connector_list_iter_begin(dev, &iter);
1145f8d2d39eSLyude Paul 	drm_for_each_connector_iter(connector, &iter) {
11464caca706SXiaojie Yuan 		if (connector->encoder == encoder) {
11474caca706SXiaojie Yuan 			amdgpu_connector = to_amdgpu_connector(connector);
11484caca706SXiaojie Yuan 			break;
11494caca706SXiaojie Yuan 		}
11504caca706SXiaojie Yuan 	}
1151f8d2d39eSLyude Paul 	drm_connector_list_iter_end(&iter);
11524caca706SXiaojie Yuan 
11534caca706SXiaojie Yuan 	if (!amdgpu_connector) {
11544caca706SXiaojie Yuan 		DRM_ERROR("Couldn't find encoder's connector\n");
11554caca706SXiaojie Yuan 		return;
11564caca706SXiaojie Yuan 	}
11574caca706SXiaojie Yuan 
11584caca706SXiaojie Yuan 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
11594caca706SXiaojie Yuan 		interlace = 1;
11604caca706SXiaojie Yuan 
11614caca706SXiaojie Yuan 	if (connector->latency_present[interlace]) {
11624caca706SXiaojie Yuan 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
11634caca706SXiaojie Yuan 				VIDEO_LIPSYNC, connector->video_latency[interlace]);
11644caca706SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
11654caca706SXiaojie Yuan 				AUDIO_LIPSYNC, connector->audio_latency[interlace]);
11664caca706SXiaojie Yuan 	} else {
11674caca706SXiaojie Yuan 		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
11684caca706SXiaojie Yuan 				VIDEO_LIPSYNC, 0);
11694caca706SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
11704caca706SXiaojie Yuan 				AUDIO_LIPSYNC, 0);
11714caca706SXiaojie Yuan 	}
11724caca706SXiaojie Yuan 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
11734caca706SXiaojie Yuan 			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1174e2cdf640SKen Wang }
1175e2cdf640SKen Wang 
1176e2cdf640SKen Wang static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1177e2cdf640SKen Wang {
1178f8d2d39eSLyude Paul 	struct drm_device *dev = encoder->dev;
11791348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
11804caca706SXiaojie Yuan 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
11814caca706SXiaojie Yuan 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
11824caca706SXiaojie Yuan 	struct drm_connector *connector;
1183f8d2d39eSLyude Paul 	struct drm_connector_list_iter iter;
11844caca706SXiaojie Yuan 	struct amdgpu_connector *amdgpu_connector = NULL;
11854caca706SXiaojie Yuan 	u8 *sadb = NULL;
11864caca706SXiaojie Yuan 	int sad_count;
11874caca706SXiaojie Yuan 	u32 tmp;
11884caca706SXiaojie Yuan 
1189f8d2d39eSLyude Paul 	drm_connector_list_iter_begin(dev, &iter);
1190f8d2d39eSLyude Paul 	drm_for_each_connector_iter(connector, &iter) {
11914caca706SXiaojie Yuan 		if (connector->encoder == encoder) {
11924caca706SXiaojie Yuan 			amdgpu_connector = to_amdgpu_connector(connector);
11934caca706SXiaojie Yuan 			break;
11944caca706SXiaojie Yuan 		}
11954caca706SXiaojie Yuan 	}
1196f8d2d39eSLyude Paul 	drm_connector_list_iter_end(&iter);
11974caca706SXiaojie Yuan 
11984caca706SXiaojie Yuan 	if (!amdgpu_connector) {
11994caca706SXiaojie Yuan 		DRM_ERROR("Couldn't find encoder's connector\n");
12004caca706SXiaojie Yuan 		return;
12014caca706SXiaojie Yuan 	}
12024caca706SXiaojie Yuan 
12034caca706SXiaojie Yuan 	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
12044caca706SXiaojie Yuan 	if (sad_count < 0) {
12054caca706SXiaojie Yuan 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
12064caca706SXiaojie Yuan 		sad_count = 0;
12074caca706SXiaojie Yuan 	}
12084caca706SXiaojie Yuan 
12094caca706SXiaojie Yuan 	/* program the speaker allocation */
12104caca706SXiaojie Yuan 	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
12114caca706SXiaojie Yuan 			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
12124caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
12134caca706SXiaojie Yuan 			HDMI_CONNECTION, 0);
12144caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
12154caca706SXiaojie Yuan 			DP_CONNECTION, 0);
12164caca706SXiaojie Yuan 
12174caca706SXiaojie Yuan 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
12184caca706SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
12194caca706SXiaojie Yuan 				DP_CONNECTION, 1);
12204caca706SXiaojie Yuan 	else
12214caca706SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
12224caca706SXiaojie Yuan 				HDMI_CONNECTION, 1);
12234caca706SXiaojie Yuan 
12244caca706SXiaojie Yuan 	if (sad_count)
12254caca706SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
12264caca706SXiaojie Yuan 				SPEAKER_ALLOCATION, sadb[0]);
12274caca706SXiaojie Yuan 	else
12284caca706SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
12294caca706SXiaojie Yuan 				SPEAKER_ALLOCATION, 5); /* stereo */
12304caca706SXiaojie Yuan 
12314caca706SXiaojie Yuan 	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
12324caca706SXiaojie Yuan 			ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
12334caca706SXiaojie Yuan 
12344caca706SXiaojie Yuan 	kfree(sadb);
1235e2cdf640SKen Wang }
1236e2cdf640SKen Wang 
1237e2cdf640SKen Wang static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
1238e2cdf640SKen Wang {
1239f8d2d39eSLyude Paul 	struct drm_device *dev = encoder->dev;
12401348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
12414caca706SXiaojie Yuan 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
12424caca706SXiaojie Yuan 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
12434caca706SXiaojie Yuan 	struct drm_connector *connector;
1244f8d2d39eSLyude Paul 	struct drm_connector_list_iter iter;
12454caca706SXiaojie Yuan 	struct amdgpu_connector *amdgpu_connector = NULL;
12464caca706SXiaojie Yuan 	struct cea_sad *sads;
12474caca706SXiaojie Yuan 	int i, sad_count;
12484caca706SXiaojie Yuan 
12494caca706SXiaojie Yuan 	static const u16 eld_reg_to_type[][2] = {
12504caca706SXiaojie Yuan 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
12514caca706SXiaojie Yuan 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
12524caca706SXiaojie Yuan 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
12534caca706SXiaojie Yuan 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
12544caca706SXiaojie Yuan 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
12554caca706SXiaojie Yuan 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
12564caca706SXiaojie Yuan 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
12574caca706SXiaojie Yuan 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
12584caca706SXiaojie Yuan 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
12594caca706SXiaojie Yuan 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
12604caca706SXiaojie Yuan 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
12614caca706SXiaojie Yuan 		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
12624caca706SXiaojie Yuan 	};
12634caca706SXiaojie Yuan 
1264f8d2d39eSLyude Paul 	drm_connector_list_iter_begin(dev, &iter);
1265f8d2d39eSLyude Paul 	drm_for_each_connector_iter(connector, &iter) {
12664caca706SXiaojie Yuan 		if (connector->encoder == encoder) {
12674caca706SXiaojie Yuan 			amdgpu_connector = to_amdgpu_connector(connector);
12684caca706SXiaojie Yuan 			break;
12694caca706SXiaojie Yuan 		}
12704caca706SXiaojie Yuan 	}
1271f8d2d39eSLyude Paul 	drm_connector_list_iter_end(&iter);
12724caca706SXiaojie Yuan 
12734caca706SXiaojie Yuan 	if (!amdgpu_connector) {
12744caca706SXiaojie Yuan 		DRM_ERROR("Couldn't find encoder's connector\n");
12754caca706SXiaojie Yuan 		return;
12764caca706SXiaojie Yuan 	}
12774caca706SXiaojie Yuan 
12784caca706SXiaojie Yuan 	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1279ae2a3495SJean Delvare 	if (sad_count < 0)
12804caca706SXiaojie Yuan 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1281ae2a3495SJean Delvare 	if (sad_count <= 0)
12824caca706SXiaojie Yuan 		return;
12834caca706SXiaojie Yuan 
12844caca706SXiaojie Yuan 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
12854caca706SXiaojie Yuan 		u32 tmp = 0;
12864caca706SXiaojie Yuan 		u8 stereo_freqs = 0;
12874caca706SXiaojie Yuan 		int max_channels = -1;
12884caca706SXiaojie Yuan 		int j;
12894caca706SXiaojie Yuan 
12904caca706SXiaojie Yuan 		for (j = 0; j < sad_count; j++) {
12914caca706SXiaojie Yuan 			struct cea_sad *sad = &sads[j];
12924caca706SXiaojie Yuan 
12934caca706SXiaojie Yuan 			if (sad->format == eld_reg_to_type[i][1]) {
12944caca706SXiaojie Yuan 				if (sad->channels > max_channels) {
12954caca706SXiaojie Yuan 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
12964caca706SXiaojie Yuan 							MAX_CHANNELS, sad->channels);
12974caca706SXiaojie Yuan 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
12984caca706SXiaojie Yuan 							DESCRIPTOR_BYTE_2, sad->byte2);
12994caca706SXiaojie Yuan 					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
13004caca706SXiaojie Yuan 							SUPPORTED_FREQUENCIES, sad->freq);
13014caca706SXiaojie Yuan 					max_channels = sad->channels;
13024caca706SXiaojie Yuan 				}
13034caca706SXiaojie Yuan 
13044caca706SXiaojie Yuan 				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
13054caca706SXiaojie Yuan 					stereo_freqs |= sad->freq;
13064caca706SXiaojie Yuan 				else
13074caca706SXiaojie Yuan 					break;
13084caca706SXiaojie Yuan 			}
13094caca706SXiaojie Yuan 		}
13104caca706SXiaojie Yuan 
13114caca706SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
13124caca706SXiaojie Yuan 				SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
13134caca706SXiaojie Yuan 		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
13144caca706SXiaojie Yuan 	}
13154caca706SXiaojie Yuan 
13164caca706SXiaojie Yuan 	kfree(sads);
1317e2cdf640SKen Wang 
1318e2cdf640SKen Wang }
13194caca706SXiaojie Yuan 
1320e2cdf640SKen Wang static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
1321e2cdf640SKen Wang 				  struct amdgpu_audio_pin *pin,
1322e2cdf640SKen Wang 				  bool enable)
1323e2cdf640SKen Wang {
13244caca706SXiaojie Yuan 	if (!pin)
13254caca706SXiaojie Yuan 		return;
13264caca706SXiaojie Yuan 
13274caca706SXiaojie Yuan 	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
13284caca706SXiaojie Yuan 			enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1329e2cdf640SKen Wang }
1330e2cdf640SKen Wang 
1331e2cdf640SKen Wang static const u32 pin_offsets[7] =
1332e2cdf640SKen Wang {
1333e2cdf640SKen Wang 	(0x1780 - 0x1780),
1334e2cdf640SKen Wang 	(0x1786 - 0x1780),
1335e2cdf640SKen Wang 	(0x178c - 0x1780),
1336e2cdf640SKen Wang 	(0x1792 - 0x1780),
1337e2cdf640SKen Wang 	(0x1798 - 0x1780),
1338e2cdf640SKen Wang 	(0x179d - 0x1780),
1339e2cdf640SKen Wang 	(0x17a4 - 0x1780),
1340e2cdf640SKen Wang };
1341e2cdf640SKen Wang 
1342e2cdf640SKen Wang static int dce_v6_0_audio_init(struct amdgpu_device *adev)
1343e2cdf640SKen Wang {
13444caca706SXiaojie Yuan 	int i;
13454caca706SXiaojie Yuan 
13464caca706SXiaojie Yuan 	if (!amdgpu_audio)
13474caca706SXiaojie Yuan 		return 0;
13484caca706SXiaojie Yuan 
13494caca706SXiaojie Yuan 	adev->mode_info.audio.enabled = true;
13504caca706SXiaojie Yuan 
13514caca706SXiaojie Yuan 	switch (adev->asic_type) {
13524caca706SXiaojie Yuan 	case CHIP_TAHITI:
13534caca706SXiaojie Yuan 	case CHIP_PITCAIRN:
13544caca706SXiaojie Yuan 	case CHIP_VERDE:
13554caca706SXiaojie Yuan 	default:
13564caca706SXiaojie Yuan 		adev->mode_info.audio.num_pins = 6;
13574caca706SXiaojie Yuan 		break;
13584caca706SXiaojie Yuan 	case CHIP_OLAND:
13594caca706SXiaojie Yuan 		adev->mode_info.audio.num_pins = 2;
13604caca706SXiaojie Yuan 		break;
13614caca706SXiaojie Yuan 	}
13624caca706SXiaojie Yuan 
13634caca706SXiaojie Yuan 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
13644caca706SXiaojie Yuan 		adev->mode_info.audio.pin[i].channels = -1;
13654caca706SXiaojie Yuan 		adev->mode_info.audio.pin[i].rate = -1;
13664caca706SXiaojie Yuan 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
13674caca706SXiaojie Yuan 		adev->mode_info.audio.pin[i].status_bits = 0;
13684caca706SXiaojie Yuan 		adev->mode_info.audio.pin[i].category_code = 0;
13694caca706SXiaojie Yuan 		adev->mode_info.audio.pin[i].connected = false;
13704caca706SXiaojie Yuan 		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
13714caca706SXiaojie Yuan 		adev->mode_info.audio.pin[i].id = i;
13724caca706SXiaojie Yuan 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
13734caca706SXiaojie Yuan 	}
13744caca706SXiaojie Yuan 
1375e2cdf640SKen Wang 	return 0;
1376e2cdf640SKen Wang }
1377e2cdf640SKen Wang 
1378e2cdf640SKen Wang static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
1379e2cdf640SKen Wang {
13804caca706SXiaojie Yuan 	int i;
1381e2cdf640SKen Wang 
13824caca706SXiaojie Yuan 	if (!amdgpu_audio)
13834caca706SXiaojie Yuan 		return;
13844caca706SXiaojie Yuan 
13854caca706SXiaojie Yuan 	if (!adev->mode_info.audio.enabled)
13864caca706SXiaojie Yuan 		return;
13874caca706SXiaojie Yuan 
13884caca706SXiaojie Yuan 	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
13894caca706SXiaojie Yuan 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
13904caca706SXiaojie Yuan 
13914caca706SXiaojie Yuan 	adev->mode_info.audio.enabled = false;
1392e2cdf640SKen Wang }
1393e2cdf640SKen Wang 
1394e6f7c765SXiaojie Yuan static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
1395e2cdf640SKen Wang {
1396e6f7c765SXiaojie Yuan 	struct drm_device *dev = encoder->dev;
13971348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
1398e6f7c765SXiaojie Yuan 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1399e6f7c765SXiaojie Yuan 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1400e6f7c765SXiaojie Yuan 	u32 tmp;
1401e6f7c765SXiaojie Yuan 
1402e6f7c765SXiaojie Yuan 	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
1403e6f7c765SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
1404e6f7c765SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
1405e6f7c765SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
1406e6f7c765SXiaojie Yuan 	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
1407e2cdf640SKen Wang }
1408e6f7c765SXiaojie Yuan 
1409e6f7c765SXiaojie Yuan static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
1410e6f7c765SXiaojie Yuan 				   uint32_t clock, int bpc)
1411e2cdf640SKen Wang {
1412e6f7c765SXiaojie Yuan 	struct drm_device *dev = encoder->dev;
14131348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
1414e6f7c765SXiaojie Yuan 	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1415e6f7c765SXiaojie Yuan 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1416e6f7c765SXiaojie Yuan 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1417e6f7c765SXiaojie Yuan 	u32 tmp;
1418e6f7c765SXiaojie Yuan 
1419e6f7c765SXiaojie Yuan 	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
1420e6f7c765SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
1421e6f7c765SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
1422e6f7c765SXiaojie Yuan 			bpc > 8 ? 0 : 1);
1423e6f7c765SXiaojie Yuan 	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
1424e6f7c765SXiaojie Yuan 
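	/*
	 * Informational note (standard HDMI audio clock regeneration, not
	 * taken from this driver): the sink reconstructs the audio clock as
	 * 128 * fs = pixel_clock * N / CTS.  amdgpu_afmt_acr() above picks
	 * matching N/CTS pairs for 32, 44.1 and 48 kHz from the pixel clock;
	 * the register writes below simply load those per-rate pairs.
	 */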
1425e6f7c765SXiaojie Yuan 	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
1426e6f7c765SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
1427e6f7c765SXiaojie Yuan 	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
1428e6f7c765SXiaojie Yuan 	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
1429e6f7c765SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
1430e6f7c765SXiaojie Yuan 	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
1431e6f7c765SXiaojie Yuan 
1432e6f7c765SXiaojie Yuan 	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
1433e6f7c765SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
1434e6f7c765SXiaojie Yuan 	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
1435e6f7c765SXiaojie Yuan 	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
1436e6f7c765SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
1437e6f7c765SXiaojie Yuan 	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
1438e6f7c765SXiaojie Yuan 
1439e6f7c765SXiaojie Yuan 	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
1440e6f7c765SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
1441e6f7c765SXiaojie Yuan 	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
1442e6f7c765SXiaojie Yuan 	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
1443e6f7c765SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
1444e6f7c765SXiaojie Yuan 	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
1445e6f7c765SXiaojie Yuan }
14464caca706SXiaojie Yuan 
14474caca706SXiaojie Yuan static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
14484caca706SXiaojie Yuan 					       struct drm_display_mode *mode)
1449e2cdf640SKen Wang {
14504caca706SXiaojie Yuan 	struct drm_device *dev = encoder->dev;
14511348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
14524caca706SXiaojie Yuan 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
14534caca706SXiaojie Yuan 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
145413d0add3SVille Syrjälä 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
14554caca706SXiaojie Yuan 	struct hdmi_avi_infoframe frame;
14564caca706SXiaojie Yuan 	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
14574caca706SXiaojie Yuan 	uint8_t *payload = buffer + 3;
14584caca706SXiaojie Yuan 	uint8_t *header = buffer;
14594caca706SXiaojie Yuan 	ssize_t err;
14604caca706SXiaojie Yuan 	u32 tmp;
14614caca706SXiaojie Yuan 
146213d0add3SVille Syrjälä 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
14634caca706SXiaojie Yuan 	if (err < 0) {
14644caca706SXiaojie Yuan 		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
14654caca706SXiaojie Yuan 		return;
14664caca706SXiaojie Yuan 	}
14674caca706SXiaojie Yuan 
14684caca706SXiaojie Yuan 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
14694caca706SXiaojie Yuan 	if (err < 0) {
14704caca706SXiaojie Yuan 		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
14714caca706SXiaojie Yuan 		return;
14724caca706SXiaojie Yuan 	}
14734caca706SXiaojie Yuan 
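	/*
	 * Layout note (assumes the standard hdmi_avi_infoframe_pack() output:
	 * type, version, length, checksum, then 13 payload bytes): with
	 * payload = buffer + 3, the AFMT_AVI_INFO0..3 writes below carry the
	 * checksum and PB1..PB13, and header[1] (the version byte) lands in
	 * the top byte of AFMT_AVI_INFO3.
	 */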
14744caca706SXiaojie Yuan 	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
14754caca706SXiaojie Yuan 	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
14764caca706SXiaojie Yuan 	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
14774caca706SXiaojie Yuan 	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
14784caca706SXiaojie Yuan 	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
14794caca706SXiaojie Yuan 	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
14804caca706SXiaojie Yuan 	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
14814caca706SXiaojie Yuan 	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));
14824caca706SXiaojie Yuan 
14834caca706SXiaojie Yuan 	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
14844caca706SXiaojie Yuan 	/* anything other than 0 */
14854caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
14864caca706SXiaojie Yuan 			HDMI_AUDIO_INFO_LINE, 2);
14874caca706SXiaojie Yuan 	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1488e2cdf640SKen Wang }
1489e2cdf640SKen Wang 
1490e2cdf640SKen Wang static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1491e2cdf640SKen Wang {
14924caca706SXiaojie Yuan 	struct drm_device *dev = encoder->dev;
14931348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
14944caca706SXiaojie Yuan 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1495e6f7c765SXiaojie Yuan 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
14964caca706SXiaojie Yuan 	u32 tmp;
14974caca706SXiaojie Yuan 
1498e2cdf640SKen Wang 	/*
14994caca706SXiaojie Yuan 	 * Two dtos: generally use dto0 for hdmi, dto1 for dp.
15004caca706SXiaojie Yuan 	 * Express [24MHz / target pixel clock] as an exact rational
15014caca706SXiaojie Yuan 	 * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
15024caca706SXiaojie Yuan 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
1503e2cdf640SKen Wang 	 */
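	/*
	 * Worked example (assuming 'clock' is the pixel clock in kHz, as
	 * passed in from dce_v6_0_afmt_setmode): a 1080p60 mode with a
	 * 148500 kHz pixel clock makes the HDMI path below program
	 * PHASE = 24000 and MODULE = 148500, i.e. 24 MHz / 148.5 MHz.
	 */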
15044caca706SXiaojie Yuan 	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
15054caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
15064caca706SXiaojie Yuan 			DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
1507e6f7c765SXiaojie Yuan 	if (em == ATOM_ENCODER_MODE_HDMI) {
1508e6f7c765SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1509e6f7c765SXiaojie Yuan 				DCCG_AUDIO_DTO_SEL, 0);
1510e6f7c765SXiaojie Yuan 	} else if (ENCODER_MODE_IS_DP(em)) {
1511e6f7c765SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
1512e6f7c765SXiaojie Yuan 				DCCG_AUDIO_DTO_SEL, 1);
1513e6f7c765SXiaojie Yuan 	}
15144caca706SXiaojie Yuan 	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1515e6f7c765SXiaojie Yuan 	if (em == ATOM_ENCODER_MODE_HDMI) {
1516e6f7c765SXiaojie Yuan 		WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
1517e6f7c765SXiaojie Yuan 		WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
1518e6f7c765SXiaojie Yuan 	} else if (ENCODER_MODE_IS_DP(em)) {
15194caca706SXiaojie Yuan 		WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
15204caca706SXiaojie Yuan 		WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
15214caca706SXiaojie Yuan 	}
1522e6f7c765SXiaojie Yuan }
15234caca706SXiaojie Yuan 
15244caca706SXiaojie Yuan static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
15254caca706SXiaojie Yuan {
15264caca706SXiaojie Yuan 	struct drm_device *dev = encoder->dev;
15271348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
15284caca706SXiaojie Yuan 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
15294caca706SXiaojie Yuan 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
15304caca706SXiaojie Yuan 	u32 tmp;
15314caca706SXiaojie Yuan 
15324caca706SXiaojie Yuan 	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
15334caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
15344caca706SXiaojie Yuan 	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
15354caca706SXiaojie Yuan 
15364caca706SXiaojie Yuan 	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
15374caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
15384caca706SXiaojie Yuan 	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
15394caca706SXiaojie Yuan 
15404caca706SXiaojie Yuan 	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
15414caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
15424caca706SXiaojie Yuan 	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
15434caca706SXiaojie Yuan 
15444caca706SXiaojie Yuan 	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
15454caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
15464caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
15474caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
15484caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
15494caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
15504caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
15514caca706SXiaojie Yuan 	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
15524caca706SXiaojie Yuan 
15534caca706SXiaojie Yuan 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
15544caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
15554caca706SXiaojie Yuan 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);
15564caca706SXiaojie Yuan 
15574caca706SXiaojie Yuan 	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
15584caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
15594caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
15604caca706SXiaojie Yuan 	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
15614caca706SXiaojie Yuan 
15624caca706SXiaojie Yuan 	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
15634caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
15644caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
15654caca706SXiaojie Yuan 	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
15664caca706SXiaojie Yuan }
15674caca706SXiaojie Yuan 
15684caca706SXiaojie Yuan static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
15694caca706SXiaojie Yuan {
15704caca706SXiaojie Yuan 	struct drm_device *dev = encoder->dev;
15711348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
15724caca706SXiaojie Yuan 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
15734caca706SXiaojie Yuan 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
15744caca706SXiaojie Yuan 	u32 tmp;
15754caca706SXiaojie Yuan 
15764caca706SXiaojie Yuan 	tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
15774caca706SXiaojie Yuan 	tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
15784caca706SXiaojie Yuan 	WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
15794caca706SXiaojie Yuan }
15804caca706SXiaojie Yuan 
1581e6f7c765SXiaojie Yuan static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
1582e6f7c765SXiaojie Yuan {
1583e6f7c765SXiaojie Yuan 	struct drm_device *dev = encoder->dev;
15841348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
1585e6f7c765SXiaojie Yuan 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1586e6f7c765SXiaojie Yuan 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1587e6f7c765SXiaojie Yuan 	u32 tmp;
1588e6f7c765SXiaojie Yuan 
1589e6f7c765SXiaojie Yuan 	if (enable) {
1590e6f7c765SXiaojie Yuan 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1591e6f7c765SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1592e6f7c765SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1593e6f7c765SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1594e6f7c765SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1595e6f7c765SXiaojie Yuan 		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1596e6f7c765SXiaojie Yuan 
1597e6f7c765SXiaojie Yuan 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1598e6f7c765SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1599e6f7c765SXiaojie Yuan 		WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1600e6f7c765SXiaojie Yuan 
1601e6f7c765SXiaojie Yuan 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1602e6f7c765SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1603e6f7c765SXiaojie Yuan 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1604e6f7c765SXiaojie Yuan 	} else {
1605e6f7c765SXiaojie Yuan 		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1606e6f7c765SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
1607e6f7c765SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
1608e6f7c765SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
1609e6f7c765SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
1610e6f7c765SXiaojie Yuan 		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1611e6f7c765SXiaojie Yuan 
1612e6f7c765SXiaojie Yuan 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1613e6f7c765SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
1614e6f7c765SXiaojie Yuan 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1615e6f7c765SXiaojie Yuan 	}
1616e6f7c765SXiaojie Yuan }
1617e6f7c765SXiaojie Yuan 
16184caca706SXiaojie Yuan static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
16194caca706SXiaojie Yuan {
16204caca706SXiaojie Yuan 	struct drm_device *dev = encoder->dev;
16211348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
16224caca706SXiaojie Yuan 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
16234caca706SXiaojie Yuan 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
16244caca706SXiaojie Yuan 	u32 tmp;
16254caca706SXiaojie Yuan 
16264caca706SXiaojie Yuan 	if (enable) {
16274caca706SXiaojie Yuan 		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
16284caca706SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
16294caca706SXiaojie Yuan 		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
16304caca706SXiaojie Yuan 
16314caca706SXiaojie Yuan 		tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
16324caca706SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
16334caca706SXiaojie Yuan 		WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);
16344caca706SXiaojie Yuan 
16354caca706SXiaojie Yuan 		tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
16364caca706SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
16374caca706SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
16384caca706SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
16394caca706SXiaojie Yuan 		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
16404caca706SXiaojie Yuan 		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
16414caca706SXiaojie Yuan 	} else {
16424caca706SXiaojie Yuan 		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
16434caca706SXiaojie Yuan 	}
16444caca706SXiaojie Yuan }
16454caca706SXiaojie Yuan 
1646e2cdf640SKen Wang static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1647e2cdf640SKen Wang 				  struct drm_display_mode *mode)
1648e2cdf640SKen Wang {
16494caca706SXiaojie Yuan 	struct drm_device *dev = encoder->dev;
16501348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
16514caca706SXiaojie Yuan 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
16524caca706SXiaojie Yuan 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
16534caca706SXiaojie Yuan 	struct drm_connector *connector;
1654f8d2d39eSLyude Paul 	struct drm_connector_list_iter iter;
16554caca706SXiaojie Yuan 	struct amdgpu_connector *amdgpu_connector = NULL;
1656e6f7c765SXiaojie Yuan 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1657e6f7c765SXiaojie Yuan 	int bpc = 8;
16584caca706SXiaojie Yuan 
16594caca706SXiaojie Yuan 	if (!dig || !dig->afmt)
16604caca706SXiaojie Yuan 		return;
16614caca706SXiaojie Yuan 
1662f8d2d39eSLyude Paul 	drm_connector_list_iter_begin(dev, &iter);
1663f8d2d39eSLyude Paul 	drm_for_each_connector_iter(connector, &iter) {
16644caca706SXiaojie Yuan 		if (connector->encoder == encoder) {
16654caca706SXiaojie Yuan 			amdgpu_connector = to_amdgpu_connector(connector);
16664caca706SXiaojie Yuan 			break;
16674caca706SXiaojie Yuan 		}
16684caca706SXiaojie Yuan 	}
1669f8d2d39eSLyude Paul 	drm_connector_list_iter_end(&iter);
16704caca706SXiaojie Yuan 
16714caca706SXiaojie Yuan 	if (!amdgpu_connector) {
16724caca706SXiaojie Yuan 		DRM_ERROR("Couldn't find encoder's connector\n");
16734caca706SXiaojie Yuan 		return;
16744caca706SXiaojie Yuan 	}
16754caca706SXiaojie Yuan 
16764caca706SXiaojie Yuan 	if (!dig->afmt->enabled)
16774caca706SXiaojie Yuan 		return;
16784caca706SXiaojie Yuan 
16794caca706SXiaojie Yuan 	dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
16804caca706SXiaojie Yuan 	if (!dig->afmt->pin)
16814caca706SXiaojie Yuan 		return;
16824caca706SXiaojie Yuan 
1683e6f7c765SXiaojie Yuan 	if (encoder->crtc) {
1684e6f7c765SXiaojie Yuan 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1685e6f7c765SXiaojie Yuan 		bpc = amdgpu_crtc->bpc;
1686e6f7c765SXiaojie Yuan 	}
1687e6f7c765SXiaojie Yuan 
16884caca706SXiaojie Yuan 	/* disable audio before setting up hw */
16894caca706SXiaojie Yuan 	dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
16904caca706SXiaojie Yuan 
16914caca706SXiaojie Yuan 	dce_v6_0_audio_set_mute(encoder, true);
16924caca706SXiaojie Yuan 	dce_v6_0_audio_write_speaker_allocation(encoder);
16934caca706SXiaojie Yuan 	dce_v6_0_audio_write_sad_regs(encoder);
16944caca706SXiaojie Yuan 	dce_v6_0_audio_write_latency_fields(encoder, mode);
1695e6f7c765SXiaojie Yuan 	if (em == ATOM_ENCODER_MODE_HDMI) {
1696e6f7c765SXiaojie Yuan 		dce_v6_0_audio_set_dto(encoder, mode->clock);
1697e6f7c765SXiaojie Yuan 		dce_v6_0_audio_set_vbi_packet(encoder);
1698e6f7c765SXiaojie Yuan 		dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
1699e6f7c765SXiaojie Yuan 	} else if (ENCODER_MODE_IS_DP(em)) {
17004caca706SXiaojie Yuan 		dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
1701e6f7c765SXiaojie Yuan 	}
17024caca706SXiaojie Yuan 	dce_v6_0_audio_set_packet(encoder);
17034caca706SXiaojie Yuan 	dce_v6_0_audio_select_pin(encoder);
17044caca706SXiaojie Yuan 	dce_v6_0_audio_set_avi_infoframe(encoder, mode);
17054caca706SXiaojie Yuan 	dce_v6_0_audio_set_mute(encoder, false);
1706e6f7c765SXiaojie Yuan 	if (em == ATOM_ENCODER_MODE_HDMI) {
1707e6f7c765SXiaojie Yuan 		dce_v6_0_audio_hdmi_enable(encoder, 1);
1708e6f7c765SXiaojie Yuan 	} else if (ENCODER_MODE_IS_DP(em)) {
17094caca706SXiaojie Yuan 		dce_v6_0_audio_dp_enable(encoder, 1);
1710e6f7c765SXiaojie Yuan 	}
17114caca706SXiaojie Yuan 
17124caca706SXiaojie Yuan 	/* enable audio after setting up hw */
17134caca706SXiaojie Yuan 	dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
1714e2cdf640SKen Wang }
1715e2cdf640SKen Wang 
1716e2cdf640SKen Wang static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1717e2cdf640SKen Wang {
1718e2cdf640SKen Wang 	struct drm_device *dev = encoder->dev;
17191348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
1720e2cdf640SKen Wang 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1721e2cdf640SKen Wang 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1722e2cdf640SKen Wang 
1723e2cdf640SKen Wang 	if (!dig || !dig->afmt)
1724e2cdf640SKen Wang 		return;
1725e2cdf640SKen Wang 
1726e2cdf640SKen Wang 	/* Silent, r600_hdmi_enable will raise WARN for us */
1727e2cdf640SKen Wang 	if (enable && dig->afmt->enabled)
1728e2cdf640SKen Wang 		return;
17294caca706SXiaojie Yuan 
1730e2cdf640SKen Wang 	if (!enable && !dig->afmt->enabled)
1731e2cdf640SKen Wang 		return;
1732e2cdf640SKen Wang 
1733e2cdf640SKen Wang 	if (!enable && dig->afmt->pin) {
1734e2cdf640SKen Wang 		dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1735e2cdf640SKen Wang 		dig->afmt->pin = NULL;
1736e2cdf640SKen Wang 	}
1737e2cdf640SKen Wang 
1738e2cdf640SKen Wang 	dig->afmt->enabled = enable;
1739e2cdf640SKen Wang 
1740e2cdf640SKen Wang 	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1741e2cdf640SKen Wang 		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1742e2cdf640SKen Wang }
1743e2cdf640SKen Wang 
1744beb86f29STom St Denis static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1745e2cdf640SKen Wang {
1746beb86f29STom St Denis 	int i, j;
1747e2cdf640SKen Wang 
1748e2cdf640SKen Wang 	for (i = 0; i < adev->mode_info.num_dig; i++)
1749e2cdf640SKen Wang 		adev->mode_info.afmt[i] = NULL;
1750e2cdf640SKen Wang 
1751beb86f29STom St Denis 	/* DCE6 has audio blocks tied to DIG encoders */
1752e2cdf640SKen Wang 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1753e2cdf640SKen Wang 		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1754e2cdf640SKen Wang 		if (adev->mode_info.afmt[i]) {
1755e2cdf640SKen Wang 			adev->mode_info.afmt[i]->offset = dig_offsets[i];
1756e2cdf640SKen Wang 			adev->mode_info.afmt[i]->id = i;
1757beb86f29STom St Denis 		} else {
1758beb86f29STom St Denis 			for (j = 0; j < i; j++) {
1759beb86f29STom St Denis 				kfree(adev->mode_info.afmt[j]);
1760beb86f29STom St Denis 				adev->mode_info.afmt[j] = NULL;
1761beb86f29STom St Denis 			}
1762beb86f29STom St Denis 			DRM_ERROR("Out of memory allocating afmt table\n");
1763beb86f29STom St Denis 			return -ENOMEM;
1764e2cdf640SKen Wang 		}
1765e2cdf640SKen Wang 	}
1766beb86f29STom St Denis 	return 0;
1767e2cdf640SKen Wang }
1768e2cdf640SKen Wang 
1769e2cdf640SKen Wang static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1770e2cdf640SKen Wang {
1771e2cdf640SKen Wang 	int i;
1772e2cdf640SKen Wang 
1773e2cdf640SKen Wang 	for (i = 0; i < adev->mode_info.num_dig; i++) {
1774e2cdf640SKen Wang 		kfree(adev->mode_info.afmt[i]);
1775e2cdf640SKen Wang 		adev->mode_info.afmt[i] = NULL;
1776e2cdf640SKen Wang 	}
1777e2cdf640SKen Wang }
1778e2cdf640SKen Wang 
1779e2cdf640SKen Wang static const u32 vga_control_regs[6] =
1780e2cdf640SKen Wang {
1781b00861b9STom St Denis 	mmD1VGA_CONTROL,
1782b00861b9STom St Denis 	mmD2VGA_CONTROL,
1783b00861b9STom St Denis 	mmD3VGA_CONTROL,
1784b00861b9STom St Denis 	mmD4VGA_CONTROL,
1785b00861b9STom St Denis 	mmD5VGA_CONTROL,
1786b00861b9STom St Denis 	mmD6VGA_CONTROL,
1787e2cdf640SKen Wang };
1788e2cdf640SKen Wang 
1789e2cdf640SKen Wang static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1790e2cdf640SKen Wang {
1791e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1792e2cdf640SKen Wang 	struct drm_device *dev = crtc->dev;
17931348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
1794e2cdf640SKen Wang 	u32 vga_control;
1795e2cdf640SKen Wang 
1796e2cdf640SKen Wang 	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
17973d5f4d47STom St Denis 	WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1798e2cdf640SKen Wang }
1799e2cdf640SKen Wang 
1800e2cdf640SKen Wang static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1801e2cdf640SKen Wang {
1802e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1803e2cdf640SKen Wang 	struct drm_device *dev = crtc->dev;
18041348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
1805e2cdf640SKen Wang 
1806b00861b9STom St Denis 	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1807e2cdf640SKen Wang }
1808e2cdf640SKen Wang 
1809e2cdf640SKen Wang static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1810e2cdf640SKen Wang 				     struct drm_framebuffer *fb,
1811e2cdf640SKen Wang 				     int x, int y, int atomic)
1812e2cdf640SKen Wang {
1813e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1814e2cdf640SKen Wang 	struct drm_device *dev = crtc->dev;
18151348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
1816e2cdf640SKen Wang 	struct drm_framebuffer *target_fb;
1817e2cdf640SKen Wang 	struct drm_gem_object *obj;
1818765e7fbfSChristian König 	struct amdgpu_bo *abo;
1819e2cdf640SKen Wang 	uint64_t fb_location, tiling_flags;
1820e2cdf640SKen Wang 	uint32_t fb_format, fb_pitch_pixels, pipe_config;
1821b00861b9STom St Denis 	u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
1822e2cdf640SKen Wang 	u32 viewport_w, viewport_h;
1823e2cdf640SKen Wang 	int r;
1824e2cdf640SKen Wang 	bool bypass_lut = false;
1825e2cdf640SKen Wang 
1826e2cdf640SKen Wang 	/* no fb bound */
1827e2cdf640SKen Wang 	if (!atomic && !crtc->primary->fb) {
1828e2cdf640SKen Wang 		DRM_DEBUG_KMS("No FB bound\n");
1829e2cdf640SKen Wang 		return 0;
1830e2cdf640SKen Wang 	}
1831e2cdf640SKen Wang 
1832e68d14ddSDaniel Stone 	if (atomic)
1833e2cdf640SKen Wang 		target_fb = fb;
1834e68d14ddSDaniel Stone 	else
1835e2cdf640SKen Wang 		target_fb = crtc->primary->fb;
1836e2cdf640SKen Wang 
1837e2cdf640SKen Wang 	/* If atomic, assume fb object is pinned & idle & fenced and
1838e2cdf640SKen Wang 	 * just update base pointers
1839e2cdf640SKen Wang 	 */
1840e68d14ddSDaniel Stone 	obj = target_fb->obj[0];
1841765e7fbfSChristian König 	abo = gem_to_amdgpu_bo(obj);
1842765e7fbfSChristian König 	r = amdgpu_bo_reserve(abo, false);
1843e2cdf640SKen Wang 	if (unlikely(r != 0))
1844e2cdf640SKen Wang 		return r;
1845e2cdf640SKen Wang 
18467b7c6c81SJunwei Zhang 	if (!atomic) {
18477b7c6c81SJunwei Zhang 		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1848e2cdf640SKen Wang 		if (unlikely(r != 0)) {
1849765e7fbfSChristian König 			amdgpu_bo_unreserve(abo);
1850e2cdf640SKen Wang 			return -EINVAL;
1851e2cdf640SKen Wang 		}
1852e2cdf640SKen Wang 	}
18537b7c6c81SJunwei Zhang 	fb_location = amdgpu_bo_gpu_offset(abo);
1854e2cdf640SKen Wang 
1855765e7fbfSChristian König 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1856765e7fbfSChristian König 	amdgpu_bo_unreserve(abo);
1857e2cdf640SKen Wang 
1858438b74a5SVille Syrjälä 	switch (target_fb->format->format) {
1859e2cdf640SKen Wang 	case DRM_FORMAT_C8:
1860b00861b9STom St Denis 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
1861b00861b9STom St Denis 			     GRPH_FORMAT(GRPH_FORMAT_INDEXED));
1862e2cdf640SKen Wang 		break;
1863e2cdf640SKen Wang 	case DRM_FORMAT_XRGB4444:
1864e2cdf640SKen Wang 	case DRM_FORMAT_ARGB4444:
1865b00861b9STom St Denis 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1866b00861b9STom St Denis 			     GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
1867e2cdf640SKen Wang #ifdef __BIG_ENDIAN
1868b00861b9STom St Denis 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1869e2cdf640SKen Wang #endif
1870e2cdf640SKen Wang 		break;
1871e2cdf640SKen Wang 	case DRM_FORMAT_XRGB1555:
1872e2cdf640SKen Wang 	case DRM_FORMAT_ARGB1555:
1873b00861b9STom St Denis 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1874b00861b9STom St Denis 			     GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
1875e2cdf640SKen Wang #ifdef __BIG_ENDIAN
1876b00861b9STom St Denis 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1877e2cdf640SKen Wang #endif
1878e2cdf640SKen Wang 		break;
1879e2cdf640SKen Wang 	case DRM_FORMAT_BGRX5551:
1880e2cdf640SKen Wang 	case DRM_FORMAT_BGRA5551:
1881b00861b9STom St Denis 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1882b00861b9STom St Denis 			     GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
1883e2cdf640SKen Wang #ifdef __BIG_ENDIAN
1884b00861b9STom St Denis 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1885e2cdf640SKen Wang #endif
1886e2cdf640SKen Wang 		break;
1887e2cdf640SKen Wang 	case DRM_FORMAT_RGB565:
1888b00861b9STom St Denis 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1889b00861b9STom St Denis 			     GRPH_FORMAT(GRPH_FORMAT_ARGB565));
1890e2cdf640SKen Wang #ifdef __BIG_ENDIAN
1891b00861b9STom St Denis 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1892e2cdf640SKen Wang #endif
1893e2cdf640SKen Wang 		break;
1894e2cdf640SKen Wang 	case DRM_FORMAT_XRGB8888:
1895e2cdf640SKen Wang 	case DRM_FORMAT_ARGB8888:
1896b00861b9STom St Denis 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1897b00861b9STom St Denis 			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1898e2cdf640SKen Wang #ifdef __BIG_ENDIAN
1899b00861b9STom St Denis 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1900e2cdf640SKen Wang #endif
1901e2cdf640SKen Wang 		break;
1902e2cdf640SKen Wang 	case DRM_FORMAT_XRGB2101010:
1903e2cdf640SKen Wang 	case DRM_FORMAT_ARGB2101010:
1904b00861b9STom St Denis 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1905b00861b9STom St Denis 			     GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
1906e2cdf640SKen Wang #ifdef __BIG_ENDIAN
1907b00861b9STom St Denis 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1908e2cdf640SKen Wang #endif
1909e2cdf640SKen Wang 		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1910e2cdf640SKen Wang 		bypass_lut = true;
1911e2cdf640SKen Wang 		break;
1912e2cdf640SKen Wang 	case DRM_FORMAT_BGRX1010102:
1913e2cdf640SKen Wang 	case DRM_FORMAT_BGRA1010102:
1914b00861b9STom St Denis 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1915b00861b9STom St Denis 			     GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
1916e2cdf640SKen Wang #ifdef __BIG_ENDIAN
1917b00861b9STom St Denis 		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1918e2cdf640SKen Wang #endif
1919e2cdf640SKen Wang 		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
1920e2cdf640SKen Wang 		bypass_lut = true;
1921e2cdf640SKen Wang 		break;
192200ecc6e6SMauro Rossi 	case DRM_FORMAT_XBGR8888:
192300ecc6e6SMauro Rossi 	case DRM_FORMAT_ABGR8888:
192400ecc6e6SMauro Rossi 		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
192500ecc6e6SMauro Rossi 			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
192600ecc6e6SMauro Rossi 		fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
192700ecc6e6SMauro Rossi 			   GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
192800ecc6e6SMauro Rossi #ifdef __BIG_ENDIAN
192900ecc6e6SMauro Rossi 		fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
193000ecc6e6SMauro Rossi #endif
193100ecc6e6SMauro Rossi 		break;
1932e2cdf640SKen Wang 	default:
193392f1d09cSSakari Ailus 		DRM_ERROR("Unsupported screen format %p4cc\n",
193492f1d09cSSakari Ailus 			  &target_fb->format->format);
1935e2cdf640SKen Wang 		return -EINVAL;
1936e2cdf640SKen Wang 	}
1937e2cdf640SKen Wang 
1938e2cdf640SKen Wang 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1939e2cdf640SKen Wang 		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1940e2cdf640SKen Wang 
1941e2cdf640SKen Wang 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1942e2cdf640SKen Wang 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1943e2cdf640SKen Wang 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1944e2cdf640SKen Wang 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1945e2cdf640SKen Wang 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1946e2cdf640SKen Wang 
1947b00861b9STom St Denis 		fb_format |= GRPH_NUM_BANKS(num_banks);
1948b00861b9STom St Denis 		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
1949b00861b9STom St Denis 		fb_format |= GRPH_TILE_SPLIT(tile_split);
1950b00861b9STom St Denis 		fb_format |= GRPH_BANK_WIDTH(bankw);
1951b00861b9STom St Denis 		fb_format |= GRPH_BANK_HEIGHT(bankh);
1952b00861b9STom St Denis 		fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
19533d5f4d47STom St Denis 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1954b00861b9STom St Denis 		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
19553d5f4d47STom St Denis 	}
1956e2cdf640SKen Wang 
1957e2cdf640SKen Wang 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1958b00861b9STom St Denis 	fb_format |= GRPH_PIPE_CONFIG(pipe_config);
1959e2cdf640SKen Wang 
1960e2cdf640SKen Wang 	dce_v6_0_vga_enable(crtc, false);
1961e2cdf640SKen Wang 
1962e2cdf640SKen Wang 	/* Make sure surface address is updated at vertical blank rather than
1963e2cdf640SKen Wang 	 * horizontal blank
1964e2cdf640SKen Wang 	 */
1965b00861b9STom St Denis 	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1966e2cdf640SKen Wang 
1967b00861b9STom St Denis 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1968e2cdf640SKen Wang 	       upper_32_bits(fb_location));
1969b00861b9STom St Denis 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1970e2cdf640SKen Wang 	       upper_32_bits(fb_location));
1971b00861b9STom St Denis 	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1972b00861b9STom St Denis 	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1973b00861b9STom St Denis 	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1974b00861b9STom St Denis 	       (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1975b00861b9STom St Denis 	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1976b00861b9STom St Denis 	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1977e2cdf640SKen Wang 
1978e2cdf640SKen Wang 	/*
1979e2cdf640SKen Wang 	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
1980e2cdf640SKen Wang 	 * for > 8 bpc scanout to avoid truncating fb indices to their 8 MSBs and to
1981e2cdf640SKen Wang 	 * retain the full precision throughout the pipeline.
1982e2cdf640SKen Wang 	 */
1983b00861b9STom St Denis 	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
1984b00861b9STom St Denis 		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
1985b00861b9STom St Denis 		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
1986e2cdf640SKen Wang 
1987e2cdf640SKen Wang 	if (bypass_lut)
1988e2cdf640SKen Wang 		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1989e2cdf640SKen Wang 
1990b00861b9STom St Denis 	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1991b00861b9STom St Denis 	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1992b00861b9STom St Denis 	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1993b00861b9STom St Denis 	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1994b00861b9STom St Denis 	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1995b00861b9STom St Denis 	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1996e2cdf640SKen Wang 
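	/* convert the framebuffer pitch from bytes to pixels for GRPH_PITCH */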
1997272725c7SVille Syrjälä 	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1998b00861b9STom St Denis 	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1999e2cdf640SKen Wang 
2000e2cdf640SKen Wang 	dce_v6_0_grph_enable(crtc, true);
2001e2cdf640SKen Wang 
2002b00861b9STom St Denis 	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2003e2cdf640SKen Wang 		       target_fb->height);
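	/* the viewport start is aligned down: x to a multiple of 4, y to a
	 * multiple of 2 (likely a hardware granularity requirement)
	 */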
2004e2cdf640SKen Wang 	x &= ~3;
2005e2cdf640SKen Wang 	y &= ~1;
2006b00861b9STom St Denis 	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2007e2cdf640SKen Wang 	       (x << 16) | y);
2008e2cdf640SKen Wang 	viewport_w = crtc->mode.hdisplay;
2009e2cdf640SKen Wang 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2010e2cdf640SKen Wang 
2011b00861b9STom St Denis 	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2012e2cdf640SKen Wang 	       (viewport_w << 16) | viewport_h);
2013e2cdf640SKen Wang 
2014e2cdf640SKen Wang 	/* set pageflip to happen anywhere in vblank interval */
2015b00861b9STom St Denis 	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2016e2cdf640SKen Wang 
2017e2cdf640SKen Wang 	if (!atomic && fb && fb != crtc->primary->fb) {
2018e68d14ddSDaniel Stone 		abo = gem_to_amdgpu_bo(fb->obj[0]);
2019c81a1a74SMichel Dänzer 		r = amdgpu_bo_reserve(abo, true);
2020e2cdf640SKen Wang 		if (unlikely(r != 0))
2021e2cdf640SKen Wang 			return r;
2022765e7fbfSChristian König 		amdgpu_bo_unpin(abo);
2023765e7fbfSChristian König 		amdgpu_bo_unreserve(abo);
2024e2cdf640SKen Wang 	}
2025e2cdf640SKen Wang 
2026e2cdf640SKen Wang 	/* Bytes per pixel may have changed */
2027e2cdf640SKen Wang 	dce_v6_0_bandwidth_update(adev);
2028e2cdf640SKen Wang 
2029e2cdf640SKen Wang 	return 0;
2030e2cdf640SKen Wang 
2031e2cdf640SKen Wang }
2032e2cdf640SKen Wang 
2033e2cdf640SKen Wang static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
2034e2cdf640SKen Wang 				    struct drm_display_mode *mode)
2035e2cdf640SKen Wang {
2036e2cdf640SKen Wang 	struct drm_device *dev = crtc->dev;
20371348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
2038e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2039e2cdf640SKen Wang 
2040e2cdf640SKen Wang 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2041b00861b9STom St Denis 		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
2042b00861b9STom St Denis 		       INTERLEAVE_EN);
2043e2cdf640SKen Wang 	else
2044b00861b9STom St Denis 		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2045e2cdf640SKen Wang }
2046e2cdf640SKen Wang 
2047e2cdf640SKen Wang static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
2048e2cdf640SKen Wang {
2049e2cdf640SKen Wang 
2050e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2051e2cdf640SKen Wang 	struct drm_device *dev = crtc->dev;
20521348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
205376dd3cd8SPeter Rosin 	u16 *r, *g, *b;
2054e2cdf640SKen Wang 	int i;
2055e2cdf640SKen Wang 
2056e2cdf640SKen Wang 	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2057e2cdf640SKen Wang 
2058b00861b9STom St Denis 	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2059b00861b9STom St Denis 	       ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2060b00861b9STom St Denis 		(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2061b00861b9STom St Denis 	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2062b00861b9STom St Denis 	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2063b00861b9STom St Denis 	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2064b00861b9STom St Denis 	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2065b00861b9STom St Denis 	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2066b00861b9STom St Denis 	       ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2067b00861b9STom St Denis 		(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2068e2cdf640SKen Wang 
2069b00861b9STom St Denis 	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2070e2cdf640SKen Wang 
2071b00861b9STom St Denis 	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2072b00861b9STom St Denis 	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2073b00861b9STom St Denis 	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2074e2cdf640SKen Wang 
2075b00861b9STom St Denis 	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2076b00861b9STom St Denis 	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2077b00861b9STom St Denis 	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2078e2cdf640SKen Wang 
2079b00861b9STom St Denis 	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2080b00861b9STom St Denis 	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2081e2cdf640SKen Wang 
2082b00861b9STom St Denis 	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
208376dd3cd8SPeter Rosin 	r = crtc->gamma_store;
208476dd3cd8SPeter Rosin 	g = r + crtc->gamma_size;
208576dd3cd8SPeter Rosin 	b = g + crtc->gamma_size;
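	/* DC_LUT_30_COLOR takes one 10:10:10 entry per write: keep the top
	 * 10 bits of each 16-bit gamma value (red in bits 29:20, green in
	 * bits 19:10, blue in bits 9:0)
	 */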
2086e2cdf640SKen Wang 	for (i = 0; i < 256; i++) {
2087b00861b9STom St Denis 		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
208876dd3cd8SPeter Rosin 		       ((*r++ & 0xffc0) << 14) |
208976dd3cd8SPeter Rosin 		       ((*g++ & 0xffc0) << 4) |
209076dd3cd8SPeter Rosin 		       (*b++ >> 6));
2091e2cdf640SKen Wang 	}
2092e2cdf640SKen Wang 
2093b00861b9STom St Denis 	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2094b00861b9STom St Denis 	       ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2095b00861b9STom St Denis 		(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2096b00861b9STom St Denis 		ICON_DEGAMMA_MODE(0) |
2097b00861b9STom St Denis 		(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2098b00861b9STom St Denis 	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2099b00861b9STom St Denis 	       ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2100b00861b9STom St Denis 		(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2101b00861b9STom St Denis 	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2102b00861b9STom St Denis 	       ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2103b00861b9STom St Denis 		(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2104b00861b9STom St Denis 	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2105b00861b9STom St Denis 	       ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2106b00861b9STom St Denis 		(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2107e2cdf640SKen Wang 	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
2108e2cdf640SKen Wang 	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2109e2cdf640SKen Wang 
2110e2cdf640SKen Wang 
2111e2cdf640SKen Wang }
2112e2cdf640SKen Wang 
2113e2cdf640SKen Wang static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
2114e2cdf640SKen Wang {
2115e2cdf640SKen Wang 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2116e2cdf640SKen Wang 	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2117e2cdf640SKen Wang 
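	/* each UNIPHY block drives two links (A/B); map them onto DIG
	 * encoders 0-5, with UNIPHY3 always using DIG6
	 */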
2118e2cdf640SKen Wang 	switch (amdgpu_encoder->encoder_id) {
2119e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
21203d5f4d47STom St Denis 		return dig->linkb ? 1 : 0;
2121e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
21223d5f4d47STom St Denis 		return dig->linkb ? 3 : 2;
2123e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
21243d5f4d47STom St Denis 		return dig->linkb ? 5 : 4;
2125e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2126e2cdf640SKen Wang 		return 6;
2127e2cdf640SKen Wang 	default:
2128e2cdf640SKen Wang 		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2129e2cdf640SKen Wang 		return 0;
2130e2cdf640SKen Wang 	}
2131e2cdf640SKen Wang }
2132e2cdf640SKen Wang 
2133e2cdf640SKen Wang /**
2134e2cdf640SKen Wang  * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
2135e2cdf640SKen Wang  *
2136e2cdf640SKen Wang  * @crtc: drm crtc
2137e2cdf640SKen Wang  *
2138e2cdf640SKen Wang  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2139e2cdf640SKen Wang  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2140e2cdf640SKen Wang  * monitors a dedicated PPLL must be used.  If a particular board has
2141e2cdf640SKen Wang  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2142e2cdf640SKen Wang  * as there is no need to program the PLL itself.  If we are not able to
2143e2cdf640SKen Wang  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2144e2cdf640SKen Wang  * avoid messing up an existing monitor.
2145e2cdf640SKen Wang  *
2146e2cdf640SKen Wang  *
2147e2cdf640SKen Wang  */
2148e2cdf640SKen Wang static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
2149e2cdf640SKen Wang {
2150e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2151e2cdf640SKen Wang 	struct drm_device *dev = crtc->dev;
21521348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
2153e2cdf640SKen Wang 	u32 pll_in_use;
2154e2cdf640SKen Wang 	int pll;
2155e2cdf640SKen Wang 
2156e2cdf640SKen Wang 	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2157e2cdf640SKen Wang 		if (adev->clock.dp_extclk)
2158e2cdf640SKen Wang 			/* skip PPLL programming if using ext clock */
2159e2cdf640SKen Wang 			return ATOM_PPLL_INVALID;
2160e2cdf640SKen Wang 		else
2161e2cdf640SKen Wang 			return ATOM_PPLL0;
2162e2cdf640SKen Wang 	} else {
2163e2cdf640SKen Wang 		/* use the same PPLL for all monitors with the same clock */
2164e2cdf640SKen Wang 		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2165e2cdf640SKen Wang 		if (pll != ATOM_PPLL_INVALID)
2166e2cdf640SKen Wang 			return pll;
2167e2cdf640SKen Wang 	}
2168e2cdf640SKen Wang 
2169e2cdf640SKen Wang 	/* otherwise pick whichever of PPLL1 and PPLL2 is not in use */
2170e2cdf640SKen Wang 	pll_in_use = amdgpu_pll_get_use_mask(crtc);
2171e2cdf640SKen Wang 	if (!(pll_in_use & (1 << ATOM_PPLL2)))
2172e2cdf640SKen Wang 		return ATOM_PPLL2;
2173e2cdf640SKen Wang 	if (!(pll_in_use & (1 << ATOM_PPLL1)))
2174e2cdf640SKen Wang 		return ATOM_PPLL1;
2175e2cdf640SKen Wang 	DRM_ERROR("unable to allocate a PPLL\n");
2176e2cdf640SKen Wang 	return ATOM_PPLL_INVALID;
2177e2cdf640SKen Wang }
2178e2cdf640SKen Wang 
2179e2cdf640SKen Wang static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2180e2cdf640SKen Wang {
21811348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2182e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2183e2cdf640SKen Wang 	uint32_t cur_lock;
2184e2cdf640SKen Wang 
2185b00861b9STom St Denis 	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2186e2cdf640SKen Wang 	if (lock)
2187b00861b9STom St Denis 		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2188e2cdf640SKen Wang 	else
2189b00861b9STom St Denis 		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2190b00861b9STom St Denis 	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2191e2cdf640SKen Wang }
2192e2cdf640SKen Wang 
2193e2cdf640SKen Wang static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
2194e2cdf640SKen Wang {
2195e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
21961348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2197e2cdf640SKen Wang 
219846e840edSHawking Zhang 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2199b00861b9STom St Denis 	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2200b00861b9STom St Denis 	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2201e2cdf640SKen Wang 
2202e2cdf640SKen Wang 
2203e2cdf640SKen Wang }
2204e2cdf640SKen Wang 
2205e2cdf640SKen Wang static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
2206e2cdf640SKen Wang {
2207e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
22081348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2209e2cdf640SKen Wang 
2210b00861b9STom St Denis 	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2211e2cdf640SKen Wang 	       upper_32_bits(amdgpu_crtc->cursor_addr));
2212b00861b9STom St Denis 	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2213e2cdf640SKen Wang 	       lower_32_bits(amdgpu_crtc->cursor_addr));
2214e2cdf640SKen Wang 
221546e840edSHawking Zhang 	WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2216b00861b9STom St Denis 	       CUR_CONTROL__CURSOR_EN_MASK |
2217b00861b9STom St Denis 	       (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2218b00861b9STom St Denis 	       (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2219e2cdf640SKen Wang 
2220e2cdf640SKen Wang }
2221e2cdf640SKen Wang 
2222e2cdf640SKen Wang static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
2223e2cdf640SKen Wang 				       int x, int y)
2224e2cdf640SKen Wang {
2225e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
22261348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2227e2cdf640SKen Wang 	int xorigin = 0, yorigin = 0;
2228e2cdf640SKen Wang 
222969bcc0b7SMichel Dänzer 	int w = amdgpu_crtc->cursor_width;
223069bcc0b7SMichel Dänzer 
22318e57ec61SMichel Dänzer 	amdgpu_crtc->cursor_x = x;
22328e57ec61SMichel Dänzer 	amdgpu_crtc->cursor_y = y;
22338e57ec61SMichel Dänzer 
2234e2cdf640SKen Wang 	/* avivo cursors are offset into the total surface */
2235e2cdf640SKen Wang 	x += crtc->x;
2236e2cdf640SKen Wang 	y += crtc->y;
2237e2cdf640SKen Wang 	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2238e2cdf640SKen Wang 
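	/* if the cursor hangs off the top/left edge, the hw position is
	 * clamped to 0 and the overhang is absorbed by the hot spot offset
	 */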
2239e2cdf640SKen Wang 	if (x < 0) {
2240e2cdf640SKen Wang 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2241e2cdf640SKen Wang 		x = 0;
2242e2cdf640SKen Wang 	}
2243e2cdf640SKen Wang 	if (y < 0) {
2244e2cdf640SKen Wang 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2245e2cdf640SKen Wang 		y = 0;
2246e2cdf640SKen Wang 	}
2247e2cdf640SKen Wang 
2248b00861b9STom St Denis 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2249b00861b9STom St Denis 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
225069bcc0b7SMichel Dänzer 	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
225169bcc0b7SMichel Dänzer 	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2252e2cdf640SKen Wang 
2253e2cdf640SKen Wang 	return 0;
2254e2cdf640SKen Wang }
2255e2cdf640SKen Wang 
2256e2cdf640SKen Wang static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
2257e2cdf640SKen Wang 				     int x, int y)
2258e2cdf640SKen Wang {
2259e2cdf640SKen Wang 	int ret;
2260e2cdf640SKen Wang 
2261e2cdf640SKen Wang 	dce_v6_0_lock_cursor(crtc, true);
2262e2cdf640SKen Wang 	ret = dce_v6_0_cursor_move_locked(crtc, x, y);
2263e2cdf640SKen Wang 	dce_v6_0_lock_cursor(crtc, false);
2264e2cdf640SKen Wang 
2265e2cdf640SKen Wang 	return ret;
2266e2cdf640SKen Wang }
2267e2cdf640SKen Wang 
2268e2cdf640SKen Wang static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
2269e2cdf640SKen Wang 				     struct drm_file *file_priv,
2270e2cdf640SKen Wang 				     uint32_t handle,
2271e2cdf640SKen Wang 				     uint32_t width,
2272e2cdf640SKen Wang 				     uint32_t height,
2273e2cdf640SKen Wang 				     int32_t hot_x,
2274e2cdf640SKen Wang 				     int32_t hot_y)
2275e2cdf640SKen Wang {
2276e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2277e2cdf640SKen Wang 	struct drm_gem_object *obj;
2278e2cdf640SKen Wang 	struct amdgpu_bo *aobj;
2279e2cdf640SKen Wang 	int ret;
2280e2cdf640SKen Wang 
2281e2cdf640SKen Wang 	if (!handle) {
2282e2cdf640SKen Wang 		/* turn off cursor */
2283e2cdf640SKen Wang 		dce_v6_0_hide_cursor(crtc);
2284e2cdf640SKen Wang 		obj = NULL;
2285e2cdf640SKen Wang 		goto unpin;
2286e2cdf640SKen Wang 	}
2287e2cdf640SKen Wang 
2288e2cdf640SKen Wang 	if ((width > amdgpu_crtc->max_cursor_width) ||
2289e2cdf640SKen Wang 	    (height > amdgpu_crtc->max_cursor_height)) {
2290e2cdf640SKen Wang 		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2291e2cdf640SKen Wang 		return -EINVAL;
2292e2cdf640SKen Wang 	}
2293e2cdf640SKen Wang 
2294e2cdf640SKen Wang 	obj = drm_gem_object_lookup(file_priv, handle);
2295e2cdf640SKen Wang 	if (!obj) {
2296e2cdf640SKen Wang 		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2297e2cdf640SKen Wang 		return -ENOENT;
2298e2cdf640SKen Wang 	}
2299e2cdf640SKen Wang 
2300e2cdf640SKen Wang 	aobj = gem_to_amdgpu_bo(obj);
2301e2cdf640SKen Wang 	ret = amdgpu_bo_reserve(aobj, false);
2302e2cdf640SKen Wang 	if (ret != 0) {
2303e07ddb0cSEmil Velikov 		drm_gem_object_put(obj);
2304e2cdf640SKen Wang 		return ret;
2305e2cdf640SKen Wang 	}
2306e2cdf640SKen Wang 
23077b7c6c81SJunwei Zhang 	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2308e2cdf640SKen Wang 	amdgpu_bo_unreserve(aobj);
2309e2cdf640SKen Wang 	if (ret) {
2310e2cdf640SKen Wang 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2311e07ddb0cSEmil Velikov 		drm_gem_object_put(obj);
2312e2cdf640SKen Wang 		return ret;
2313e2cdf640SKen Wang 	}
23147b7c6c81SJunwei Zhang 	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2315e2cdf640SKen Wang 
2316e2cdf640SKen Wang 	dce_v6_0_lock_cursor(crtc, true);
2317e2cdf640SKen Wang 
231869bcc0b7SMichel Dänzer 	if (width != amdgpu_crtc->cursor_width ||
231969bcc0b7SMichel Dänzer 	    height != amdgpu_crtc->cursor_height ||
232069bcc0b7SMichel Dänzer 	    hot_x != amdgpu_crtc->cursor_hot_x ||
2321e2cdf640SKen Wang 	    hot_y != amdgpu_crtc->cursor_hot_y) {
2322e2cdf640SKen Wang 		int x, y;
2323e2cdf640SKen Wang 
2324e2cdf640SKen Wang 		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2325e2cdf640SKen Wang 		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2326e2cdf640SKen Wang 
2327e2cdf640SKen Wang 		dce_v6_0_cursor_move_locked(crtc, x, y);
2328e2cdf640SKen Wang 
23297c83d7abSMichel Dänzer 		amdgpu_crtc->cursor_width = width;
23307c83d7abSMichel Dänzer 		amdgpu_crtc->cursor_height = height;
233169bcc0b7SMichel Dänzer 		amdgpu_crtc->cursor_hot_x = hot_x;
233269bcc0b7SMichel Dänzer 		amdgpu_crtc->cursor_hot_y = hot_y;
23337c83d7abSMichel Dänzer 	}
23347c83d7abSMichel Dänzer 
2335e2cdf640SKen Wang 	dce_v6_0_show_cursor(crtc);
2336e2cdf640SKen Wang 	dce_v6_0_lock_cursor(crtc, false);
2337e2cdf640SKen Wang 
2338e2cdf640SKen Wang unpin:
2339e2cdf640SKen Wang 	if (amdgpu_crtc->cursor_bo) {
2340e2cdf640SKen Wang 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2341c81a1a74SMichel Dänzer 		ret = amdgpu_bo_reserve(aobj, true);
2342e2cdf640SKen Wang 		if (likely(ret == 0)) {
2343e2cdf640SKen Wang 			amdgpu_bo_unpin(aobj);
2344e2cdf640SKen Wang 			amdgpu_bo_unreserve(aobj);
2345e2cdf640SKen Wang 		}
2346e07ddb0cSEmil Velikov 		drm_gem_object_put(amdgpu_crtc->cursor_bo);
2347e2cdf640SKen Wang 	}
2348e2cdf640SKen Wang 
2349e2cdf640SKen Wang 	amdgpu_crtc->cursor_bo = obj;
2350e2cdf640SKen Wang 	return 0;
2351e2cdf640SKen Wang }
2352e2cdf640SKen Wang 
2353e2cdf640SKen Wang static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
2354e2cdf640SKen Wang {
2355e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2356e2cdf640SKen Wang 
2357e2cdf640SKen Wang 	if (amdgpu_crtc->cursor_bo) {
2358e2cdf640SKen Wang 		dce_v6_0_lock_cursor(crtc, true);
2359e2cdf640SKen Wang 
2360e2cdf640SKen Wang 		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2361e2cdf640SKen Wang 					    amdgpu_crtc->cursor_y);
2362e2cdf640SKen Wang 
2363e2cdf640SKen Wang 		dce_v6_0_show_cursor(crtc);
2364e2cdf640SKen Wang 		dce_v6_0_lock_cursor(crtc, false);
2365e2cdf640SKen Wang 	}
2366e2cdf640SKen Wang }
2367e2cdf640SKen Wang 
2368e2cdf640SKen Wang static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
23696d124ff8SDaniel Vetter 				   u16 *blue, uint32_t size,
23706d124ff8SDaniel Vetter 				   struct drm_modeset_acquire_ctx *ctx)
2371e2cdf640SKen Wang {
2372e2cdf640SKen Wang 	dce_v6_0_crtc_load_lut(crtc);
2373e2cdf640SKen Wang 
2374e2cdf640SKen Wang 	return 0;
2375e2cdf640SKen Wang }
2376e2cdf640SKen Wang 
2377e2cdf640SKen Wang static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2378e2cdf640SKen Wang {
2379e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2380e2cdf640SKen Wang 
2381e2cdf640SKen Wang 	drm_crtc_cleanup(crtc);
2382e2cdf640SKen Wang 	kfree(amdgpu_crtc);
2383e2cdf640SKen Wang }
2384e2cdf640SKen Wang 
2385e2cdf640SKen Wang static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2386e2cdf640SKen Wang 	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
2387e2cdf640SKen Wang 	.cursor_move = dce_v6_0_crtc_cursor_move,
2388e2cdf640SKen Wang 	.gamma_set = dce_v6_0_crtc_gamma_set,
2389775a8364SSamuel Li 	.set_config = amdgpu_display_crtc_set_config,
2390e2cdf640SKen Wang 	.destroy = dce_v6_0_crtc_destroy,
23910cd11932SSamuel Li 	.page_flip_target = amdgpu_display_crtc_page_flip_target,
2392e3eff4b5SThomas Zimmermann 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
2393e3eff4b5SThomas Zimmermann 	.enable_vblank = amdgpu_enable_vblank_kms,
2394e3eff4b5SThomas Zimmermann 	.disable_vblank = amdgpu_disable_vblank_kms,
2395e3eff4b5SThomas Zimmermann 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2396e2cdf640SKen Wang };
2397e2cdf640SKen Wang 
2398e2cdf640SKen Wang static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2399e2cdf640SKen Wang {
2400e2cdf640SKen Wang 	struct drm_device *dev = crtc->dev;
24011348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
2402e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2403e2cdf640SKen Wang 	unsigned type;
2404e2cdf640SKen Wang 
2405e2cdf640SKen Wang 	switch (mode) {
2406e2cdf640SKen Wang 	case DRM_MODE_DPMS_ON:
2407e2cdf640SKen Wang 		amdgpu_crtc->enabled = true;
2408e2cdf640SKen Wang 		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2409e2cdf640SKen Wang 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2410e2cdf640SKen Wang 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
2411734dd01dSSamuel Li 		type = amdgpu_display_crtc_idx_to_irq_type(adev,
2412734dd01dSSamuel Li 						amdgpu_crtc->crtc_id);
2413e2cdf640SKen Wang 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
2414e2cdf640SKen Wang 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2415778e1a54SDaniel Vetter 		drm_crtc_vblank_on(crtc);
2416e2cdf640SKen Wang 		dce_v6_0_crtc_load_lut(crtc);
2417e2cdf640SKen Wang 		break;
2418e2cdf640SKen Wang 	case DRM_MODE_DPMS_STANDBY:
2419e2cdf640SKen Wang 	case DRM_MODE_DPMS_SUSPEND:
2420e2cdf640SKen Wang 	case DRM_MODE_DPMS_OFF:
2421778e1a54SDaniel Vetter 		drm_crtc_vblank_off(crtc);
2422e2cdf640SKen Wang 		if (amdgpu_crtc->enabled)
2423e2cdf640SKen Wang 			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2424e2cdf640SKen Wang 		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2425e2cdf640SKen Wang 		amdgpu_crtc->enabled = false;
2426e2cdf640SKen Wang 		break;
2427e2cdf640SKen Wang 	}
2428e2cdf640SKen Wang 	/* adjust pm to dpms */
242984176663SEvan Quan 	amdgpu_dpm_compute_clocks(adev);
2430e2cdf640SKen Wang }
2431e2cdf640SKen Wang 
2432e2cdf640SKen Wang static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2433e2cdf640SKen Wang {
2434e2cdf640SKen Wang 	/* disable crtc pair power gating before programming */
2435e2cdf640SKen Wang 	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2436e2cdf640SKen Wang 	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2437e2cdf640SKen Wang 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2438e2cdf640SKen Wang }
2439e2cdf640SKen Wang 
2440e2cdf640SKen Wang static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2441e2cdf640SKen Wang {
2442e2cdf640SKen Wang 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2443e2cdf640SKen Wang 	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2444e2cdf640SKen Wang }
2445e2cdf640SKen Wang 
2446e2cdf640SKen Wang static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2447e2cdf640SKen Wang {
2448e2cdf640SKen Wang 
2449e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2450e2cdf640SKen Wang 	struct drm_device *dev = crtc->dev;
24511348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
2452e2cdf640SKen Wang 	struct amdgpu_atom_ss ss;
2453e2cdf640SKen Wang 	int i;
2454e2cdf640SKen Wang 
2455e2cdf640SKen Wang 	dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2456e2cdf640SKen Wang 	if (crtc->primary->fb) {
2457e2cdf640SKen Wang 		int r;
2458765e7fbfSChristian König 		struct amdgpu_bo *abo;
2459e2cdf640SKen Wang 
2460e68d14ddSDaniel Stone 		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2461c81a1a74SMichel Dänzer 		r = amdgpu_bo_reserve(abo, true);
2462e2cdf640SKen Wang 		if (unlikely(r))
2463765e7fbfSChristian König 			DRM_ERROR("failed to reserve abo before unpin\n");
2464e2cdf640SKen Wang 		else {
2465765e7fbfSChristian König 			amdgpu_bo_unpin(abo);
2466765e7fbfSChristian König 			amdgpu_bo_unreserve(abo);
2467e2cdf640SKen Wang 		}
2468e2cdf640SKen Wang 	}
2469e2cdf640SKen Wang 	/* disable the GRPH */
2470e2cdf640SKen Wang 	dce_v6_0_grph_enable(crtc, false);
2471e2cdf640SKen Wang 
2472e2cdf640SKen Wang 	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2473e2cdf640SKen Wang 
2474e2cdf640SKen Wang 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2475e2cdf640SKen Wang 		if (adev->mode_info.crtcs[i] &&
2476e2cdf640SKen Wang 		    adev->mode_info.crtcs[i]->enabled &&
2477e2cdf640SKen Wang 		    i != amdgpu_crtc->crtc_id &&
2478e2cdf640SKen Wang 		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2479e2cdf640SKen Wang 			/* another crtc is still using this pll, don't
2480e2cdf640SKen Wang 			 * turn it off
2481e2cdf640SKen Wang 			 */
2482e2cdf640SKen Wang 			goto done;
2483e2cdf640SKen Wang 		}
2484e2cdf640SKen Wang 	}
2485e2cdf640SKen Wang 
2486e2cdf640SKen Wang 	switch (amdgpu_crtc->pll_id) {
2487e2cdf640SKen Wang 	case ATOM_PPLL1:
2488e2cdf640SKen Wang 	case ATOM_PPLL2:
2489e2cdf640SKen Wang 		/* disable the ppll */
2490e2cdf640SKen Wang 		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2491e2cdf640SKen Wang 						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2492e2cdf640SKen Wang 		break;
2493e2cdf640SKen Wang 	default:
2494e2cdf640SKen Wang 		break;
2495e2cdf640SKen Wang 	}
2496e2cdf640SKen Wang done:
2497e2cdf640SKen Wang 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2498e2cdf640SKen Wang 	amdgpu_crtc->adjusted_clock = 0;
2499e2cdf640SKen Wang 	amdgpu_crtc->encoder = NULL;
2500e2cdf640SKen Wang 	amdgpu_crtc->connector = NULL;
2501e2cdf640SKen Wang }
2502e2cdf640SKen Wang 
2503e2cdf640SKen Wang static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2504e2cdf640SKen Wang 				  struct drm_display_mode *mode,
2505e2cdf640SKen Wang 				  struct drm_display_mode *adjusted_mode,
2506e2cdf640SKen Wang 				  int x, int y, struct drm_framebuffer *old_fb)
2507e2cdf640SKen Wang {
2508e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2509e2cdf640SKen Wang 
2510e2cdf640SKen Wang 	if (!amdgpu_crtc->adjusted_clock)
2511e2cdf640SKen Wang 		return -EINVAL;
2512e2cdf640SKen Wang 
2513e2cdf640SKen Wang 	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2514e2cdf640SKen Wang 	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2515e2cdf640SKen Wang 	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2516e2cdf640SKen Wang 	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2517e2cdf640SKen Wang 	amdgpu_atombios_crtc_scaler_setup(crtc);
2518e2cdf640SKen Wang 	dce_v6_0_cursor_reset(crtc);
2519e2cdf640SKen Wang 	/* update the hw mode for dpm */
2520e2cdf640SKen Wang 	amdgpu_crtc->hw_mode = *adjusted_mode;
2521e2cdf640SKen Wang 
2522e2cdf640SKen Wang 	return 0;
2523e2cdf640SKen Wang }
2524e2cdf640SKen Wang 
2525e2cdf640SKen Wang static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2526e2cdf640SKen Wang 				     const struct drm_display_mode *mode,
2527e2cdf640SKen Wang 				     struct drm_display_mode *adjusted_mode)
2528e2cdf640SKen Wang {
2529e2cdf640SKen Wang 
2530e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2531e2cdf640SKen Wang 	struct drm_device *dev = crtc->dev;
2532e2cdf640SKen Wang 	struct drm_encoder *encoder;
2533e2cdf640SKen Wang 
2534e2cdf640SKen Wang 	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2535e2cdf640SKen Wang 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2536e2cdf640SKen Wang 		if (encoder->crtc == crtc) {
2537e2cdf640SKen Wang 			amdgpu_crtc->encoder = encoder;
2538e2cdf640SKen Wang 			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2539e2cdf640SKen Wang 			break;
2540e2cdf640SKen Wang 		}
2541e2cdf640SKen Wang 	}
2542e2cdf640SKen Wang 	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2543e2cdf640SKen Wang 		amdgpu_crtc->encoder = NULL;
2544e2cdf640SKen Wang 		amdgpu_crtc->connector = NULL;
2545e2cdf640SKen Wang 		return false;
2546e2cdf640SKen Wang 	}
25470c16443aSSamuel Li 	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2548e2cdf640SKen Wang 		return false;
2549e2cdf640SKen Wang 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2550e2cdf640SKen Wang 		return false;
2551e2cdf640SKen Wang 	/* pick pll */
2552e2cdf640SKen Wang 	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2553e2cdf640SKen Wang 	/* if we can't get a PPLL for a non-DP encoder, fail */
2554e2cdf640SKen Wang 	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2555e2cdf640SKen Wang 	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2556e2cdf640SKen Wang 		return false;
2557e2cdf640SKen Wang 
2558e2cdf640SKen Wang 	return true;
2559e2cdf640SKen Wang }
2560e2cdf640SKen Wang 
2561e2cdf640SKen Wang static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2562e2cdf640SKen Wang 				  struct drm_framebuffer *old_fb)
2563e2cdf640SKen Wang {
2564e2cdf640SKen Wang 	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2565e2cdf640SKen Wang }
2566e2cdf640SKen Wang 
2567e2cdf640SKen Wang static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2568e2cdf640SKen Wang 					 struct drm_framebuffer *fb,
2569e2cdf640SKen Wang 					 int x, int y, enum mode_set_atomic state)
2570e2cdf640SKen Wang {
2571e2cdf640SKen Wang 	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2572e2cdf640SKen Wang }
2573e2cdf640SKen Wang 
2574e2cdf640SKen Wang static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2575e2cdf640SKen Wang 	.dpms = dce_v6_0_crtc_dpms,
2576e2cdf640SKen Wang 	.mode_fixup = dce_v6_0_crtc_mode_fixup,
2577e2cdf640SKen Wang 	.mode_set = dce_v6_0_crtc_mode_set,
2578e2cdf640SKen Wang 	.mode_set_base = dce_v6_0_crtc_set_base,
2579e2cdf640SKen Wang 	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2580e2cdf640SKen Wang 	.prepare = dce_v6_0_crtc_prepare,
2581e2cdf640SKen Wang 	.commit = dce_v6_0_crtc_commit,
2582e2cdf640SKen Wang 	.disable = dce_v6_0_crtc_disable,
2583ea702333SThomas Zimmermann 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
2584e2cdf640SKen Wang };
2585e2cdf640SKen Wang 
2586e2cdf640SKen Wang static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2587e2cdf640SKen Wang {
2588e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc;
2589e2cdf640SKen Wang 
2590e2cdf640SKen Wang 	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2591e2cdf640SKen Wang 			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2592e2cdf640SKen Wang 	if (amdgpu_crtc == NULL)
2593e2cdf640SKen Wang 		return -ENOMEM;
2594e2cdf640SKen Wang 
25954a580877SLuben Tuikov 	drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2596e2cdf640SKen Wang 
2597e2cdf640SKen Wang 	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2598e2cdf640SKen Wang 	amdgpu_crtc->crtc_id = index;
2599e2cdf640SKen Wang 	adev->mode_info.crtcs[index] = amdgpu_crtc;
2600e2cdf640SKen Wang 
2601e2cdf640SKen Wang 	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2602e2cdf640SKen Wang 	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
26034a580877SLuben Tuikov 	adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
26044a580877SLuben Tuikov 	adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2605e2cdf640SKen Wang 
2606e2cdf640SKen Wang 	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2607e2cdf640SKen Wang 
2608e2cdf640SKen Wang 	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2609e2cdf640SKen Wang 	amdgpu_crtc->adjusted_clock = 0;
2610e2cdf640SKen Wang 	amdgpu_crtc->encoder = NULL;
2611e2cdf640SKen Wang 	amdgpu_crtc->connector = NULL;
2612e2cdf640SKen Wang 	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2613e2cdf640SKen Wang 
2614e2cdf640SKen Wang 	return 0;
2615e2cdf640SKen Wang }
2616e2cdf640SKen Wang 
2617e2cdf640SKen Wang static int dce_v6_0_early_init(void *handle)
2618e2cdf640SKen Wang {
2619e2cdf640SKen Wang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2620e2cdf640SKen Wang 
2621e2cdf640SKen Wang 	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2622e2cdf640SKen Wang 	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2623e2cdf640SKen Wang 
2624e2cdf640SKen Wang 	dce_v6_0_set_display_funcs(adev);
2625e2cdf640SKen Wang 
26261d160f43SAlex Deucher 	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
26271d160f43SAlex Deucher 
2628e2cdf640SKen Wang 	switch (adev->asic_type) {
2629e2cdf640SKen Wang 	case CHIP_TAHITI:
2630e2cdf640SKen Wang 	case CHIP_PITCAIRN:
2631e2cdf640SKen Wang 	case CHIP_VERDE:
2632e2cdf640SKen Wang 		adev->mode_info.num_hpd = 6;
2633e2cdf640SKen Wang 		adev->mode_info.num_dig = 6;
2634e2cdf640SKen Wang 		break;
2635e2cdf640SKen Wang 	case CHIP_OLAND:
2636e2cdf640SKen Wang 		adev->mode_info.num_hpd = 2;
2637e2cdf640SKen Wang 		adev->mode_info.num_dig = 2;
2638e2cdf640SKen Wang 		break;
2639e2cdf640SKen Wang 	default:
2640e2cdf640SKen Wang 		return -EINVAL;
2641e2cdf640SKen Wang 	}
2642e2cdf640SKen Wang 
2643d794b9f8SMichel Dänzer 	dce_v6_0_set_irq_funcs(adev);
2644d794b9f8SMichel Dänzer 
2645e2cdf640SKen Wang 	return 0;
2646e2cdf640SKen Wang }
2647e2cdf640SKen Wang 
2648e2cdf640SKen Wang static int dce_v6_0_sw_init(void *handle)
2649e2cdf640SKen Wang {
2650e2cdf640SKen Wang 	int r, i;
2651e2cdf640SKen Wang 	bool ret;
2652e2cdf640SKen Wang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2653e2cdf640SKen Wang 
2654e2cdf640SKen Wang 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
26551ffdeca6SChristian König 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2656e2cdf640SKen Wang 		if (r)
2657e2cdf640SKen Wang 			return r;
2658e2cdf640SKen Wang 	}
2659e2cdf640SKen Wang 
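	/* pageflip interrupt sources 8, 10, ... 18: one per possible crtc */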
2660e2cdf640SKen Wang 	for (i = 8; i < 20; i += 2) {
26611ffdeca6SChristian König 		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2662e2cdf640SKen Wang 		if (r)
2663e2cdf640SKen Wang 			return r;
2664e2cdf640SKen Wang 	}
2665e2cdf640SKen Wang 
2666e2cdf640SKen Wang 	/* HPD hotplug */
26671ffdeca6SChristian König 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2668e2cdf640SKen Wang 	if (r)
2669e2cdf640SKen Wang 		return r;
2670e2cdf640SKen Wang 
2671e2cdf640SKen Wang 	adev->mode_info.mode_config_initialized = true;
2672e2cdf640SKen Wang 
26734a580877SLuben Tuikov 	adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
26744a580877SLuben Tuikov 	adev_to_drm(adev)->mode_config.async_page_flip = true;
26754a580877SLuben Tuikov 	adev_to_drm(adev)->mode_config.max_width = 16384;
26764a580877SLuben Tuikov 	adev_to_drm(adev)->mode_config.max_height = 16384;
26774a580877SLuben Tuikov 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2678a6250bdbSAlex Deucher 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
26792af10429STomohito Esaki 	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2680e2cdf640SKen Wang 
26813dc9b1ceSSamuel Li 	r = amdgpu_display_modeset_create_props(adev);
2682e2cdf640SKen Wang 	if (r)
2683e2cdf640SKen Wang 		return r;
2684e2cdf640SKen Wang 
26854a580877SLuben Tuikov 	adev_to_drm(adev)->mode_config.max_width = 16384;
26864a580877SLuben Tuikov 	adev_to_drm(adev)->mode_config.max_height = 16384;
2687e2cdf640SKen Wang 
2688e2cdf640SKen Wang 	/* allocate crtcs */
2689e2cdf640SKen Wang 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2690e2cdf640SKen Wang 		r = dce_v6_0_crtc_init(adev, i);
2691e2cdf640SKen Wang 		if (r)
2692e2cdf640SKen Wang 			return r;
2693e2cdf640SKen Wang 	}
2694e2cdf640SKen Wang 
2695e2cdf640SKen Wang 	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2696e2cdf640SKen Wang 	if (ret)
26974a580877SLuben Tuikov 		amdgpu_display_print_display_setup(adev_to_drm(adev));
2698e2cdf640SKen Wang 	else
2699e2cdf640SKen Wang 		return -EINVAL;
2700e2cdf640SKen Wang 
2701e2cdf640SKen Wang 	/* setup afmt */
2702beb86f29STom St Denis 	r = dce_v6_0_afmt_init(adev);
2703beb86f29STom St Denis 	if (r)
2704beb86f29STom St Denis 		return r;
2705e2cdf640SKen Wang 
2706e2cdf640SKen Wang 	r = dce_v6_0_audio_init(adev);
2707e2cdf640SKen Wang 	if (r)
2708e2cdf640SKen Wang 		return r;
2709e2cdf640SKen Wang 
2710a347ca97SAlex Deucher 	/* Disable vblank IRQs aggressively for power-saving */
2711a347ca97SAlex Deucher 	/* XXX: can this be enabled for DC? */
2712a347ca97SAlex Deucher 	adev_to_drm(adev)->vblank_disable_immediate = true;
2713a347ca97SAlex Deucher 
2714a347ca97SAlex Deucher 	r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
2715a347ca97SAlex Deucher 	if (r)
2716a347ca97SAlex Deucher 		return r;
2717a347ca97SAlex Deucher 
2718a347ca97SAlex Deucher 	/* Pre-DCE11 */
2719a347ca97SAlex Deucher 	INIT_DELAYED_WORK(&adev->hotplug_work,
2720a347ca97SAlex Deucher 		  amdgpu_display_hotplug_work_func);
2721a347ca97SAlex Deucher 
27224a580877SLuben Tuikov 	drm_kms_helper_poll_init(adev_to_drm(adev));
2723e2cdf640SKen Wang 
2724e2cdf640SKen Wang 	return r;
2725e2cdf640SKen Wang }
2726e2cdf640SKen Wang 
2727e2cdf640SKen Wang static int dce_v6_0_sw_fini(void *handle)
2728e2cdf640SKen Wang {
2729e2cdf640SKen Wang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2730e2cdf640SKen Wang 
2731e2cdf640SKen Wang 	kfree(adev->mode_info.bios_hardcoded_edid);
2732e2cdf640SKen Wang 
27334a580877SLuben Tuikov 	drm_kms_helper_poll_fini(adev_to_drm(adev));
2734e2cdf640SKen Wang 
2735e2cdf640SKen Wang 	dce_v6_0_audio_fini(adev);
2736e2cdf640SKen Wang 	dce_v6_0_afmt_fini(adev);
2737e2cdf640SKen Wang 
27384a580877SLuben Tuikov 	drm_mode_config_cleanup(adev_to_drm(adev));
2739e2cdf640SKen Wang 	adev->mode_info.mode_config_initialized = false;
2740e2cdf640SKen Wang 
2741e2cdf640SKen Wang 	return 0;
2742e2cdf640SKen Wang }
2743e2cdf640SKen Wang 
2744e2cdf640SKen Wang static int dce_v6_0_hw_init(void *handle)
2745e2cdf640SKen Wang {
2746e2cdf640SKen Wang 	int i;
2747e2cdf640SKen Wang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2748e2cdf640SKen Wang 
274984b5d3d1SAlex Deucher 	/* disable vga render */
275084b5d3d1SAlex Deucher 	dce_v6_0_set_vga_render_state(adev, false);
2751e2cdf640SKen Wang 	/* init dig PHYs, disp eng pll */
2752e2cdf640SKen Wang 	amdgpu_atombios_encoder_init_dig(adev);
2753e2cdf640SKen Wang 	amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2754e2cdf640SKen Wang 
2755e2cdf640SKen Wang 	/* initialize hpd */
2756e2cdf640SKen Wang 	dce_v6_0_hpd_init(adev);
2757e2cdf640SKen Wang 
2758e2cdf640SKen Wang 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2759e2cdf640SKen Wang 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2760e2cdf640SKen Wang 	}
2761e2cdf640SKen Wang 
2762e2cdf640SKen Wang 	dce_v6_0_pageflip_interrupt_init(adev);
2763e2cdf640SKen Wang 
2764e2cdf640SKen Wang 	return 0;
2765e2cdf640SKen Wang }
2766e2cdf640SKen Wang 
2767e2cdf640SKen Wang static int dce_v6_0_hw_fini(void *handle)
2768e2cdf640SKen Wang {
2769e2cdf640SKen Wang 	int i;
2770e2cdf640SKen Wang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2771e2cdf640SKen Wang 
2772e2cdf640SKen Wang 	dce_v6_0_hpd_fini(adev);
2773e2cdf640SKen Wang 
2774e2cdf640SKen Wang 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2775e2cdf640SKen Wang 		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2776e2cdf640SKen Wang 	}
2777e2cdf640SKen Wang 
2778e2cdf640SKen Wang 	dce_v6_0_pageflip_interrupt_fini(adev);
2779e2cdf640SKen Wang 
2780a347ca97SAlex Deucher 	flush_delayed_work(&adev->hotplug_work);
2781a347ca97SAlex Deucher 
2782e2cdf640SKen Wang 	return 0;
2783e2cdf640SKen Wang }
2784e2cdf640SKen Wang 
2785e2cdf640SKen Wang static int dce_v6_0_suspend(void *handle)
2786e2cdf640SKen Wang {
2787a59b3c80SAlex Deucher 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2788a2e15b0eSAlex Deucher 	int r;
2789a59b3c80SAlex Deucher 
2790a2e15b0eSAlex Deucher 	r = amdgpu_display_suspend_helper(adev);
2791a2e15b0eSAlex Deucher 	if (r)
2792a2e15b0eSAlex Deucher 		return r;
2793a59b3c80SAlex Deucher 	adev->mode_info.bl_level =
2794a59b3c80SAlex Deucher 		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2795a59b3c80SAlex Deucher 
2796e2cdf640SKen Wang 	return dce_v6_0_hw_fini(handle);
2797e2cdf640SKen Wang }
2798e2cdf640SKen Wang 
2799e2cdf640SKen Wang static int dce_v6_0_resume(void *handle)
2800e2cdf640SKen Wang {
2801e2cdf640SKen Wang 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2802e2cdf640SKen Wang 	int ret;
2803e2cdf640SKen Wang 
2804a59b3c80SAlex Deucher 	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2805a59b3c80SAlex Deucher 							   adev->mode_info.bl_level);
2806a59b3c80SAlex Deucher 
2807e2cdf640SKen Wang 	ret = dce_v6_0_hw_init(handle);
2808e2cdf640SKen Wang 
2809e2cdf640SKen Wang 	/* turn on the BL */
2810e2cdf640SKen Wang 	if (adev->mode_info.bl_encoder) {
2811e2cdf640SKen Wang 		u8 bl_level = amdgpu_display_backlight_get_level(adev,
2812e2cdf640SKen Wang 								  adev->mode_info.bl_encoder);
2813e2cdf640SKen Wang 		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2814e2cdf640SKen Wang 						    bl_level);
2815e2cdf640SKen Wang 	}
2816a2e15b0eSAlex Deucher 	if (ret)
2817e2cdf640SKen Wang 		return ret;
2818a2e15b0eSAlex Deucher 
2819a2e15b0eSAlex Deucher 	return amdgpu_display_resume_helper(adev);
2820e2cdf640SKen Wang }
2821e2cdf640SKen Wang 
2822e2cdf640SKen Wang static bool dce_v6_0_is_idle(void *handle)
2823e2cdf640SKen Wang {
2824e2cdf640SKen Wang 	return true;
2825e2cdf640SKen Wang }
2826e2cdf640SKen Wang 
2827e2cdf640SKen Wang static int dce_v6_0_wait_for_idle(void *handle)
2828e2cdf640SKen Wang {
2829e2cdf640SKen Wang 	return 0;
2830e2cdf640SKen Wang }
2831e2cdf640SKen Wang 
2832e2cdf640SKen Wang static int dce_v6_0_soft_reset(void *handle)
2833e2cdf640SKen Wang {
2834e2cdf640SKen Wang 	DRM_INFO("dce_v6_0_soft_reset --- not implemented\n");
2835e2cdf640SKen Wang 	return 0;
2836e2cdf640SKen Wang }
2837e2cdf640SKen Wang 
2838e2cdf640SKen Wang static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2839e2cdf640SKen Wang 						     int crtc,
2840e2cdf640SKen Wang 						     enum amdgpu_interrupt_state state)
2841e2cdf640SKen Wang {
2842e2cdf640SKen Wang 	u32 reg_block, interrupt_mask;
2843e2cdf640SKen Wang 
2844e2cdf640SKen Wang 	if (crtc >= adev->mode_info.num_crtc) {
2845e2cdf640SKen Wang 		DRM_DEBUG("invalid crtc %d\n", crtc);
2846e2cdf640SKen Wang 		return;
2847e2cdf640SKen Wang 	}
2848e2cdf640SKen Wang 
2849e2cdf640SKen Wang 	switch (crtc) {
2850e2cdf640SKen Wang 	case 0:
2851e2cdf640SKen Wang 		reg_block = SI_CRTC0_REGISTER_OFFSET;
2852e2cdf640SKen Wang 		break;
2853e2cdf640SKen Wang 	case 1:
2854e2cdf640SKen Wang 		reg_block = SI_CRTC1_REGISTER_OFFSET;
2855e2cdf640SKen Wang 		break;
2856e2cdf640SKen Wang 	case 2:
2857e2cdf640SKen Wang 		reg_block = SI_CRTC2_REGISTER_OFFSET;
2858e2cdf640SKen Wang 		break;
2859e2cdf640SKen Wang 	case 3:
2860e2cdf640SKen Wang 		reg_block = SI_CRTC3_REGISTER_OFFSET;
2861e2cdf640SKen Wang 		break;
2862e2cdf640SKen Wang 	case 4:
2863e2cdf640SKen Wang 		reg_block = SI_CRTC4_REGISTER_OFFSET;
2864e2cdf640SKen Wang 		break;
2865e2cdf640SKen Wang 	case 5:
2866e2cdf640SKen Wang 		reg_block = SI_CRTC5_REGISTER_OFFSET;
2867e2cdf640SKen Wang 		break;
2868e2cdf640SKen Wang 	default:
2869e2cdf640SKen Wang 		DRM_DEBUG("invalid crtc %d\n", crtc);
2870e2cdf640SKen Wang 		return;
2871e2cdf640SKen Wang 	}
2872e2cdf640SKen Wang 
2873e2cdf640SKen Wang 	switch (state) {
2874e2cdf640SKen Wang 	case AMDGPU_IRQ_STATE_DISABLE:
2875b00861b9STom St Denis 		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2876e2cdf640SKen Wang 		interrupt_mask &= ~VBLANK_INT_MASK;
2877b00861b9STom St Denis 		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2878e2cdf640SKen Wang 		break;
2879e2cdf640SKen Wang 	case AMDGPU_IRQ_STATE_ENABLE:
2880b00861b9STom St Denis 		interrupt_mask = RREG32(mmINT_MASK + reg_block);
2881e2cdf640SKen Wang 		interrupt_mask |= VBLANK_INT_MASK;
2882b00861b9STom St Denis 		WREG32(mmINT_MASK + reg_block, interrupt_mask);
2883e2cdf640SKen Wang 		break;
2884e2cdf640SKen Wang 	default:
2885e2cdf640SKen Wang 		break;
2886e2cdf640SKen Wang 	}
2887e2cdf640SKen Wang }
2888e2cdf640SKen Wang 
2889e2cdf640SKen Wang static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2890e2cdf640SKen Wang 						    int crtc,
2891e2cdf640SKen Wang 						    enum amdgpu_interrupt_state state)
2892e2cdf640SKen Wang {
2893e2cdf640SKen Wang 
2894e2cdf640SKen Wang }
2895e2cdf640SKen Wang 
2896e2cdf640SKen Wang static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2897e2cdf640SKen Wang 					    struct amdgpu_irq_src *src,
2898e2cdf640SKen Wang 					    unsigned type,
2899e2cdf640SKen Wang 					    enum amdgpu_interrupt_state state)
2900e2cdf640SKen Wang {
290134386043SAlex Deucher 	u32 dc_hpd_int_cntl;
2902e2cdf640SKen Wang 
290334386043SAlex Deucher 	if (type >= adev->mode_info.num_hpd) {
2904e2cdf640SKen Wang 		DRM_DEBUG("invalid hpd %d\n", type);
2905e2cdf640SKen Wang 		return 0;
2906e2cdf640SKen Wang 	}
2907e2cdf640SKen Wang 
2908e2cdf640SKen Wang 	switch (state) {
2909e2cdf640SKen Wang 	case AMDGPU_IRQ_STATE_DISABLE:
2910b00861b9STom St Denis 		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
291146c9cc11SAlex Deucher 		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
2912b00861b9STom St Denis 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2913e2cdf640SKen Wang 		break;
2914e2cdf640SKen Wang 	case AMDGPU_IRQ_STATE_ENABLE:
2915b00861b9STom St Denis 		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
291646c9cc11SAlex Deucher 		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
2917b00861b9STom St Denis 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2918e2cdf640SKen Wang 		break;
2919e2cdf640SKen Wang 	default:
2920e2cdf640SKen Wang 		break;
2921e2cdf640SKen Wang 	}
2922e2cdf640SKen Wang 
2923e2cdf640SKen Wang 	return 0;
2924e2cdf640SKen Wang }
2925e2cdf640SKen Wang 
2926e2cdf640SKen Wang static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2927e2cdf640SKen Wang 					     struct amdgpu_irq_src *src,
2928e2cdf640SKen Wang 					     unsigned type,
2929e2cdf640SKen Wang 					     enum amdgpu_interrupt_state state)
2930e2cdf640SKen Wang {
2931e2cdf640SKen Wang 	switch (type) {
2932e2cdf640SKen Wang 	case AMDGPU_CRTC_IRQ_VBLANK1:
2933e2cdf640SKen Wang 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2934e2cdf640SKen Wang 		break;
2935e2cdf640SKen Wang 	case AMDGPU_CRTC_IRQ_VBLANK2:
2936e2cdf640SKen Wang 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2937e2cdf640SKen Wang 		break;
2938e2cdf640SKen Wang 	case AMDGPU_CRTC_IRQ_VBLANK3:
2939e2cdf640SKen Wang 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2940e2cdf640SKen Wang 		break;
2941e2cdf640SKen Wang 	case AMDGPU_CRTC_IRQ_VBLANK4:
2942e2cdf640SKen Wang 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2943e2cdf640SKen Wang 		break;
2944e2cdf640SKen Wang 	case AMDGPU_CRTC_IRQ_VBLANK5:
2945e2cdf640SKen Wang 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2946e2cdf640SKen Wang 		break;
2947e2cdf640SKen Wang 	case AMDGPU_CRTC_IRQ_VBLANK6:
2948e2cdf640SKen Wang 		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2949e2cdf640SKen Wang 		break;
2950e2cdf640SKen Wang 	case AMDGPU_CRTC_IRQ_VLINE1:
2951e2cdf640SKen Wang 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
2952e2cdf640SKen Wang 		break;
2953e2cdf640SKen Wang 	case AMDGPU_CRTC_IRQ_VLINE2:
2954e2cdf640SKen Wang 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
2955e2cdf640SKen Wang 		break;
2956e2cdf640SKen Wang 	case AMDGPU_CRTC_IRQ_VLINE3:
2957e2cdf640SKen Wang 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
2958e2cdf640SKen Wang 		break;
2959e2cdf640SKen Wang 	case AMDGPU_CRTC_IRQ_VLINE4:
2960e2cdf640SKen Wang 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
2961e2cdf640SKen Wang 		break;
2962e2cdf640SKen Wang 	case AMDGPU_CRTC_IRQ_VLINE5:
2963e2cdf640SKen Wang 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
2964e2cdf640SKen Wang 		break;
2965e2cdf640SKen Wang 	case AMDGPU_CRTC_IRQ_VLINE6:
2966e2cdf640SKen Wang 		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
2967e2cdf640SKen Wang 		break;
2968e2cdf640SKen Wang 	default:
2969e2cdf640SKen Wang 		break;
2970e2cdf640SKen Wang 	}
2971e2cdf640SKen Wang 	return 0;
2972e2cdf640SKen Wang }
2973e2cdf640SKen Wang 
2974e2cdf640SKen Wang static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2975e2cdf640SKen Wang 			     struct amdgpu_irq_src *source,
2976e2cdf640SKen Wang 			     struct amdgpu_iv_entry *entry)
2977e2cdf640SKen Wang {
2978e2cdf640SKen Wang 	unsigned crtc = entry->src_id - 1;
2979e2cdf640SKen Wang 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2980734dd01dSSamuel Li 	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
2981734dd01dSSamuel Li 								    crtc);
2982e2cdf640SKen Wang 
29837ccf5aa8SAlex Deucher 	switch (entry->src_data[0]) {
2984e2cdf640SKen Wang 	case 0: /* vblank */
2985e2cdf640SKen Wang 		if (disp_int & interrupt_status_offsets[crtc].vblank)
2986b00861b9STom St Denis 			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
2987e2cdf640SKen Wang 		else
2988e2cdf640SKen Wang 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2989e2cdf640SKen Wang 
2990e2cdf640SKen Wang 		if (amdgpu_irq_enabled(adev, source, irq_type)) {
29914a580877SLuben Tuikov 			drm_handle_vblank(adev_to_drm(adev), crtc);
2992e2cdf640SKen Wang 		}
2993e2cdf640SKen Wang 		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
2994e2cdf640SKen Wang 		break;
2995e2cdf640SKen Wang 	case 1: /* vline */
2996e2cdf640SKen Wang 		if (disp_int & interrupt_status_offsets[crtc].vline)
2997b00861b9STom St Denis 			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
2998e2cdf640SKen Wang 		else
2999e2cdf640SKen Wang 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3000e2cdf640SKen Wang 
3001e2cdf640SKen Wang 		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3002e2cdf640SKen Wang 		break;
3003e2cdf640SKen Wang 	default:
30047ccf5aa8SAlex Deucher 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3005e2cdf640SKen Wang 		break;
3006e2cdf640SKen Wang 	}
3007e2cdf640SKen Wang 
3008e2cdf640SKen Wang 	return 0;
3009e2cdf640SKen Wang }
3010e2cdf640SKen Wang 
3011e2cdf640SKen Wang static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3012e2cdf640SKen Wang 						 struct amdgpu_irq_src *src,
3013e2cdf640SKen Wang 						 unsigned type,
3014e2cdf640SKen Wang 						 enum amdgpu_interrupt_state state)
3015e2cdf640SKen Wang {
3016e2cdf640SKen Wang 	u32 reg;
3017e2cdf640SKen Wang 
3018e2cdf640SKen Wang 	if (type >= adev->mode_info.num_crtc) {
3019e2cdf640SKen Wang 		DRM_ERROR("invalid pageflip crtc %d\n", type);
3020e2cdf640SKen Wang 		return -EINVAL;
3021e2cdf640SKen Wang 	}
3022e2cdf640SKen Wang 
3023b00861b9STom St Denis 	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3024e2cdf640SKen Wang 	if (state == AMDGPU_IRQ_STATE_DISABLE)
3025b00861b9STom St Denis 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3026e2cdf640SKen Wang 		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3027e2cdf640SKen Wang 	else
3028b00861b9STom St Denis 		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3029e2cdf640SKen Wang 		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3030e2cdf640SKen Wang 
3031e2cdf640SKen Wang 	return 0;
3032e2cdf640SKen Wang }
3033e2cdf640SKen Wang 
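/**
 * dce_v6_0_pageflip_irq - page flip completion interrupt handler.
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: decoded interrupt vector entry
 *
 * Clears the pending pageflip status for the CRTC, marks the flip as
 * completed, sends the vblank event to userspace and schedules the
 * unpin work.
 */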
3034e2cdf640SKen Wang static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
3035e2cdf640SKen Wang 				 struct amdgpu_irq_src *source,
3036e2cdf640SKen Wang 				 struct amdgpu_iv_entry *entry)
3037e2cdf640SKen Wang {
3038e2cdf640SKen Wang 	unsigned long flags;
3039e2cdf640SKen Wang 	unsigned crtc_id;
3040e2cdf640SKen Wang 	struct amdgpu_crtc *amdgpu_crtc;
3041e2cdf640SKen Wang 	struct amdgpu_flip_work *works;
3042e2cdf640SKen Wang 
3043e2cdf640SKen Wang 	crtc_id = (entry->src_id - 8) >> 1;
3044e2cdf640SKen Wang 	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3045e2cdf640SKen Wang 
3046e2cdf640SKen Wang 	if (crtc_id >= adev->mode_info.num_crtc) {
3047e2cdf640SKen Wang 		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3048e2cdf640SKen Wang 		return -EINVAL;
3049e2cdf640SKen Wang 	}
3050e2cdf640SKen Wang 
3051b00861b9STom St Denis 	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3052e2cdf640SKen Wang 	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3053b00861b9STom St Denis 		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3054e2cdf640SKen Wang 		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3055e2cdf640SKen Wang 
3056e2cdf640SKen Wang 	/* the IRQ can fire during initial setup, before the crtc is allocated */
3057e2cdf640SKen Wang 	if (amdgpu_crtc == NULL)
3058e2cdf640SKen Wang 		return 0;
3059e2cdf640SKen Wang 
30604a580877SLuben Tuikov 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3061e2cdf640SKen Wang 	works = amdgpu_crtc->pflip_works;
3062e2cdf640SKen Wang 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3063e2cdf640SKen Wang 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3064e2cdf640SKen Wang 						"AMDGPU_FLIP_SUBMITTED(%d)\n",
3065e2cdf640SKen Wang 						amdgpu_crtc->pflip_status,
3066e2cdf640SKen Wang 						AMDGPU_FLIP_SUBMITTED);
30674a580877SLuben Tuikov 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3068e2cdf640SKen Wang 		return 0;
3069e2cdf640SKen Wang 	}
3070e2cdf640SKen Wang 
3071e2cdf640SKen Wang 	/* page flip completed. clean up */
3072e2cdf640SKen Wang 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3073e2cdf640SKen Wang 	amdgpu_crtc->pflip_works = NULL;
3074e2cdf640SKen Wang 
3075e2cdf640SKen Wang 	/* wake up userspace */
3076e2cdf640SKen Wang 	if (works->event)
3077e2cdf640SKen Wang 		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3078e2cdf640SKen Wang 
30794a580877SLuben Tuikov 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3080e2cdf640SKen Wang 
3081e2cdf640SKen Wang 	drm_crtc_vblank_put(&amdgpu_crtc->base);
3082e2cdf640SKen Wang 	schedule_work(&works->unpin_work);
3083e2cdf640SKen Wang 
3084e2cdf640SKen Wang 	return 0;
3085e2cdf640SKen Wang }
3086e2cdf640SKen Wang 
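/**
 * dce_v6_0_hpd_irq - hotplug detect interrupt handler.
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: decoded interrupt vector entry
 *
 * Acks the interrupt for the HPD pin in entry->src_data[0] and
 * schedules the deferred hotplug work.
 */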
3087e2cdf640SKen Wang static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
3088e2cdf640SKen Wang 			    struct amdgpu_irq_src *source,
3089e2cdf640SKen Wang 			    struct amdgpu_iv_entry *entry)
3090e2cdf640SKen Wang {
309134386043SAlex Deucher 	uint32_t disp_int, mask, tmp;
3092e2cdf640SKen Wang 	unsigned hpd;
3093e2cdf640SKen Wang 
30947ccf5aa8SAlex Deucher 	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
30957ccf5aa8SAlex Deucher 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3096e2cdf640SKen Wang 		return 0;
3097e2cdf640SKen Wang 	}
3098e2cdf640SKen Wang 
30997ccf5aa8SAlex Deucher 	hpd = entry->src_data[0];
3100e2cdf640SKen Wang 	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3101e2cdf640SKen Wang 	mask = interrupt_status_offsets[hpd].hpd;
3102e2cdf640SKen Wang 
3103e2cdf640SKen Wang 	if (disp_int & mask) {
3104b00861b9STom St Denis 		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3105e2cdf640SKen Wang 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3106b00861b9STom St Denis 		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3107e2cdf640SKen Wang 		schedule_delayed_work(&adev->hotplug_work, 0);
3108a44f8626SMichel Dänzer 		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3109e2cdf640SKen Wang 	}
3110e2cdf640SKen Wang 
3111e2cdf640SKen Wang 	return 0;
3113e2cdf640SKen Wang }
3114e2cdf640SKen Wang 
3115e2cdf640SKen Wang static int dce_v6_0_set_clockgating_state(void *handle,
3116e2cdf640SKen Wang 					  enum amd_clockgating_state state)
3117e2cdf640SKen Wang {
3118e2cdf640SKen Wang 	return 0;
3119e2cdf640SKen Wang }
3120e2cdf640SKen Wang 
3121e2cdf640SKen Wang static int dce_v6_0_set_powergating_state(void *handle,
3122e2cdf640SKen Wang 					  enum amd_powergating_state state)
3123e2cdf640SKen Wang {
3124e2cdf640SKen Wang 	return 0;
3125e2cdf640SKen Wang }
3126e2cdf640SKen Wang 
3127a1255107SAlex Deucher static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
3128e2cdf640SKen Wang 	.name = "dce_v6_0",
3129e2cdf640SKen Wang 	.early_init = dce_v6_0_early_init,
3130e2cdf640SKen Wang 	.late_init = NULL,
3131e2cdf640SKen Wang 	.sw_init = dce_v6_0_sw_init,
3132e2cdf640SKen Wang 	.sw_fini = dce_v6_0_sw_fini,
3133e2cdf640SKen Wang 	.hw_init = dce_v6_0_hw_init,
3134e2cdf640SKen Wang 	.hw_fini = dce_v6_0_hw_fini,
3135e2cdf640SKen Wang 	.suspend = dce_v6_0_suspend,
3136e2cdf640SKen Wang 	.resume = dce_v6_0_resume,
3137e2cdf640SKen Wang 	.is_idle = dce_v6_0_is_idle,
3138e2cdf640SKen Wang 	.wait_for_idle = dce_v6_0_wait_for_idle,
3139e2cdf640SKen Wang 	.soft_reset = dce_v6_0_soft_reset,
3140e2cdf640SKen Wang 	.set_clockgating_state = dce_v6_0_set_clockgating_state,
3141e2cdf640SKen Wang 	.set_powergating_state = dce_v6_0_set_powergating_state,
3142e2cdf640SKen Wang };
3143e2cdf640SKen Wang 
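/**
 * dce_v6_0_encoder_mode_set - encoder mode set helper hook.
 *
 * @encoder: encoder being programmed
 * @mode: requested display mode
 * @adjusted_mode: mode after fixup by the helpers
 *
 * Records the adjusted pixel clock, turns the encoder off, restores the
 * interleave setting (the scaler setup clears it on some chips) and
 * programs AFMT for HDMI and DP encoder modes.
 */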
3144e2cdf640SKen Wang static void
3145e2cdf640SKen Wang dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
3146e2cdf640SKen Wang 			  struct drm_display_mode *mode,
3147e2cdf640SKen Wang 			  struct drm_display_mode *adjusted_mode)
3148e2cdf640SKen Wang {
3150e2cdf640SKen Wang 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
31514caca706SXiaojie Yuan 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3152e2cdf640SKen Wang 
3153e2cdf640SKen Wang 	amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3154e2cdf640SKen Wang 
3155e2cdf640SKen Wang 	/* need to call this here rather than in prepare() since we need some crtc info */
3156e2cdf640SKen Wang 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3157e2cdf640SKen Wang 
3158e2cdf640SKen Wang 	/* set scaler clears this on some chips */
3159e2cdf640SKen Wang 	dce_v6_0_set_interleave(encoder->crtc, mode);
3160e2cdf640SKen Wang 
31614caca706SXiaojie Yuan 	if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
3162e2cdf640SKen Wang 		dce_v6_0_afmt_enable(encoder, true);
3163e2cdf640SKen Wang 		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
3164e2cdf640SKen Wang 	}
3165e2cdf640SKen Wang }
3166e2cdf640SKen Wang 
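/**
 * dce_v6_0_encoder_prepare - prepare the encoder for a mode set.
 *
 * @encoder: encoder being prepared
 *
 * Assigns a DIG encoder and AFMT block for digital outputs, locks the
 * atombios scratch registers, selects the i2c router port and powers
 * up eDP panels where applicable, then sets the crtc source and
 * programs the FMT block.
 */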
3167e2cdf640SKen Wang static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
3168e2cdf640SKen Wang {
31701348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
3171e2cdf640SKen Wang 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3172e2cdf640SKen Wang 	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3173e2cdf640SKen Wang 
3174e2cdf640SKen Wang 	if ((amdgpu_encoder->active_device &
3175e2cdf640SKen Wang 	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3176e2cdf640SKen Wang 	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3177e2cdf640SKen Wang 	     ENCODER_OBJECT_ID_NONE)) {
3178e2cdf640SKen Wang 		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3179e2cdf640SKen Wang 		if (dig) {
3180e2cdf640SKen Wang 			dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
3181e2cdf640SKen Wang 			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3182e2cdf640SKen Wang 				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3183e2cdf640SKen Wang 		}
3184e2cdf640SKen Wang 	}
3185e2cdf640SKen Wang 
3186e2cdf640SKen Wang 	amdgpu_atombios_scratch_regs_lock(adev, true);
3187e2cdf640SKen Wang 
3188e2cdf640SKen Wang 	if (connector) {
3189e2cdf640SKen Wang 		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3190e2cdf640SKen Wang 
3191e2cdf640SKen Wang 		/* select the clock/data port if it uses a router */
3192e2cdf640SKen Wang 		if (amdgpu_connector->router.cd_valid)
3193e2cdf640SKen Wang 			amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3194e2cdf640SKen Wang 
3195e2cdf640SKen Wang 		/* turn eDP panel on for mode set */
3196e2cdf640SKen Wang 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3197e2cdf640SKen Wang 			amdgpu_atombios_encoder_set_edp_panel_power(connector,
3198e2cdf640SKen Wang 							     ATOM_TRANSMITTER_ACTION_POWER_ON);
3199e2cdf640SKen Wang 	}
3200e2cdf640SKen Wang 
3201e2cdf640SKen Wang 	/* this is needed for the pll/ss setup to work correctly in some cases */
3202e2cdf640SKen Wang 	amdgpu_atombios_encoder_set_crtc_source(encoder);
3203e2cdf640SKen Wang 	/* set up the FMT blocks */
3204e2cdf640SKen Wang 	dce_v6_0_program_fmt(encoder);
3205e2cdf640SKen Wang }
3206e2cdf640SKen Wang 
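/**
 * dce_v6_0_encoder_commit - finish the encoder mode set.
 *
 * @encoder: encoder being committed
 *
 * Turns the encoder back on and releases the atombios scratch register
 * lock taken in dce_v6_0_encoder_prepare().
 */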
3207e2cdf640SKen Wang static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
3208e2cdf640SKen Wang {
3210e2cdf640SKen Wang 	struct drm_device *dev = encoder->dev;
32111348969aSLuben Tuikov 	struct amdgpu_device *adev = drm_to_adev(dev);
3212e2cdf640SKen Wang 
3213e2cdf640SKen Wang 	/* need to call this here as we need the crtc set up */
3214e2cdf640SKen Wang 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3215e2cdf640SKen Wang 	amdgpu_atombios_scratch_regs_lock(adev, false);
3216e2cdf640SKen Wang }
3217e2cdf640SKen Wang 
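/**
 * dce_v6_0_encoder_disable - disable an encoder.
 *
 * @encoder: encoder being disabled
 *
 * Turns the encoder off, disables AFMT for HDMI/DP digital encoders,
 * releases the DIG encoder assignment and clears the active devices.
 */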
3218e2cdf640SKen Wang static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
3219e2cdf640SKen Wang {
3221e2cdf640SKen Wang 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3222e2cdf640SKen Wang 	struct amdgpu_encoder_atom_dig *dig;
32234caca706SXiaojie Yuan 	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3224e2cdf640SKen Wang 
3225e2cdf640SKen Wang 	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3226e2cdf640SKen Wang 
3227e2cdf640SKen Wang 	if (amdgpu_atombios_encoder_is_digital(encoder)) {
32284caca706SXiaojie Yuan 		if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
3229e2cdf640SKen Wang 			dce_v6_0_afmt_enable(encoder, false);
3230e2cdf640SKen Wang 		dig = amdgpu_encoder->enc_priv;
3231e2cdf640SKen Wang 		dig->dig_encoder = -1;
3232e2cdf640SKen Wang 	}
3233e2cdf640SKen Wang 	amdgpu_encoder->active_device = 0;
3234e2cdf640SKen Wang }
3235e2cdf640SKen Wang 
3236e2cdf640SKen Wang /* these are handled by the primary encoders */
3237e2cdf640SKen Wang static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
3238e2cdf640SKen Wang {
3239e2cdf640SKen Wang 
3240e2cdf640SKen Wang }
3241e2cdf640SKen Wang 
3242e2cdf640SKen Wang static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
3243e2cdf640SKen Wang {
3244e2cdf640SKen Wang 
3245e2cdf640SKen Wang }
3246e2cdf640SKen Wang 
3247e2cdf640SKen Wang static void
3248e2cdf640SKen Wang dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
3249e2cdf640SKen Wang 		      struct drm_display_mode *mode,
3250e2cdf640SKen Wang 		      struct drm_display_mode *adjusted_mode)
3251e2cdf640SKen Wang {
3252e2cdf640SKen Wang 
3253e2cdf640SKen Wang }
3254e2cdf640SKen Wang 
3255e2cdf640SKen Wang static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
3256e2cdf640SKen Wang {
3257e2cdf640SKen Wang 
3258e2cdf640SKen Wang }
3259e2cdf640SKen Wang 
3260e2cdf640SKen Wang static void
3261e2cdf640SKen Wang dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
3262e2cdf640SKen Wang {
3263e2cdf640SKen Wang 
3264e2cdf640SKen Wang }
3265e2cdf640SKen Wang 
3266e2cdf640SKen Wang static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
3267e2cdf640SKen Wang 				    const struct drm_display_mode *mode,
3268e2cdf640SKen Wang 				    struct drm_display_mode *adjusted_mode)
3269e2cdf640SKen Wang {
3270e2cdf640SKen Wang 	return true;
3271e2cdf640SKen Wang }
3272e2cdf640SKen Wang 
3273e2cdf640SKen Wang static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
3274e2cdf640SKen Wang 	.dpms = dce_v6_0_ext_dpms,
3275e2cdf640SKen Wang 	.mode_fixup = dce_v6_0_ext_mode_fixup,
3276e2cdf640SKen Wang 	.prepare = dce_v6_0_ext_prepare,
3277e2cdf640SKen Wang 	.mode_set = dce_v6_0_ext_mode_set,
3278e2cdf640SKen Wang 	.commit = dce_v6_0_ext_commit,
3279e2cdf640SKen Wang 	.disable = dce_v6_0_ext_disable,
3280e2cdf640SKen Wang 	/* no detect for TMDS/LVDS yet */
3281e2cdf640SKen Wang };
3282e2cdf640SKen Wang 
3283e2cdf640SKen Wang static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
3284e2cdf640SKen Wang 	.dpms = amdgpu_atombios_encoder_dpms,
3285e2cdf640SKen Wang 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3286e2cdf640SKen Wang 	.prepare = dce_v6_0_encoder_prepare,
3287e2cdf640SKen Wang 	.mode_set = dce_v6_0_encoder_mode_set,
3288e2cdf640SKen Wang 	.commit = dce_v6_0_encoder_commit,
3289e2cdf640SKen Wang 	.disable = dce_v6_0_encoder_disable,
3290e2cdf640SKen Wang 	.detect = amdgpu_atombios_encoder_dig_detect,
3291e2cdf640SKen Wang };
3292e2cdf640SKen Wang 
3293e2cdf640SKen Wang static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
3294e2cdf640SKen Wang 	.dpms = amdgpu_atombios_encoder_dpms,
3295e2cdf640SKen Wang 	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3296e2cdf640SKen Wang 	.prepare = dce_v6_0_encoder_prepare,
3297e2cdf640SKen Wang 	.mode_set = dce_v6_0_encoder_mode_set,
3298e2cdf640SKen Wang 	.commit = dce_v6_0_encoder_commit,
3299e2cdf640SKen Wang 	.detect = amdgpu_atombios_encoder_dac_detect,
3300e2cdf640SKen Wang };
3301e2cdf640SKen Wang 
3302e2cdf640SKen Wang static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
3303e2cdf640SKen Wang {
3304e2cdf640SKen Wang 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3305e2cdf640SKen Wang 	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3306e2cdf640SKen Wang 		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3307e2cdf640SKen Wang 	kfree(amdgpu_encoder->enc_priv);
3308e2cdf640SKen Wang 	drm_encoder_cleanup(encoder);
3309e2cdf640SKen Wang 	kfree(amdgpu_encoder);
3310e2cdf640SKen Wang }
3311e2cdf640SKen Wang 
3312e2cdf640SKen Wang static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
3313e2cdf640SKen Wang 	.destroy = dce_v6_0_encoder_destroy,
3314e2cdf640SKen Wang };
3315e2cdf640SKen Wang 
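/**
 * dce_v6_0_encoder_add - register an encoder described by the BIOS.
 *
 * @adev: amdgpu_device pointer
 * @encoder_enum: encoder object enum from the BIOS tables
 * @supported_device: bitmask of devices this encoder can drive
 * @caps: encoder capability flags
 *
 * If an encoder with the same enum already exists its supported device
 * mask is extended; otherwise a new amdgpu_encoder is allocated and
 * initialized with the DRM encoder type and helper funcs matching the
 * encoder object id.
 */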
3316e2cdf640SKen Wang static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
3317e2cdf640SKen Wang 				 uint32_t encoder_enum,
3318e2cdf640SKen Wang 				 uint32_t supported_device,
3319e2cdf640SKen Wang 				 u16 caps)
3320e2cdf640SKen Wang {
33214a580877SLuben Tuikov 	struct drm_device *dev = adev_to_drm(adev);
3322e2cdf640SKen Wang 	struct drm_encoder *encoder;
3323e2cdf640SKen Wang 	struct amdgpu_encoder *amdgpu_encoder;
3324e2cdf640SKen Wang 
3325e2cdf640SKen Wang 	/* see if we already added it */
3326e2cdf640SKen Wang 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3327e2cdf640SKen Wang 		amdgpu_encoder = to_amdgpu_encoder(encoder);
3328e2cdf640SKen Wang 		if (amdgpu_encoder->encoder_enum == encoder_enum) {
3329e2cdf640SKen Wang 			amdgpu_encoder->devices |= supported_device;
3330e2cdf640SKen Wang 			return;
3331e2cdf640SKen Wang 		}
3333e2cdf640SKen Wang 	}
3334e2cdf640SKen Wang 
3335e2cdf640SKen Wang 	/* add a new one */
3336e2cdf640SKen Wang 	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3337e2cdf640SKen Wang 	if (!amdgpu_encoder)
3338e2cdf640SKen Wang 		return;
3339e2cdf640SKen Wang 
3340e2cdf640SKen Wang 	encoder = &amdgpu_encoder->base;
3341e2cdf640SKen Wang 	switch (adev->mode_info.num_crtc) {
3342e2cdf640SKen Wang 	case 1:
3343e2cdf640SKen Wang 		encoder->possible_crtcs = 0x1;
3344e2cdf640SKen Wang 		break;
3345e2cdf640SKen Wang 	case 2:
3346e2cdf640SKen Wang 	default:
3347e2cdf640SKen Wang 		encoder->possible_crtcs = 0x3;
3348e2cdf640SKen Wang 		break;
3349e2cdf640SKen Wang 	case 4:
3350e2cdf640SKen Wang 		encoder->possible_crtcs = 0xf;
3351e2cdf640SKen Wang 		break;
3352e2cdf640SKen Wang 	case 6:
3353e2cdf640SKen Wang 		encoder->possible_crtcs = 0x3f;
3354e2cdf640SKen Wang 		break;
3355e2cdf640SKen Wang 	}
3356e2cdf640SKen Wang 
3357e2cdf640SKen Wang 	amdgpu_encoder->enc_priv = NULL;
3358e2cdf640SKen Wang 	amdgpu_encoder->encoder_enum = encoder_enum;
3359e2cdf640SKen Wang 	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3360e2cdf640SKen Wang 	amdgpu_encoder->devices = supported_device;
3361e2cdf640SKen Wang 	amdgpu_encoder->rmx_type = RMX_OFF;
3362e2cdf640SKen Wang 	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3363e2cdf640SKen Wang 	amdgpu_encoder->is_ext_encoder = false;
3364e2cdf640SKen Wang 	amdgpu_encoder->caps = caps;
3365e2cdf640SKen Wang 
3366e2cdf640SKen Wang 	switch (amdgpu_encoder->encoder_id) {
3367e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3368e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3369e2cdf640SKen Wang 		drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3370e2cdf640SKen Wang 				 DRM_MODE_ENCODER_DAC, NULL);
3371e2cdf640SKen Wang 		drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
3372e2cdf640SKen Wang 		break;
3373e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3374e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3375e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3376e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3377e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3378e2cdf640SKen Wang 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3379e2cdf640SKen Wang 			amdgpu_encoder->rmx_type = RMX_FULL;
3380e2cdf640SKen Wang 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3381e2cdf640SKen Wang 					 DRM_MODE_ENCODER_LVDS, NULL);
3382e2cdf640SKen Wang 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3383e2cdf640SKen Wang 		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3384e2cdf640SKen Wang 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3385e2cdf640SKen Wang 					 DRM_MODE_ENCODER_DAC, NULL);
3386e2cdf640SKen Wang 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3387e2cdf640SKen Wang 		} else {
3388e2cdf640SKen Wang 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3389e2cdf640SKen Wang 					 DRM_MODE_ENCODER_TMDS, NULL);
3390e2cdf640SKen Wang 			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3391e2cdf640SKen Wang 		}
3392e2cdf640SKen Wang 		drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
3393e2cdf640SKen Wang 		break;
3394e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_SI170B:
3395e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_CH7303:
3396e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3397e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3398e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_TITFP513:
3399e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_VT1623:
3400e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_HDMI_SI1930:
3401e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_TRAVIS:
3402e2cdf640SKen Wang 	case ENCODER_OBJECT_ID_NUTMEG:
3403e2cdf640SKen Wang 		/* these are handled by the primary encoders */
3404e2cdf640SKen Wang 		amdgpu_encoder->is_ext_encoder = true;
3405e2cdf640SKen Wang 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3406e2cdf640SKen Wang 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3407e2cdf640SKen Wang 					 DRM_MODE_ENCODER_LVDS, NULL);
3408e2cdf640SKen Wang 		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3409e2cdf640SKen Wang 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3410e2cdf640SKen Wang 					 DRM_MODE_ENCODER_DAC, NULL);
3411e2cdf640SKen Wang 		else
3412e2cdf640SKen Wang 			drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3413e2cdf640SKen Wang 					 DRM_MODE_ENCODER_TMDS, NULL);
3414e2cdf640SKen Wang 		drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3415e2cdf640SKen Wang 		break;
3416e2cdf640SKen Wang 	}
3417e2cdf640SKen Wang }
3418e2cdf640SKen Wang 
3419e2cdf640SKen Wang static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3420e2cdf640SKen Wang 	.bandwidth_update = &dce_v6_0_bandwidth_update,
3421e2cdf640SKen Wang 	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
3422e2cdf640SKen Wang 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3423e2cdf640SKen Wang 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3424e2cdf640SKen Wang 	.hpd_sense = &dce_v6_0_hpd_sense,
3425e2cdf640SKen Wang 	.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3426e2cdf640SKen Wang 	.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3427e2cdf640SKen Wang 	.page_flip = &dce_v6_0_page_flip,
3428e2cdf640SKen Wang 	.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3429e2cdf640SKen Wang 	.add_encoder = &dce_v6_0_encoder_add,
3430e2cdf640SKen Wang 	.add_connector = &amdgpu_connector_add,
3431e2cdf640SKen Wang };
3432e2cdf640SKen Wang 
3433e2cdf640SKen Wang static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3434e2cdf640SKen Wang {
3435e2cdf640SKen Wang 	adev->mode_info.funcs = &dce_v6_0_display_funcs;
3436e2cdf640SKen Wang }
3437e2cdf640SKen Wang 
3438e2cdf640SKen Wang static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3439e2cdf640SKen Wang 	.set = dce_v6_0_set_crtc_interrupt_state,
3440e2cdf640SKen Wang 	.process = dce_v6_0_crtc_irq,
3441e2cdf640SKen Wang };
3442e2cdf640SKen Wang 
3443e2cdf640SKen Wang static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3444e2cdf640SKen Wang 	.set = dce_v6_0_set_pageflip_interrupt_state,
3445e2cdf640SKen Wang 	.process = dce_v6_0_pageflip_irq,
3446e2cdf640SKen Wang };
3447e2cdf640SKen Wang 
3448e2cdf640SKen Wang static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3449e2cdf640SKen Wang 	.set = dce_v6_0_set_hpd_interrupt_state,
3450e2cdf640SKen Wang 	.process = dce_v6_0_hpd_irq,
3451e2cdf640SKen Wang };
3452e2cdf640SKen Wang 
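/* Wire up the CRTC, pageflip and HPD interrupt source callbacks, sizing
 * them from the mode_info crtc/hpd counts.
 */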
3453e2cdf640SKen Wang static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3454e2cdf640SKen Wang {
3455d794b9f8SMichel Dänzer 	if (adev->mode_info.num_crtc > 0)
3456d794b9f8SMichel Dänzer 		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3457d794b9f8SMichel Dänzer 	else
3458d794b9f8SMichel Dänzer 		adev->crtc_irq.num_types = 0;
3459e2cdf640SKen Wang 	adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3460e2cdf640SKen Wang 
3461d794b9f8SMichel Dänzer 	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3462e2cdf640SKen Wang 	adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3463e2cdf640SKen Wang 
3464d794b9f8SMichel Dänzer 	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3465e2cdf640SKen Wang 	adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3466e2cdf640SKen Wang }
3467a1255107SAlex Deucher 
3468a1255107SAlex Deucher const struct amdgpu_ip_block_version dce_v6_0_ip_block =
3469a1255107SAlex Deucher {
3470a1255107SAlex Deucher 	.type = AMD_IP_BLOCK_TYPE_DCE,
3471a1255107SAlex Deucher 	.major = 6,
3472a1255107SAlex Deucher 	.minor = 0,
3473a1255107SAlex Deucher 	.rev = 0,
3474a1255107SAlex Deucher 	.funcs = &dce_v6_0_ip_funcs,
3475a1255107SAlex Deucher };
3476a1255107SAlex Deucher 
3477a1255107SAlex Deucher const struct amdgpu_ip_block_version dce_v6_4_ip_block =
3478a1255107SAlex Deucher {
3479a1255107SAlex Deucher 	.type = AMD_IP_BLOCK_TYPE_DCE,
3480a1255107SAlex Deucher 	.major = 6,
3481a1255107SAlex Deucher 	.minor = 4,
3482a1255107SAlex Deucher 	.rev = 0,
3483a1255107SAlex Deucher 	.funcs = &dce_v6_0_ip_funcs,
3484a1255107SAlex Deucher };
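/*
 * Note: nothing in this file registers these IP block descriptors itself;
 * the SI SoC init code is expected to add them during IP block setup,
 * for example (illustrative sketch only):
 *
 *	amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
 */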
3485