/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"

#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"

#define smnNBIF_MGCG_CTRL_LCLK	0x1013a21c

#define smnCPM_CONTROL		0x11180460
#define smnPCIE_CNTL2		0x11180070

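/* Read the ATI revision ID strap for device 0, function 0. */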
static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

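/* Enable or disable framebuffer read/write access through the BIF. */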
static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

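/*
 * Flush the HDP cache: emit the register write on the given ring when it
 * supports emit_wreg, otherwise fall back to a direct MMIO write.
 */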
static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
	else
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
			NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0);
}

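/* Return the framebuffer size (in MB) reported by RCC_CONFIG_MEMSIZE. */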
static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}

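/*
 * Program the doorbell aperture for one SDMA instance: set the base offset
 * and a non-zero range size when doorbells are used, or a zero size to
 * disable the range.
 */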
static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
					  bool use_doorbell, int doorbell_index)
{
	u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);

	u32 doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 8);
	} else {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
	}

	WREG32(reg, doorbell_range);
}

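/* Enable or disable the BIF doorbell aperture. */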
static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}

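/* Doorbell self-ring aperture setup is a no-op on NBIO 7.4. */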
static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							bool enable)
{

}

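/* Program the doorbell range used by the interrupt handler (IH) ring. */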
static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
	} else {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
	}

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}

static void nbio_v7_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	/* TODO: Add support for v7.4 */
}

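/*
 * Toggle BIF memory light sleep by setting or clearing the SLV/MST/REPLAY
 * memory LS enable bits in PCIE_CNTL2, gated on AMD_CG_SUPPORT_BIF_LS.
 */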
static void nbio_v7_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}

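/* Report which BIF clockgating features are currently active. */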
static void nbio_v7_4_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

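/*
 * Set up interrupt control: point the dummy read address at the dummy page
 * and keep the dummy-read override and non-snooped IH requests disabled.
 */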
static void nbio_v7_4_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

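/*
 * Register offset helpers used by the common amdgpu code for HDP flush
 * polling and for indirect PCIE register access.
 */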
static u32 nbio_v7_4_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
}

static u32 nbio_v7_4_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
}

static u32 nbio_v7_4_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}

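/* Reference-and-mask bits polled in GPU_HDP_FLUSH_DONE for each client. */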
static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
	.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};

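/* Flag passthrough mode when running inside a virtual machine. */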
static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
{
	if (is_virtual_machine())	/* passthrough mode excludes SR-IOV mode */
		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}

static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{

}

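/* NBIO 7.4 callbacks wired into the common amdgpu_nbio_funcs interface. */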
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
	.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg,
	.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
	.get_rev_id = nbio_v7_4_get_rev_id,
	.mc_access_enable = nbio_v7_4_mc_access_enable,
	.hdp_flush = nbio_v7_4_hdp_flush,
	.get_memsize = nbio_v7_4_get_memsize,
	.sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
	.enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
	.update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v7_4_get_clockgating_state,
	.ih_control = nbio_v7_4_ih_control,
	.init_registers = nbio_v7_4_init_registers,
	.detect_hw_virt = nbio_v7_4_detect_hw_virt,
};