xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/si.c (revision ce7d8811)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 #include <linux/slab.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 
29 #include <drm/amdgpu_drm.h>
30 
31 #include "amdgpu.h"
32 #include "amdgpu_atombios.h"
33 #include "amdgpu_ih.h"
34 #include "amdgpu_uvd.h"
35 #include "amdgpu_vce.h"
36 #include "atom.h"
37 #include "amd_pcie.h"
38 #include "si_dpm.h"
39 #include "sid.h"
40 #include "si_ih.h"
41 #include "gfx_v6_0.h"
42 #include "gmc_v6_0.h"
43 #include "si_dma.h"
44 #include "dce_v6_0.h"
45 #include "si.h"
46 #include "uvd_v3_1.h"
47 #include "amdgpu_vkms.h"
48 #include "gca/gfx_6_0_d.h"
49 #include "oss/oss_1_0_d.h"
50 #include "oss/oss_1_0_sh_mask.h"
51 #include "gmc/gmc_6_0_d.h"
52 #include "dce/dce_6_0_d.h"
53 #include "uvd/uvd_4_0_d.h"
54 #include "bif/bif_3_0_d.h"
55 #include "bif/bif_3_0_sh_mask.h"
56 
57 #include "amdgpu_dm.h"
58 
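/* Golden register tables: each entry is a {register, mask, value} triple,
 * applied during early init to program the per-ASIC recommended defaults
 * (the *_mgcg_cgcg_init tables further down do the same for clock gating).
 */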
59 static const u32 tahiti_golden_registers[] =
60 {
61 	mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
62 	mmCB_HW_CONTROL, 0x00010000, 0x00018208,
63 	mmDB_DEBUG, 0xffffffff, 0x00000000,
64 	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
65 	mmDB_DEBUG3, 0x0002021c, 0x00020200,
66 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
67 	0x340c, 0x000000c0, 0x00800040,
68 	0x360c, 0x000000c0, 0x00800040,
69 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
70 	mmFBC_MISC, 0x00200000, 0x50100000,
71 	mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011,
72 	mmMC_ARB_WTM_CNTL_RD, 0x00000003, 0x000007ff,
73 	mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000,
74 	mmPA_CL_ENHANCE, 0xf000001f, 0x00000007,
75 	mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff,
76 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
77 	mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000,
78 	mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x2a00126a,
79 	0x000c, 0xffffffff, 0x0040,
80 	0x000d, 0x00000040, 0x00004040,
81 	mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000,
82 	mmSQ_DED_CNT, 0x01ff1f3f, 0x00000000,
83 	mmSQ_SEC_CNT, 0x01ff1f3f, 0x00000000,
84 	mmSX_DEBUG_1, 0x0000007f, 0x00000020,
85 	mmTA_CNTL_AUX, 0x00010000, 0x00010000,
86 	mmTCP_ADDR_CONFIG, 0x00000200, 0x000002fb,
87 	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
88 	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
89 	mmVGT_FIFO_DEPTHS, 0xffffffff, 0x000fff40,
90 	mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010,
91 	mmVM_CONTEXT0_CNTL, 0x20000000, 0x20fffed8,
92 	mmVM_L2_CG, 0x000c0fc0, 0x000c0400,
93 	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff,
94 	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
95 	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
96 	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
97 };
98 
99 static const u32 tahiti_golden_registers2[] =
100 {
101 	mmMCIF_MEM_CONTROL, 0x00000001, 0x00000001,
102 };
103 
104 static const u32 tahiti_golden_rlc_registers[] =
105 {
106 	mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003,
107 	mmRLC_LB_PARAMS, 0xffffffff, 0x00601005,
108 	0x311f, 0xffffffff, 0x10104040,
109 	0x3122, 0xffffffff, 0x0100000a,
110 	mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00000800,
111 	mmRLC_LB_CNTL, 0xffffffff, 0x800000f4,
112 	mmUVD_CGC_GATE, 0x00000008, 0x00000000,
113 };
114 
115 static const u32 pitcairn_golden_registers[] =
116 {
117 	mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
118 	mmCB_HW_CONTROL, 0x00010000, 0x00018208,
119 	mmDB_DEBUG, 0xffffffff, 0x00000000,
120 	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
121 	mmDB_DEBUG3, 0x0002021c, 0x00020200,
122 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
123 	0x340c, 0x000300c0, 0x00800040,
124 	0x360c, 0x000300c0, 0x00800040,
125 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
126 	mmFBC_MISC, 0x00200000, 0x50100000,
127 	mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011,
128 	mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2,
129 	mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000,
130 	mmPA_CL_ENHANCE, 0xf000001f, 0x00000007,
131 	mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff,
132 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
133 	mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000,
134 	mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x2a00126a,
135 	0x000c, 0xffffffff, 0x0040,
136 	0x000d, 0x00000040, 0x00004040,
137 	mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000,
138 	mmSX_DEBUG_1, 0x0000007f, 0x00000020,
139 	mmTA_CNTL_AUX, 0x00010000, 0x00010000,
140 	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
141 	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
142 	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
143 	mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010,
144 	mmVM_L2_CG, 0x000c0fc0, 0x000c0400,
145 	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff,
146 	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
147 	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
148 	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
149 };
150 
151 static const u32 pitcairn_golden_rlc_registers[] =
152 {
153 	mmGB_ADDR_CONFIG, 0xffffffff, 0x12011003,
154 	mmRLC_LB_PARAMS, 0xffffffff, 0x00601004,
155 	0x311f, 0xffffffff, 0x10102020,
156 	0x3122, 0xffffffff, 0x01000020,
157 	mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00000800,
158 	mmRLC_LB_CNTL, 0xffffffff, 0x800000a4,
159 };
160 
161 static const u32 verde_pg_init[] =
162 {
163 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x40000,
164 	mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x200010ff,
165 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
166 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
167 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
168 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
169 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
170 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x7007,
171 	mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x300010ff,
172 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
173 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
174 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
175 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
176 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
177 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x400000,
178 	mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x100010ff,
179 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
180 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
181 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
182 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
183 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
184 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x120200,
185 	mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x500010ff,
186 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
187 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
188 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
189 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
190 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
191 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x1e1e16,
192 	mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x600010ff,
193 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
194 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
195 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
196 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
197 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
198 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x171f1e,
199 	mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x700010ff,
200 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
201 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
202 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
203 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
204 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
205 	mmGMCON_PGFSM_WRITE, 0xffffffff, 0x0,
206 	mmGMCON_PGFSM_CONFIG, 0xffffffff, 0x9ff,
207 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x0,
208 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10000800,
209 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xf,
210 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xf,
211 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x4,
212 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1000051e,
213 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xffff,
214 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xffff,
215 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x8,
216 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x80500,
217 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x12,
218 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x9050c,
219 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1d,
220 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xb052c,
221 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x2a,
222 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1053e,
223 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x2d,
224 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10546,
225 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x30,
226 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xa054e,
227 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x3c,
228 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1055f,
229 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x3f,
230 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10567,
231 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x42,
232 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1056f,
233 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x45,
234 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x10572,
235 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x48,
236 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x20575,
237 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x4c,
238 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x190801,
239 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x67,
240 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1082a,
241 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x6a,
242 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1b082d,
243 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x87,
244 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x310851,
245 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xba,
246 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x891,
247 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xbc,
248 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x893,
249 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xbe,
250 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x20895,
251 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xc2,
252 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x20899,
253 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xc6,
254 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x2089d,
255 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xca,
256 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x8a1,
257 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xcc,
258 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x8a3,
259 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xce,
260 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x308a5,
261 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0xd3,
262 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x6d08cd,
263 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x142,
264 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x2000095a,
265 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x1,
266 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x144,
267 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x301f095b,
268 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x165,
269 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xc094d,
270 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x173,
271 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xf096d,
272 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x184,
273 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x15097f,
274 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x19b,
275 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xc0998,
276 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1a9,
277 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x409a7,
278 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1af,
279 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0xcdc,
280 	mmGMCON_RENG_RAM_INDEX, 0xffffffff, 0x1b1,
281 	mmGMCON_RENG_RAM_DATA, 0xffffffff, 0x800,
282 	mmGMCON_RENG_EXECUTE, 0xffffffff, 0x6c9b2000,
283 	mmGMCON_MISC2, 0xfc00, 0x2000,
284 	mmGMCON_MISC3, 0xffffffff, 0xfc0,
285 	mmMC_PMG_AUTO_CFG, 0x00000100, 0x100,
286 };
287 
288 static const u32 verde_golden_rlc_registers[] =
289 {
290 	mmGB_ADDR_CONFIG, 0xffffffff, 0x02010002,
291 	mmRLC_LB_PARAMS, 0xffffffff, 0x033f1005,
292 	0x311f, 0xffffffff, 0x10808020,
293 	0x3122, 0xffffffff, 0x00800008,
294 	mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00001000,
295 	mmRLC_LB_CNTL, 0xffffffff, 0x80010014,
296 };
297 
298 static const u32 verde_golden_registers[] =
299 {
300 	mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
301 	mmCB_HW_CONTROL, 0x00010000, 0x00018208,
302 	mmDB_DEBUG, 0xffffffff, 0x00000000,
303 	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
304 	mmDB_DEBUG3, 0x0002021c, 0x00020200,
305 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
306 	0x340c, 0x000300c0, 0x00800040,
307 	0x360c, 0x000300c0, 0x00800040,
308 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
309 	mmFBC_MISC, 0x00200000, 0x50100000,
310 	mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011,
311 	mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2,
312 	mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000,
313 	mmPA_CL_ENHANCE, 0xf000001f, 0x00000007,
314 	mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff,
315 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
316 	mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000,
317 	mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x0000124a,
318 	0x000c, 0xffffffff, 0x0040,
319 	0x000d, 0x00000040, 0x00004040,
320 	mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000,
321 	mmSQ_DED_CNT, 0x01ff1f3f, 0x00000000,
322 	mmSQ_SEC_CNT, 0x01ff1f3f, 0x00000000,
323 	mmSX_DEBUG_1, 0x0000007f, 0x00000020,
324 	mmTA_CNTL_AUX, 0x00010000, 0x00010000,
325 	mmTCP_ADDR_CONFIG, 0x000003ff, 0x00000003,
326 	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
327 	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001032,
328 	mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010,
329 	mmVM_L2_CG, 0x000c0fc0, 0x000c0400,
330 	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff,
331 	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
332 	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
333 	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
334 };
335 
336 static const u32 oland_golden_registers[] =
337 {
338 	mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
339 	mmCB_HW_CONTROL, 0x00010000, 0x00018208,
340 	mmDB_DEBUG, 0xffffffff, 0x00000000,
341 	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
342 	mmDB_DEBUG3, 0x0002021c, 0x00020200,
343 	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
344 	0x340c, 0x000300c0, 0x00800040,
345 	0x360c, 0x000300c0, 0x00800040,
346 	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
347 	mmFBC_MISC, 0x00200000, 0x50100000,
348 	mmDIG0_HDMI_CONTROL, 0x31000311, 0x00000011,
349 	mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2,
350 	mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000,
351 	mmPA_CL_ENHANCE, 0xf000001f, 0x00000007,
352 	mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff,
353 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
354 	mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000,
355 	mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x00000082,
356 	0x000c, 0xffffffff, 0x0040,
357 	0x000d, 0x00000040, 0x00004040,
358 	mmSPI_CONFIG_CNTL, 0x07ffffff, 0x03000000,
359 	mmSX_DEBUG_1, 0x0000007f, 0x00000020,
360 	mmTA_CNTL_AUX, 0x00010000, 0x00010000,
361 	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
362 	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
363 	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
364 	mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010,
365 	mmVM_L2_CG, 0x000c0fc0, 0x000c0400,
366 	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff,
367 	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
368 	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
369 	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
370 
371 };
372 
373 static const u32 oland_golden_rlc_registers[] =
374 {
375 	mmGB_ADDR_CONFIG, 0xffffffff, 0x02010002,
376 	mmRLC_LB_PARAMS, 0xffffffff, 0x00601005,
377 	0x311f, 0xffffffff, 0x10104040,
378 	0x3122, 0xffffffff, 0x0100000a,
379 	mmRLC_LB_CNTR_MAX, 0xffffffff, 0x00000800,
380 	mmRLC_LB_CNTL, 0xffffffff, 0x800000f4,
381 };
382 
383 static const u32 hainan_golden_registers[] =
384 {
385 	0x17bc, 0x00000030, 0x00000011,
386 	mmCB_HW_CONTROL, 0x00010000, 0x00018208,
387 	mmDB_DEBUG, 0xffffffff, 0x00000000,
388 	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
389 	mmDB_DEBUG3, 0x0002021c, 0x00020200,
390 	0x031e, 0x00000080, 0x00000000,
391 	0x3430, 0xff000fff, 0x00000100,
392 	0x340c, 0x000300c0, 0x00800040,
393 	0x3630, 0xff000fff, 0x00000100,
394 	0x360c, 0x000300c0, 0x00800040,
395 	0x16ec, 0x000000f0, 0x00000070,
396 	0x16f0, 0x00200000, 0x50100000,
397 	0x1c0c, 0x31000311, 0x00000011,
398 	mmMC_SEQ_PMG_PG_HWCNTL, 0x00073ffe, 0x000022a2,
399 	mmMC_XPB_P2P_BAR_CFG, 0x000007ff, 0x00000000,
400 	mmPA_CL_ENHANCE, 0xf000001f, 0x00000007,
401 	mmPA_SC_FORCE_EOV_MAX_CNTS, 0xffffffff, 0x00ffffff,
402 	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
403 	mmPA_SC_MODE_CNTL_1, 0x07ffffff, 0x4e000000,
404 	mmPA_SC_RASTER_CONFIG, 0x3f3f3fff, 0x00000000,
405 	0x000c, 0xffffffff, 0x0040,
406 	0x000d, 0x00000040, 0x00004040,
407 	mmSPI_CONFIG_CNTL, 0x03e00000, 0x03600000,
408 	mmSX_DEBUG_1, 0x0000007f, 0x00000020,
409 	mmTA_CNTL_AUX, 0x00010000, 0x00010000,
410 	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
411 	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
412 	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
413 	mmVGT_GS_VERTEX_REUSE, 0x0000001f, 0x00000010,
414 	mmVM_L2_CG, 0x000c0fc0, 0x000c0400,
415 	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0xffffffff,
416 	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
417 	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
418 	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
419 };
420 
421 static const u32 hainan_golden_registers2[] =
422 {
423 	mmGB_ADDR_CONFIG, 0xffffffff, 0x2011003,
424 };
425 
426 static const u32 tahiti_mgcg_cgcg_init[] =
427 {
428 	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc,
429 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
430 	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
431 	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
432 	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
433 	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
434 	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
435 	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
436 	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
437 	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
438 	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
439 	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
440 	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
441 	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
442 	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
443 	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
444 	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
445 	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
446 	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
447 	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
448 	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
449 	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
450 	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
451 	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
452 	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
453 	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
454 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
455 	0x2458, 0xffffffff, 0x00010000,
456 	0x2459, 0xffffffff, 0x00030002,
457 	0x245a, 0xffffffff, 0x00040007,
458 	0x245b, 0xffffffff, 0x00060005,
459 	0x245c, 0xffffffff, 0x00090008,
460 	0x245d, 0xffffffff, 0x00020001,
461 	0x245e, 0xffffffff, 0x00040003,
462 	0x245f, 0xffffffff, 0x00000007,
463 	0x2460, 0xffffffff, 0x00060005,
464 	0x2461, 0xffffffff, 0x00090008,
465 	0x2462, 0xffffffff, 0x00030002,
466 	0x2463, 0xffffffff, 0x00050004,
467 	0x2464, 0xffffffff, 0x00000008,
468 	0x2465, 0xffffffff, 0x00070006,
469 	0x2466, 0xffffffff, 0x000a0009,
470 	0x2467, 0xffffffff, 0x00040003,
471 	0x2468, 0xffffffff, 0x00060005,
472 	0x2469, 0xffffffff, 0x00000009,
473 	0x246a, 0xffffffff, 0x00080007,
474 	0x246b, 0xffffffff, 0x000b000a,
475 	0x246c, 0xffffffff, 0x00050004,
476 	0x246d, 0xffffffff, 0x00070006,
477 	0x246e, 0xffffffff, 0x0008000b,
478 	0x246f, 0xffffffff, 0x000a0009,
479 	0x2470, 0xffffffff, 0x000d000c,
480 	0x2471, 0xffffffff, 0x00060005,
481 	0x2472, 0xffffffff, 0x00080007,
482 	0x2473, 0xffffffff, 0x0000000b,
483 	0x2474, 0xffffffff, 0x000a0009,
484 	0x2475, 0xffffffff, 0x000d000c,
485 	0x2476, 0xffffffff, 0x00070006,
486 	0x2477, 0xffffffff, 0x00090008,
487 	0x2478, 0xffffffff, 0x0000000c,
488 	0x2479, 0xffffffff, 0x000b000a,
489 	0x247a, 0xffffffff, 0x000e000d,
490 	0x247b, 0xffffffff, 0x00080007,
491 	0x247c, 0xffffffff, 0x000a0009,
492 	0x247d, 0xffffffff, 0x0000000d,
493 	0x247e, 0xffffffff, 0x000c000b,
494 	0x247f, 0xffffffff, 0x000f000e,
495 	0x2480, 0xffffffff, 0x00090008,
496 	0x2481, 0xffffffff, 0x000b000a,
497 	0x2482, 0xffffffff, 0x000c000f,
498 	0x2483, 0xffffffff, 0x000e000d,
499 	0x2484, 0xffffffff, 0x00110010,
500 	0x2485, 0xffffffff, 0x000a0009,
501 	0x2486, 0xffffffff, 0x000c000b,
502 	0x2487, 0xffffffff, 0x0000000f,
503 	0x2488, 0xffffffff, 0x000e000d,
504 	0x2489, 0xffffffff, 0x00110010,
505 	0x248a, 0xffffffff, 0x000b000a,
506 	0x248b, 0xffffffff, 0x000d000c,
507 	0x248c, 0xffffffff, 0x00000010,
508 	0x248d, 0xffffffff, 0x000f000e,
509 	0x248e, 0xffffffff, 0x00120011,
510 	0x248f, 0xffffffff, 0x000c000b,
511 	0x2490, 0xffffffff, 0x000e000d,
512 	0x2491, 0xffffffff, 0x00000011,
513 	0x2492, 0xffffffff, 0x0010000f,
514 	0x2493, 0xffffffff, 0x00130012,
515 	0x2494, 0xffffffff, 0x000d000c,
516 	0x2495, 0xffffffff, 0x000f000e,
517 	0x2496, 0xffffffff, 0x00100013,
518 	0x2497, 0xffffffff, 0x00120011,
519 	0x2498, 0xffffffff, 0x00150014,
520 	0x2499, 0xffffffff, 0x000e000d,
521 	0x249a, 0xffffffff, 0x0010000f,
522 	0x249b, 0xffffffff, 0x00000013,
523 	0x249c, 0xffffffff, 0x00120011,
524 	0x249d, 0xffffffff, 0x00150014,
525 	0x249e, 0xffffffff, 0x000f000e,
526 	0x249f, 0xffffffff, 0x00110010,
527 	0x24a0, 0xffffffff, 0x00000014,
528 	0x24a1, 0xffffffff, 0x00130012,
529 	0x24a2, 0xffffffff, 0x00160015,
530 	0x24a3, 0xffffffff, 0x0010000f,
531 	0x24a4, 0xffffffff, 0x00120011,
532 	0x24a5, 0xffffffff, 0x00000015,
533 	0x24a6, 0xffffffff, 0x00140013,
534 	0x24a7, 0xffffffff, 0x00170016,
535 	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
536 	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
537 	mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080,
538 	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
539 	0x000c, 0xffffffff, 0x0000001c,
540 	0x000d, 0x000f0000, 0x000f0000,
541 	0x0583, 0xffffffff, 0x00000100,
542 	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
543 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
544 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
545 	mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000,
546 	mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000,
547 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
548 	0x157a, 0x00000001, 0x00000001,
549 	mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001,
550 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
551 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
552 	0x3430, 0xfffffff0, 0x00000100,
553 	0x3630, 0xfffffff0, 0x00000100,
554 };
555 static const u32 pitcairn_mgcg_cgcg_init[] =
556 {
557 	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc,
558 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
559 	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
560 	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
561 	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
562 	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
563 	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
564 	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
565 	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
566 	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
567 	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
568 	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
569 	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
570 	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
571 	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
572 	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
573 	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
574 	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
575 	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
576 	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
577 	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
578 	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
579 	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
580 	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
581 	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
582 	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
583 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
584 	0x2458, 0xffffffff, 0x00010000,
585 	0x2459, 0xffffffff, 0x00030002,
586 	0x245a, 0xffffffff, 0x00040007,
587 	0x245b, 0xffffffff, 0x00060005,
588 	0x245c, 0xffffffff, 0x00090008,
589 	0x245d, 0xffffffff, 0x00020001,
590 	0x245e, 0xffffffff, 0x00040003,
591 	0x245f, 0xffffffff, 0x00000007,
592 	0x2460, 0xffffffff, 0x00060005,
593 	0x2461, 0xffffffff, 0x00090008,
594 	0x2462, 0xffffffff, 0x00030002,
595 	0x2463, 0xffffffff, 0x00050004,
596 	0x2464, 0xffffffff, 0x00000008,
597 	0x2465, 0xffffffff, 0x00070006,
598 	0x2466, 0xffffffff, 0x000a0009,
599 	0x2467, 0xffffffff, 0x00040003,
600 	0x2468, 0xffffffff, 0x00060005,
601 	0x2469, 0xffffffff, 0x00000009,
602 	0x246a, 0xffffffff, 0x00080007,
603 	0x246b, 0xffffffff, 0x000b000a,
604 	0x246c, 0xffffffff, 0x00050004,
605 	0x246d, 0xffffffff, 0x00070006,
606 	0x246e, 0xffffffff, 0x0008000b,
607 	0x246f, 0xffffffff, 0x000a0009,
608 	0x2470, 0xffffffff, 0x000d000c,
609 	0x2480, 0xffffffff, 0x00090008,
610 	0x2481, 0xffffffff, 0x000b000a,
611 	0x2482, 0xffffffff, 0x000c000f,
612 	0x2483, 0xffffffff, 0x000e000d,
613 	0x2484, 0xffffffff, 0x00110010,
614 	0x2485, 0xffffffff, 0x000a0009,
615 	0x2486, 0xffffffff, 0x000c000b,
616 	0x2487, 0xffffffff, 0x0000000f,
617 	0x2488, 0xffffffff, 0x000e000d,
618 	0x2489, 0xffffffff, 0x00110010,
619 	0x248a, 0xffffffff, 0x000b000a,
620 	0x248b, 0xffffffff, 0x000d000c,
621 	0x248c, 0xffffffff, 0x00000010,
622 	0x248d, 0xffffffff, 0x000f000e,
623 	0x248e, 0xffffffff, 0x00120011,
624 	0x248f, 0xffffffff, 0x000c000b,
625 	0x2490, 0xffffffff, 0x000e000d,
626 	0x2491, 0xffffffff, 0x00000011,
627 	0x2492, 0xffffffff, 0x0010000f,
628 	0x2493, 0xffffffff, 0x00130012,
629 	0x2494, 0xffffffff, 0x000d000c,
630 	0x2495, 0xffffffff, 0x000f000e,
631 	0x2496, 0xffffffff, 0x00100013,
632 	0x2497, 0xffffffff, 0x00120011,
633 	0x2498, 0xffffffff, 0x00150014,
634 	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
635 	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
636 	mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080,
637 	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
638 	0x000c, 0xffffffff, 0x0000001c,
639 	0x000d, 0x000f0000, 0x000f0000,
640 	0x0583, 0xffffffff, 0x00000100,
641 	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
642 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
643 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
644 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
645 	0x157a, 0x00000001, 0x00000001,
646 	mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001,
647 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
648 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
649 	0x3430, 0xfffffff0, 0x00000100,
650 	0x3630, 0xfffffff0, 0x00000100,
651 };
652 
653 static const u32 verde_mgcg_cgcg_init[] =
654 {
655 	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc,
656 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
657 	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
658 	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
659 	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
660 	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
661 	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
662 	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
663 	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
664 	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
665 	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
666 	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
667 	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
668 	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
669 	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
670 	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
671 	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
672 	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
673 	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
674 	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
675 	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
676 	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
677 	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
678 	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
679 	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
680 	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
681 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
682 	0x2458, 0xffffffff, 0x00010000,
683 	0x2459, 0xffffffff, 0x00030002,
684 	0x245a, 0xffffffff, 0x00040007,
685 	0x245b, 0xffffffff, 0x00060005,
686 	0x245c, 0xffffffff, 0x00090008,
687 	0x245d, 0xffffffff, 0x00020001,
688 	0x245e, 0xffffffff, 0x00040003,
689 	0x245f, 0xffffffff, 0x00000007,
690 	0x2460, 0xffffffff, 0x00060005,
691 	0x2461, 0xffffffff, 0x00090008,
692 	0x2462, 0xffffffff, 0x00030002,
693 	0x2463, 0xffffffff, 0x00050004,
694 	0x2464, 0xffffffff, 0x00000008,
695 	0x2465, 0xffffffff, 0x00070006,
696 	0x2466, 0xffffffff, 0x000a0009,
697 	0x2467, 0xffffffff, 0x00040003,
698 	0x2468, 0xffffffff, 0x00060005,
699 	0x2469, 0xffffffff, 0x00000009,
700 	0x246a, 0xffffffff, 0x00080007,
701 	0x246b, 0xffffffff, 0x000b000a,
702 	0x246c, 0xffffffff, 0x00050004,
703 	0x246d, 0xffffffff, 0x00070006,
704 	0x246e, 0xffffffff, 0x0008000b,
705 	0x246f, 0xffffffff, 0x000a0009,
706 	0x2470, 0xffffffff, 0x000d000c,
707 	0x2480, 0xffffffff, 0x00090008,
708 	0x2481, 0xffffffff, 0x000b000a,
709 	0x2482, 0xffffffff, 0x000c000f,
710 	0x2483, 0xffffffff, 0x000e000d,
711 	0x2484, 0xffffffff, 0x00110010,
712 	0x2485, 0xffffffff, 0x000a0009,
713 	0x2486, 0xffffffff, 0x000c000b,
714 	0x2487, 0xffffffff, 0x0000000f,
715 	0x2488, 0xffffffff, 0x000e000d,
716 	0x2489, 0xffffffff, 0x00110010,
717 	0x248a, 0xffffffff, 0x000b000a,
718 	0x248b, 0xffffffff, 0x000d000c,
719 	0x248c, 0xffffffff, 0x00000010,
720 	0x248d, 0xffffffff, 0x000f000e,
721 	0x248e, 0xffffffff, 0x00120011,
722 	0x248f, 0xffffffff, 0x000c000b,
723 	0x2490, 0xffffffff, 0x000e000d,
724 	0x2491, 0xffffffff, 0x00000011,
725 	0x2492, 0xffffffff, 0x0010000f,
726 	0x2493, 0xffffffff, 0x00130012,
727 	0x2494, 0xffffffff, 0x000d000c,
728 	0x2495, 0xffffffff, 0x000f000e,
729 	0x2496, 0xffffffff, 0x00100013,
730 	0x2497, 0xffffffff, 0x00120011,
731 	0x2498, 0xffffffff, 0x00150014,
732 	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
733 	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
734 	mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080,
735 	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
736 	0x000c, 0xffffffff, 0x0000001c,
737 	0x000d, 0x000f0000, 0x000f0000,
738 	0x0583, 0xffffffff, 0x00000100,
739 	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
740 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
741 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
742 	mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000,
743 	mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000,
744 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
745 	0x157a, 0x00000001, 0x00000001,
746 	mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001,
747 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
748 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
749 	0x3430, 0xfffffff0, 0x00000100,
750 	0x3630, 0xfffffff0, 0x00000100,
751 };
752 
753 static const u32 oland_mgcg_cgcg_init[] =
754 {
755 	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc,
756 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
757 	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
758 	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
759 	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
760 	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
761 	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
762 	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
763 	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
764 	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
765 	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
766 	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
767 	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
768 	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
769 	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
770 	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
771 	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
772 	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
773 	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
774 	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
775 	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
776 	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
777 	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
778 	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
779 	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
780 	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
781 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
782 	0x2458, 0xffffffff, 0x00010000,
783 	0x2459, 0xffffffff, 0x00030002,
784 	0x245a, 0xffffffff, 0x00040007,
785 	0x245b, 0xffffffff, 0x00060005,
786 	0x245c, 0xffffffff, 0x00090008,
787 	0x245d, 0xffffffff, 0x00020001,
788 	0x245e, 0xffffffff, 0x00040003,
789 	0x245f, 0xffffffff, 0x00000007,
790 	0x2460, 0xffffffff, 0x00060005,
791 	0x2461, 0xffffffff, 0x00090008,
792 	0x2462, 0xffffffff, 0x00030002,
793 	0x2463, 0xffffffff, 0x00050004,
794 	0x2464, 0xffffffff, 0x00000008,
795 	0x2465, 0xffffffff, 0x00070006,
796 	0x2466, 0xffffffff, 0x000a0009,
797 	0x2467, 0xffffffff, 0x00040003,
798 	0x2468, 0xffffffff, 0x00060005,
799 	0x2469, 0xffffffff, 0x00000009,
800 	0x246a, 0xffffffff, 0x00080007,
801 	0x246b, 0xffffffff, 0x000b000a,
802 	0x246c, 0xffffffff, 0x00050004,
803 	0x246d, 0xffffffff, 0x00070006,
804 	0x246e, 0xffffffff, 0x0008000b,
805 	0x246f, 0xffffffff, 0x000a0009,
806 	0x2470, 0xffffffff, 0x000d000c,
807 	0x2471, 0xffffffff, 0x00060005,
808 	0x2472, 0xffffffff, 0x00080007,
809 	0x2473, 0xffffffff, 0x0000000b,
810 	0x2474, 0xffffffff, 0x000a0009,
811 	0x2475, 0xffffffff, 0x000d000c,
812 	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
813 	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
814 	mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080,
815 	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
816 	0x000c, 0xffffffff, 0x0000001c,
817 	0x000d, 0x000f0000, 0x000f0000,
818 	0x0583, 0xffffffff, 0x00000100,
819 	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
820 	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
821 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
822 	mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000,
823 	mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000,
824 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
825 	0x157a, 0x00000001, 0x00000001,
826 	mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001,
827 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
828 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
829 	0x3430, 0xfffffff0, 0x00000100,
830 	0x3630, 0xfffffff0, 0x00000100,
831 };
832 
833 static const u32 hainan_mgcg_cgcg_init[] =
834 {
835 	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xfffffffc,
836 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
837 	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
838 	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
839 	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
840 	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
841 	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
842 	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
843 	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
844 	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
845 	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
846 	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
847 	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
848 	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
849 	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
850 	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
851 	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
852 	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
853 	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
854 	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
855 	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
856 	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
857 	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
858 	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
859 	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
860 	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
861 	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
862 	0x2458, 0xffffffff, 0x00010000,
863 	0x2459, 0xffffffff, 0x00030002,
864 	0x245a, 0xffffffff, 0x00040007,
865 	0x245b, 0xffffffff, 0x00060005,
866 	0x245c, 0xffffffff, 0x00090008,
867 	0x245d, 0xffffffff, 0x00020001,
868 	0x245e, 0xffffffff, 0x00040003,
869 	0x245f, 0xffffffff, 0x00000007,
870 	0x2460, 0xffffffff, 0x00060005,
871 	0x2461, 0xffffffff, 0x00090008,
872 	0x2462, 0xffffffff, 0x00030002,
873 	0x2463, 0xffffffff, 0x00050004,
874 	0x2464, 0xffffffff, 0x00000008,
875 	0x2465, 0xffffffff, 0x00070006,
876 	0x2466, 0xffffffff, 0x000a0009,
877 	0x2467, 0xffffffff, 0x00040003,
878 	0x2468, 0xffffffff, 0x00060005,
879 	0x2469, 0xffffffff, 0x00000009,
880 	0x246a, 0xffffffff, 0x00080007,
881 	0x246b, 0xffffffff, 0x000b000a,
882 	0x246c, 0xffffffff, 0x00050004,
883 	0x246d, 0xffffffff, 0x00070006,
884 	0x246e, 0xffffffff, 0x0008000b,
885 	0x246f, 0xffffffff, 0x000a0009,
886 	0x2470, 0xffffffff, 0x000d000c,
887 	0x2471, 0xffffffff, 0x00060005,
888 	0x2472, 0xffffffff, 0x00080007,
889 	0x2473, 0xffffffff, 0x0000000b,
890 	0x2474, 0xffffffff, 0x000a0009,
891 	0x2475, 0xffffffff, 0x000d000c,
892 	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
893 	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
894 	mmRLC_GCPM_GENERAL_3, 0xffffffff, 0x00000080,
895 	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
896 	0x000c, 0xffffffff, 0x0000001c,
897 	0x000d, 0x000f0000, 0x000f0000,
898 	0x0583, 0xffffffff, 0x00000100,
899 	0x0409, 0xffffffff, 0x00000100,
900 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
901 	mmMC_CITF_MISC_WR_CG, 0x000c0000, 0x000c0000,
902 	mmMC_CITF_MISC_RD_CG, 0x000c0000, 0x000c0000,
903 	mmHDP_MEM_POWER_LS, 0x00000001, 0x00000001,
904 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
905 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
906 	0x3430, 0xfffffff0, 0x00000100,
907 	0x3630, 0xfffffff0, 0x00000100,
908 };
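
/* A minimal sketch of how the tables above are consumed, modeled on
 * amdgpu_device_program_register_sequence() (simplified; local names are
 * illustrative only).  Entries are walked three at a time as
 * {reg, and_mask, or_mask}:
 *
 *	for (i = 0; i < array_size; i += 3) {
 *		reg      = registers[i + 0];
 *		and_mask = registers[i + 1];
 *		or_mask  = registers[i + 2];
 *		if (and_mask == 0xffffffff)
 *			tmp = or_mask;
 *		else
 *			tmp = (RREG32(reg) & ~and_mask) | (or_mask & and_mask);
 *		WREG32(reg, tmp);
 *	}
 */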
909 
910 /* XXX: update when we support VCE */
911 #if 0
912 /* tahiti, pitcairn, verde */
913 static const struct amdgpu_video_codec_info tahiti_video_codecs_encode_array[] =
914 {
915 	{
916 		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
917 		.max_width = 2048,
918 		.max_height = 1152,
919 		.max_pixels_per_frame = 2048 * 1152,
920 		.max_level = 0,
921 	},
922 };
923 
924 static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
925 {
926 	.codec_count = ARRAY_SIZE(tahiti_video_codecs_encode_array),
927 	.codec_array = tahiti_video_codecs_encode_array,
928 };
929 #else
930 static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
931 {
932 	.codec_count = 0,
933 	.codec_array = NULL,
934 };
935 #endif
936 /* oland and hainan don't support encode */
937 static const struct amdgpu_video_codecs hainan_video_codecs_encode =
938 {
939 	.codec_count = 0,
940 	.codec_array = NULL,
941 };
942 
943 /* tahiti, pitcairn, verde, oland */
944 static const struct amdgpu_video_codec_info tahiti_video_codecs_decode_array[] =
945 {
946 	{
947 		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
948 		.max_width = 2048,
949 		.max_height = 1152,
950 		.max_pixels_per_frame = 2048 * 1152,
951 		.max_level = 3,
952 	},
953 	{
954 		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
955 		.max_width = 2048,
956 		.max_height = 1152,
957 		.max_pixels_per_frame = 2048 * 1152,
958 		.max_level = 5,
959 	},
960 	{
961 		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
962 		.max_width = 2048,
963 		.max_height = 1152,
964 		.max_pixels_per_frame = 2048 * 1152,
965 		.max_level = 41,
966 	},
967 	{
968 		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
969 		.max_width = 2048,
970 		.max_height = 1152,
971 		.max_pixels_per_frame = 2048 * 1152,
972 		.max_level = 4,
973 	},
974 };
975 
976 static const struct amdgpu_video_codecs tahiti_video_codecs_decode =
977 {
978 	.codec_count = ARRAY_SIZE(tahiti_video_codecs_decode_array),
979 	.codec_array = tahiti_video_codecs_decode_array,
980 };
981 
982 /* hainan doesn't support decode */
983 static const struct amdgpu_video_codecs hainan_video_codecs_decode =
984 {
985 	.codec_count = 0,
986 	.codec_array = NULL,
987 };
988 
989 static int si_query_video_codecs(struct amdgpu_device *adev, bool encode,
990 				 const struct amdgpu_video_codecs **codecs)
991 {
992 	switch (adev->asic_type) {
993 	case CHIP_VERDE:
994 	case CHIP_TAHITI:
995 	case CHIP_PITCAIRN:
996 		if (encode)
997 			*codecs = &tahiti_video_codecs_encode;
998 		else
999 			*codecs = &tahiti_video_codecs_decode;
1000 		return 0;
1001 	case CHIP_OLAND:
1002 		if (encode)
1003 			*codecs = &hainan_video_codecs_encode;
1004 		else
1005 			*codecs = &tahiti_video_codecs_decode;
1006 		return 0;
1007 	case CHIP_HAINAN:
1008 		if (encode)
1009 			*codecs = &hainan_video_codecs_encode;
1010 		else
1011 			*codecs = &hainan_video_codecs_decode;
1012 		return 0;
1013 	default:
1014 		return -EINVAL;
1015 	}
1016 }
1017 
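/* Indirect register accessors: write the register offset to the block's
 * INDEX register, then access its DATA register; each aperture is
 * serialized by its own spinlock.
 */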
1018 static u32 si_pcie_rreg(struct amdgpu_device *adev, u32 reg)
1019 {
1020 	unsigned long flags;
1021 	u32 r;
1022 
1023 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1024 	WREG32(AMDGPU_PCIE_INDEX, reg);
1025 	(void)RREG32(AMDGPU_PCIE_INDEX);
1026 	r = RREG32(AMDGPU_PCIE_DATA);
1027 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1028 	return r;
1029 }
1030 
1031 static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
1032 {
1033 	unsigned long flags;
1034 
1035 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1036 	WREG32(AMDGPU_PCIE_INDEX, reg);
1037 	(void)RREG32(AMDGPU_PCIE_INDEX);
1038 	WREG32(AMDGPU_PCIE_DATA, v);
1039 	(void)RREG32(AMDGPU_PCIE_DATA);
1040 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1041 }
1042 
1043 static u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg)
1044 {
1045 	unsigned long flags;
1046 	u32 r;
1047 
1048 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1049 	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1050 	(void)RREG32(PCIE_PORT_INDEX);
1051 	r = RREG32(PCIE_PORT_DATA);
1052 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1053 	return r;
1054 }
1055 
1056 static void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
1057 {
1058 	unsigned long flags;
1059 
1060 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1061 	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1062 	(void)RREG32(PCIE_PORT_INDEX);
1063 	WREG32(PCIE_PORT_DATA, (v));
1064 	(void)RREG32(PCIE_PORT_DATA);
1065 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1066 }
1067 
1068 static u32 si_smc_rreg(struct amdgpu_device *adev, u32 reg)
1069 {
1070 	unsigned long flags;
1071 	u32 r;
1072 
1073 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
1074 	WREG32(SMC_IND_INDEX_0, (reg));
1075 	r = RREG32(SMC_IND_DATA_0);
1076 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
1077 	return r;
1078 }
1079 
1080 static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
1081 {
1082 	unsigned long flags;
1083 
1084 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
1085 	WREG32(SMC_IND_INDEX_0, (reg));
1086 	WREG32(SMC_IND_DATA_0, (v));
1087 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
1088 }
1089 
1090 static u32 si_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
1091 {
1092 	unsigned long flags;
1093 	u32 r;
1094 
1095 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
1096 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
1097 	r = RREG32(mmUVD_CTX_DATA);
1098 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
1099 	return r;
1100 }
1101 
1102 static void si_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
1103 {
1104 	unsigned long flags;
1105 
1106 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
1107 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
1108 	WREG32(mmUVD_CTX_DATA, (v));
1109 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
1110 }
1111 
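/* Registers that may be read back through si_read_register(); entries
 * flagged 'true' are GRBM-indexed (per-SE/per-SH).
 */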
1112 static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
1113 	{GRBM_STATUS},
1114 	{mmGRBM_STATUS2},
1115 	{mmGRBM_STATUS_SE0},
1116 	{mmGRBM_STATUS_SE1},
1117 	{mmSRBM_STATUS},
1118 	{mmSRBM_STATUS2},
1119 	{DMA_STATUS_REG + DMA0_REGISTER_OFFSET},
1120 	{DMA_STATUS_REG + DMA1_REGISTER_OFFSET},
1121 	{mmCP_STAT},
1122 	{mmCP_STALLED_STAT1},
1123 	{mmCP_STALLED_STAT2},
1124 	{mmCP_STALLED_STAT3},
1125 	{GB_ADDR_CONFIG},
1126 	{MC_ARB_RAMCFG},
1127 	{GB_TILE_MODE0},
1128 	{GB_TILE_MODE1},
1129 	{GB_TILE_MODE2},
1130 	{GB_TILE_MODE3},
1131 	{GB_TILE_MODE4},
1132 	{GB_TILE_MODE5},
1133 	{GB_TILE_MODE6},
1134 	{GB_TILE_MODE7},
1135 	{GB_TILE_MODE8},
1136 	{GB_TILE_MODE9},
1137 	{GB_TILE_MODE10},
1138 	{GB_TILE_MODE11},
1139 	{GB_TILE_MODE12},
1140 	{GB_TILE_MODE13},
1141 	{GB_TILE_MODE14},
1142 	{GB_TILE_MODE15},
1143 	{GB_TILE_MODE16},
1144 	{GB_TILE_MODE17},
1145 	{GB_TILE_MODE18},
1146 	{GB_TILE_MODE19},
1147 	{GB_TILE_MODE20},
1148 	{GB_TILE_MODE21},
1149 	{GB_TILE_MODE22},
1150 	{GB_TILE_MODE23},
1151 	{GB_TILE_MODE24},
1152 	{GB_TILE_MODE25},
1153 	{GB_TILE_MODE26},
1154 	{GB_TILE_MODE27},
1155 	{GB_TILE_MODE28},
1156 	{GB_TILE_MODE29},
1157 	{GB_TILE_MODE30},
1158 	{GB_TILE_MODE31},
1159 	{CC_RB_BACKEND_DISABLE, true},
1160 	{GC_USER_RB_BACKEND_DISABLE, true},
1161 	{PA_SC_RASTER_CONFIG, true},
1162 };
1163 
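/* Resolve a whitelisted register: GRBM-indexed registers are returned from
 * the cached rb_config for the selected SE/SH (or read with GRBM_GFX_INDEX
 * steered to it); non-indexed GB_*, MC_ARB_RAMCFG and tile-mode values come
 * from the cached gfx config, everything else from a plain RREG32.
 */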
1164 static uint32_t si_get_register_value(struct amdgpu_device *adev,
1165 				      bool indexed, u32 se_num,
1166 				      u32 sh_num, u32 reg_offset)
1167 {
1168 	if (indexed) {
1169 		uint32_t val;
1170 		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
1171 		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
1172 
1173 		switch (reg_offset) {
1174 		case mmCC_RB_BACKEND_DISABLE:
1175 			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
1176 		case mmGC_USER_RB_BACKEND_DISABLE:
1177 			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
1178 		case mmPA_SC_RASTER_CONFIG:
1179 			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
1180 		}
1181 
1182 		mutex_lock(&adev->grbm_idx_mutex);
1183 		if (se_num != 0xffffffff || sh_num != 0xffffffff)
1184 			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
1185 
1186 		val = RREG32(reg_offset);
1187 
1188 		if (se_num != 0xffffffff || sh_num != 0xffffffff)
1189 			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
1190 		mutex_unlock(&adev->grbm_idx_mutex);
1191 		return val;
1192 	} else {
1193 		unsigned idx;
1194 
1195 		switch (reg_offset) {
1196 		case mmGB_ADDR_CONFIG:
1197 			return adev->gfx.config.gb_addr_config;
1198 		case mmMC_ARB_RAMCFG:
1199 			return adev->gfx.config.mc_arb_ramcfg;
1200 		case mmGB_TILE_MODE0:
1201 		case mmGB_TILE_MODE1:
1202 		case mmGB_TILE_MODE2:
1203 		case mmGB_TILE_MODE3:
1204 		case mmGB_TILE_MODE4:
1205 		case mmGB_TILE_MODE5:
1206 		case mmGB_TILE_MODE6:
1207 		case mmGB_TILE_MODE7:
1208 		case mmGB_TILE_MODE8:
1209 		case mmGB_TILE_MODE9:
1210 		case mmGB_TILE_MODE10:
1211 		case mmGB_TILE_MODE11:
1212 		case mmGB_TILE_MODE12:
1213 		case mmGB_TILE_MODE13:
1214 		case mmGB_TILE_MODE14:
1215 		case mmGB_TILE_MODE15:
1216 		case mmGB_TILE_MODE16:
1217 		case mmGB_TILE_MODE17:
1218 		case mmGB_TILE_MODE18:
1219 		case mmGB_TILE_MODE19:
1220 		case mmGB_TILE_MODE20:
1221 		case mmGB_TILE_MODE21:
1222 		case mmGB_TILE_MODE22:
1223 		case mmGB_TILE_MODE23:
1224 		case mmGB_TILE_MODE24:
1225 		case mmGB_TILE_MODE25:
1226 		case mmGB_TILE_MODE26:
1227 		case mmGB_TILE_MODE27:
1228 		case mmGB_TILE_MODE28:
1229 		case mmGB_TILE_MODE29:
1230 		case mmGB_TILE_MODE30:
1231 		case mmGB_TILE_MODE31:
1232 			idx = (reg_offset - mmGB_TILE_MODE0);
1233 			return adev->gfx.config.tile_mode_array[idx];
1234 		default:
1235 			return RREG32(reg_offset);
1236 		}
1237 	}
1238 }
1239 static int si_read_register(struct amdgpu_device *adev, u32 se_num,
1240 			     u32 sh_num, u32 reg_offset, u32 *value)
1241 {
1242 	uint32_t i;
1243 
1244 	*value = 0;
1245 	for (i = 0; i < ARRAY_SIZE(si_allowed_read_registers); i++) {
1246 		bool indexed = si_allowed_read_registers[i].grbm_indexed;
1247 
1248 		if (reg_offset != si_allowed_read_registers[i].reg_offset)
1249 			continue;
1250 
1251 		*value = si_get_register_value(adev, indexed, se_num, sh_num,
1252 					       reg_offset);
1253 		return 0;
1254 	}
1255 	return -EINVAL;
1256 }
1257 
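/* Read the VBIOS while the ROM is normally disabled: temporarily enable the
 * ROM in BUS_CNTL, disable VGA fetches, force the ROM clock via
 * SCK_OVERWRITE, read the image, then restore the saved registers.
 */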
1258 static bool si_read_disabled_bios(struct amdgpu_device *adev)
1259 {
1260 	u32 bus_cntl;
1261 	u32 d1vga_control = 0;
1262 	u32 d2vga_control = 0;
1263 	u32 vga_render_control = 0;
1264 	u32 rom_cntl;
1265 	bool r;
1266 
1267 	bus_cntl = RREG32(R600_BUS_CNTL);
1268 	if (adev->mode_info.num_crtc) {
1269 		d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
1270 		d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
1271 		vga_render_control = RREG32(VGA_RENDER_CONTROL);
1272 	}
1273 	rom_cntl = RREG32(R600_ROM_CNTL);
1274 
1275 	/* enable the rom */
1276 	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
1277 	if (adev->mode_info.num_crtc) {
1278 		/* Disable VGA mode */
1279 		WREG32(AVIVO_D1VGA_CONTROL,
1280 		       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
1281 					  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
1282 		WREG32(AVIVO_D2VGA_CONTROL,
1283 		       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
1284 					  AVIVO_DVGA_CONTROL_TIMING_SELECT)));
1285 		WREG32(VGA_RENDER_CONTROL,
1286 		       (vga_render_control & C_000300_VGA_VSTATUS_CNTL));
1287 	}
1288 	WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
1289 
1290 	r = amdgpu_read_bios(adev);
1291 
1292 	/* restore regs */
1293 	WREG32(R600_BUS_CNTL, bus_cntl);
1294 	if (adev->mode_info.num_crtc) {
1295 		WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
1296 		WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
1297 		WREG32(VGA_RENDER_CONTROL, vga_render_control);
1298 	}
1299 	WREG32(R600_ROM_CNTL, rom_cntl);
1300 	return r;
1301 }
1302 
1303 #define mmROM_INDEX 0x2A
1304 #define mmROM_DATA  0x2B
1305 
1306 static bool si_read_bios_from_rom(struct amdgpu_device *adev,
1307 				  u8 *bios, u32 length_bytes)
1308 {
1309 	u32 *dw_ptr;
1310 	u32 i, length_dw;
1311 
1312 	if (bios == NULL)
1313 		return false;
1314 	if (length_bytes == 0)
1315 		return false;
1316 	/* APU vbios image is part of sbios image */
1317 	if (adev->flags & AMD_IS_APU)
1318 		return false;
1319 
1320 	dw_ptr = (u32 *)bios;
1321 	length_dw = ALIGN(length_bytes, 4) / 4;
1322 	/* set rom index to 0 */
1323 	WREG32(mmROM_INDEX, 0);
1324 	for (i = 0; i < length_dw; i++)
1325 		dw_ptr[i] = RREG32(mmROM_DATA);
1326 
1327 	return true;
1328 }
1329 
1330 static void si_set_clk_bypass_mode(struct amdgpu_device *adev)
1331 {
1332 	u32 tmp, i;
1333 
1334 	tmp = RREG32(CG_SPLL_FUNC_CNTL);
1335 	tmp |= SPLL_BYPASS_EN;
1336 	WREG32(CG_SPLL_FUNC_CNTL, tmp);
1337 
1338 	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
1339 	tmp |= SPLL_CTLREQ_CHG;
1340 	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
1341 
1342 	for (i = 0; i < adev->usec_timeout; i++) {
1343 		if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
1344 			break;
1345 		udelay(1);
1346 	}
1347 
1348 	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
1349 	tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
1350 	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
1351 
1352 	tmp = RREG32(MPLL_CNTL_MODE);
1353 	tmp &= ~MPLL_MCLK_SEL;
1354 	WREG32(MPLL_CNTL_MODE, tmp);
1355 }
1356 
1357 static void si_spll_powerdown(struct amdgpu_device *adev)
1358 {
1359 	u32 tmp;
1360 
1361 	tmp = RREG32(SPLL_CNTL_MODE);
1362 	tmp |= SPLL_SW_DIR_CONTROL;
1363 	WREG32(SPLL_CNTL_MODE, tmp);
1364 
1365 	tmp = RREG32(CG_SPLL_FUNC_CNTL);
1366 	tmp |= SPLL_RESET;
1367 	WREG32(CG_SPLL_FUNC_CNTL, tmp);
1368 
1369 	tmp = RREG32(CG_SPLL_FUNC_CNTL);
1370 	tmp |= SPLL_SLEEP;
1371 	WREG32(CG_SPLL_FUNC_CNTL, tmp);
1372 
1373 	tmp = RREG32(SPLL_CNTL_MODE);
1374 	tmp &= ~SPLL_SW_DIR_CONTROL;
1375 	WREG32(SPLL_CNTL_MODE, tmp);
1376 }
1377 
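/* Full ASIC reset through PCI config space: switch SCLK/MCLK to bypass,
 * power down the SPLL, disable bus mastering, issue the config reset and
 * then poll CONFIG_MEMSIZE until the ASIC responds again.
 */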
1378 static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
1379 {
1380 	u32 i;
1381 	int r = -EINVAL;
1382 
1383 	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
1384 
1385 	/* set mclk/sclk to bypass */
1386 	si_set_clk_bypass_mode(adev);
1387 	/* powerdown spll */
1388 	si_spll_powerdown(adev);
1389 	/* disable BM */
1390 	pci_clear_master(adev->pdev);
1391 	/* reset */
1392 	amdgpu_device_pci_config_reset(adev);
1393 
1394 	udelay(100);
1395 
1396 	/* wait for asic to come out of reset */
1397 	for (i = 0; i < adev->usec_timeout; i++) {
1398 		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
1399 			/* enable BM */
1400 			pci_set_master(adev->pdev);
1401 			adev->has_hw_reset = true;
1402 			r = 0;
1403 			break;
1404 		}
1405 		udelay(1);
1406 	}
1407 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
1408 
1409 	return r;
1410 }
1411 
1412 static bool si_asic_supports_baco(struct amdgpu_device *adev)
1413 {
1414 	return false;
1415 }
1416 
1417 static enum amd_reset_method
1418 si_asic_reset_method(struct amdgpu_device *adev)
1419 {
1420 	if (amdgpu_reset_method == AMD_RESET_METHOD_PCI)
1421 		return amdgpu_reset_method;
1422 	else if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY &&
1423 		 amdgpu_reset_method != -1)
1424 		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
1425 			 amdgpu_reset_method);
1426 
1427 	return AMD_RESET_METHOD_LEGACY;
1428 }
1429 
1430 static int si_asic_reset(struct amdgpu_device *adev)
1431 {
1432 	int r;
1433 
1434 	switch (si_asic_reset_method(adev)) {
1435 	case AMD_RESET_METHOD_PCI:
1436 		dev_info(adev->dev, "PCI reset\n");
1437 		r = amdgpu_device_pci_reset(adev);
1438 		break;
1439 	default:
1440 		dev_info(adev->dev, "PCI CONFIG reset\n");
1441 		r = si_gpu_pci_config_reset(adev);
1442 		break;
1443 	}
1444 
1445 	return r;
1446 }
1447 
1448 static u32 si_get_config_memsize(struct amdgpu_device *adev)
1449 {
1450 	return RREG32(mmCONFIG_MEMSIZE);
1451 }
1452 
1453 static void si_vga_set_state(struct amdgpu_device *adev, bool state)
1454 {
1455 	uint32_t temp;
1456 
1457 	temp = RREG32(CONFIG_CNTL);
1458 	if (!state) {
1459 		temp &= ~(1<<0);
1460 		temp |= (1<<1);
1461 	} else {
1462 		temp &= ~(1<<1);
1463 	}
1464 	WREG32(CONFIG_CNTL, temp);
1465 }
1466 
1467 static u32 si_get_xclk(struct amdgpu_device *adev)
1468 {
1469 	u32 reference_clock = adev->clock.spll.reference_freq;
1470 	u32 tmp;
1471 
1472 	tmp = RREG32(CG_CLKPIN_CNTL_2);
1473 	if (tmp & MUX_TCLK_TO_XCLK)
1474 		return TCLK;
1475 
1476 	tmp = RREG32(CG_CLKPIN_CNTL);
1477 	if (tmp & XTALIN_DIVIDE)
1478 		return reference_clock / 4;
1479 
1480 	return reference_clock;
1481 }
1482 
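/* Flush the HDP (host data path) write cache either directly via MMIO or,
 * when a ring is available, by emitting the register write on the ring.
 */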
1483 static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
1484 {
1485 	if (!ring || !ring->funcs->emit_wreg) {
1486 		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1487 		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
1488 	} else {
1489 		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1490 	}
1491 }
1492 
1493 static void si_invalidate_hdp(struct amdgpu_device *adev,
1494 			      struct amdgpu_ring *ring)
1495 {
1496 	if (!ring || !ring->funcs->emit_wreg) {
1497 		WREG32(mmHDP_DEBUG0, 1);
1498 		RREG32(mmHDP_DEBUG0);
1499 	} else {
1500 		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
1501 	}
1502 }
1503 
1504 static bool si_need_full_reset(struct amdgpu_device *adev)
1505 {
1506 	/* change this when we support soft reset */
1507 	return true;
1508 }
1509 
1510 static bool si_need_reset_on_init(struct amdgpu_device *adev)
1511 {
1512 	return false;
1513 }
1514 
1515 static int si_get_pcie_lanes(struct amdgpu_device *adev)
1516 {
1517 	u32 link_width_cntl;
1518 
1519 	if (adev->flags & AMD_IS_APU)
1520 		return 0;
1521 
1522 	link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1523 
1524 	switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
1525 	case LC_LINK_WIDTH_X1:
1526 		return 1;
1527 	case LC_LINK_WIDTH_X2:
1528 		return 2;
1529 	case LC_LINK_WIDTH_X4:
1530 		return 4;
1531 	case LC_LINK_WIDTH_X8:
1532 		return 8;
1533 	case LC_LINK_WIDTH_X0:
1534 	case LC_LINK_WIDTH_X16:
1535 	default:
1536 		return 16;
1537 	}
1538 }
1539 
1540 static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
1541 {
1542 	u32 link_width_cntl, mask;
1543 
1544 	if (adev->flags & AMD_IS_APU)
1545 		return;
1546 
1547 	switch (lanes) {
1548 	case 0:
1549 		mask = LC_LINK_WIDTH_X0;
1550 		break;
1551 	case 1:
1552 		mask = LC_LINK_WIDTH_X1;
1553 		break;
1554 	case 2:
1555 		mask = LC_LINK_WIDTH_X2;
1556 		break;
1557 	case 4:
1558 		mask = LC_LINK_WIDTH_X4;
1559 		break;
1560 	case 8:
1561 		mask = LC_LINK_WIDTH_X8;
1562 		break;
1563 	case 16:
1564 		mask = LC_LINK_WIDTH_X16;
1565 		break;
1566 	default:
1567 		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
1568 		return;
1569 	}
1570 
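	/* Program the requested link width and trigger an immediate
	 * reconfiguration via LC_RECONFIG_NOW.
	 */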
1571 	link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1572 	link_width_cntl &= ~LC_LINK_WIDTH_MASK;
1573 	link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
1574 	link_width_cntl |= (LC_RECONFIG_NOW |
1575 			    LC_RECONFIG_ARC_MISSING_ESCAPE);
1576 
1577 	WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1578 }
1579 
1580 static void si_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
1581 			      uint64_t *count1)
1582 {
1583 	uint32_t perfctr = 0;
1584 	uint64_t cnt0_of, cnt1_of;
1585 	int tmp;
1586 
1587 	/* This reports 0 on APUs, so return to avoid writing/reading registers
1588 	 * that may or may not be different from their GPU counterparts
1589 	 */
1590 	if (adev->flags & AMD_IS_APU)
1591 		return;
1592 
1593 	/* Set the 2 events that we wish to watch */
1594 	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
1595 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
1596 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
1597 
1598 	/* Write to enable desired perf counters */
1599 	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
1600 	/* Zero out and enable the perf counters
1601 	 * Write 0x5:
1602 	 * Bit 0 = Start all counters(1)
1603 	 * Bit 2 = Global counter reset enable(1)
1604 	 */
1605 	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
1606 
1607 	msleep(1000);
1608 
1609 	/* Load the shadow and disable the perf counters
1610 	 * Write 0x2:
1611 	 * Bit 0 = Stop counters(0)
1612 	 * Bit 1 = Load the shadow counters(1)
1613 	 */
1614 	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
1615 
1616 	/* Read register values to get any >32bit overflow */
1617 	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
1618 	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
1619 	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
1620 
1621 	/* Get the values and add the overflow */
1622 	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
1623 	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
1624 }
1625 
1626 static uint64_t si_get_pcie_replay_count(struct amdgpu_device *adev)
1627 {
1628 	uint64_t nak_r, nak_g;
1629 
1630 	/* Get the number of NAKs received and generated */
1631 	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
1632 	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
1633 
1634 	/* Add the total number of NAKs, i.e. the number of replays */
1635 	return (nak_r + nak_g);
1636 }
1637 
1638 static int si_uvd_send_upll_ctlreq(struct amdgpu_device *adev,
1639 				   unsigned cg_upll_func_cntl)
1640 {
1641 	unsigned i;
1642 
1643 	/* Make sure UPLL_CTLREQ is deasserted */
1644 	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
1645 
1646 	mdelay(10);
1647 
1648 	/* Assert UPLL_CTLREQ */
1649 	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
1650 
1651 	/* Wait for CTLACK and CTLACK2 to get asserted */
1652 	for (i = 0; i < SI_MAX_CTLACKS_ASSERTION_WAIT; ++i) {
1653 		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
1654 
1655 		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
1656 			break;
1657 		mdelay(10);
1658 	}
1659 
1660 	/* Deassert UPLL_CTLREQ */
1661 	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);
1662 
1663 	if (i == SI_MAX_CTLACKS_ASSERTION_WAIT) {
1664 		DRM_ERROR("Timeout setting UVD clocks!\n");
1665 		return -ETIMEDOUT;
1666 	}
1667 
1668 	return 0;
1669 }
1670 
1671 static unsigned si_uvd_calc_upll_post_div(unsigned vco_freq,
1672 					  unsigned target_freq,
1673 					  unsigned pd_min,
1674 					  unsigned pd_even)
1675 {
1676 	unsigned post_div = vco_freq / target_freq;
1677 
1678 	/* Adjust to post divider minimum value */
1679 	if (post_div < pd_min)
1680 		post_div = pd_min;
1681 
1682 	/* We always need a frequency less than or equal to the target */
1683 	if ((vco_freq / post_div) > target_freq)
1684 		post_div += 1;
1685 
1686 	/* Post dividers above a certain value must be even */
1687 	if (post_div > pd_even && post_div % 2)
1688 		post_div += 1;
1689 
1690 	return post_div;
1691 }
1692 
1693 /**
1694  * si_calc_upll_dividers - calc UPLL clock dividers
1695  *
1696  * @adev: amdgpu_device pointer
1697  * @vclk: wanted VCLK
1698  * @dclk: wanted DCLK
1699  * @vco_min: minimum VCO frequency
1700  * @vco_max: maximum VCO frequency
1701  * @fb_factor: factor to multiply vco freq with
1702  * @fb_mask: limit and bitmask for feedback divider
1703  * @pd_min: post divider minimum
1704  * @pd_max: post divider maximum
1705  * @pd_even: post divider must be even above this value
1706  * @optimal_fb_div: resulting feedback divider
1707  * @optimal_vclk_div: resulting vclk post divider
1708  * @optimal_dclk_div: resulting dclk post divider
1709  *
1710  * Calculate dividers for the UVD's UPLL (except on APUs).
1711  * Returns zero on success; -EINVAL on error.
1712  */
1713 static int si_calc_upll_dividers(struct amdgpu_device *adev,
1714 				 unsigned vclk, unsigned dclk,
1715 				 unsigned vco_min, unsigned vco_max,
1716 				 unsigned fb_factor, unsigned fb_mask,
1717 				 unsigned pd_min, unsigned pd_max,
1718 				 unsigned pd_even,
1719 				 unsigned *optimal_fb_div,
1720 				 unsigned *optimal_vclk_div,
1721 				 unsigned *optimal_dclk_div)
1722 {
1723 	unsigned vco_freq, ref_freq = adev->clock.spll.reference_freq;
1724 
1725 	/* Start off with something large */
1726 	unsigned optimal_score = ~0;
1727 
1728 	/* Loop through vco from low to high */
1729 	vco_min = max(max(vco_min, vclk), dclk);
1730 	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {
1731 		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
1732 		unsigned vclk_div, dclk_div, score;
1733 
1734 		do_div(fb_div, ref_freq);
1735 
1736 		/* fb div out of range? */
1737 		if (fb_div > fb_mask)
1738 			break; /* It can only get worse */
1739 
1740 		fb_div &= fb_mask;
1741 
1742 		/* Calc vclk divider with current vco freq */
1743 		vclk_div = si_uvd_calc_upll_post_div(vco_freq, vclk,
1744 						     pd_min, pd_even);
1745 		if (vclk_div > pd_max)
1746 			break; /* vco is too big, stop searching */
1747 
1748 		/* Calc dclk divider with current vco freq */
1749 		dclk_div = si_uvd_calc_upll_post_div(vco_freq, dclk,
1750 						     pd_min, pd_even);
1751 		if (dclk_div > pd_max)
1752 			break; /* vco is too big, stop searching */
1753 
1754 		/* Calc score with current vco freq */
1755 		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
1756 
1757 		/* Determine if this vco setting is better than current optimal settings */
1758 		if (score < optimal_score) {
1759 			*optimal_fb_div = fb_div;
1760 			*optimal_vclk_div = vclk_div;
1761 			*optimal_dclk_div = dclk_div;
1762 			optimal_score = score;
1763 			if (optimal_score == 0)
1764 				break; /* It can't get better than this */
1765 		}
1766 	}
1767 
1768 	/* Did we find a valid setup? */
1769 	if (optimal_score == ~0)
1770 		return -EINVAL;
1771 
1772 	return 0;
1773 }
1774 
1775 static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
1776 {
1777 	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
1778 	int r;
1779 
1780 	/* Bypass vclk and dclk with bclk */
1781 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
1782 		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
1783 		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
1784 
1785 	/* Put PLL in bypass mode */
1786 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
1787 
1788 	if (!vclk || !dclk) {
1789 		/* Keep the Bypass mode */
1790 		return 0;
1791 	}
1792 
1793 	r = si_calc_upll_dividers(adev, vclk, dclk, 125000, 250000,
1794 				  16384, 0x03FFFFFF, 0, 128, 5,
1795 				  &fb_div, &vclk_div, &dclk_div);
1796 	if (r)
1797 		return r;
1798 
1799 	/* Set RESET_ANTI_MUX to 0 */
1800 	WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
1801 
1802 	/* Set VCO_MODE to 1 */
1803 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
1804 
1805 	/* Disable sleep mode */
1806 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
1807 
1808 	/* Deassert UPLL_RESET */
1809 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
1810 
1811 	mdelay(1);
1812 
1813 	r = si_uvd_send_upll_ctlreq(adev, CG_UPLL_FUNC_CNTL);
1814 	if (r)
1815 		return r;
1816 
1817 	/* Assert UPLL_RESET again */
1818 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
1819 
1820 	/* Disable spread spectrum. */
1821 	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
1822 
1823 	/* Set feedback divider */
1824 	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
1825 
1826 	/* Set ref divider to 0 */
1827 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
1828 
1829 	if (fb_div < 307200)
1830 		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
1831 	else
1832 		WREG32_P(CG_UPLL_FUNC_CNTL_4,
1833 			 UPLL_SPARE_ISPARE9,
1834 			 ~UPLL_SPARE_ISPARE9);
1835 
1836 	/* Set PDIV_A and PDIV_B */
1837 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
1838 		 UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
1839 		 ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
1840 
1841 	/* Give the PLL some time to settle */
1842 	mdelay(15);
1843 
1844 	/* Deassert PLL_RESET */
1845 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
1846 
1847 	mdelay(15);
1848 
1849 	/* Switch from bypass mode to normal mode */
1850 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
1851 
1852 	r = si_uvd_send_upll_ctlreq(adev, CG_UPLL_FUNC_CNTL);
1853 	if (r)
1854 		return r;
1855 
1856 	/* Switch VCLK and DCLK selection */
1857 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
1858 		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
1859 		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
1860 
1861 	mdelay(100);
1862 
1863 	return 0;
1864 }
1865 
1866 static int si_vce_send_vcepll_ctlreq(struct amdgpu_device *adev)
1867 {
1868 	unsigned i;
1869 
1870 	/* Make sure VCEPLL_CTLREQ is deasserted */
1871 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
1872 
1873 	mdelay(10);
1874 
1875 	/* Assert UPLL_CTLREQ */
1876 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
1877 
1878 	/* Wait for CTLACK and CTLACK2 to get asserted */
1879 	for (i = 0; i < SI_MAX_CTLACKS_ASSERTION_WAIT; ++i) {
1880 		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
1881 
1882 		if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
1883 			break;
1884 		mdelay(10);
1885 	}
1886 
1887 	/* Deassert UPLL_CTLREQ */
1888 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
1889 
1890 	if (i == SI_MAX_CTLACKS_ASSERTION_WAIT) {
1891 		DRM_ERROR("Timeout setting VCE clocks!\n");
1892 		return -ETIMEDOUT;
1893 	}
1894 
1895 	return 0;
1896 }
1897 
1898 static int si_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
1899 {
1900 	unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0;
1901 	int r;
1902 
1903 	/* Bypass evclk and ecclk with bclk */
1904 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
1905 		     EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1),
1906 		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
1907 
1908 	/* Put PLL in bypass mode */
1909 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK,
1910 		     ~VCEPLL_BYPASS_EN_MASK);
1911 
1912 	if (!evclk || !ecclk) {
1913 		/* Keep the Bypass mode, put PLL to sleep */
1914 		WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
1915 			     ~VCEPLL_SLEEP_MASK);
1916 		return 0;
1917 	}
1918 
1919 	r = si_calc_upll_dividers(adev, evclk, ecclk, 125000, 250000,
1920 				  16384, 0x03FFFFFF, 0, 128, 5,
1921 				  &fb_div, &evclk_div, &ecclk_div);
1922 	if (r)
1923 		return r;
1924 
1925 	/* Set RESET_ANTI_MUX to 0 */
1926 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
1927 
1928 	/* Set VCO_MODE to 1 */
1929 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK,
1930 		     ~VCEPLL_VCO_MODE_MASK);
1931 
1932 	/* Toggle VCEPLL_SLEEP to 1 then back to 0 */
1933 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
1934 		     ~VCEPLL_SLEEP_MASK);
1935 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK);
1936 
1937 	/* Deassert VCEPLL_RESET */
1938 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
1939 
1940 	mdelay(1);
1941 
1942 	r = si_vce_send_vcepll_ctlreq(adev);
1943 	if (r)
1944 		return r;
1945 
1946 	/* Assert VCEPLL_RESET again */
1947 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK);
1948 
1949 	/* Disable spread spectrum. */
1950 	WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
1951 
1952 	/* Set feedback divider */
1953 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3,
1954 		     VCEPLL_FB_DIV(fb_div),
1955 		     ~VCEPLL_FB_DIV_MASK);
1956 
1957 	/* Set ref divider to 0 */
1958 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK);
1959 
1960 	/* Set PDIV_A and PDIV_B */
1961 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
1962 		     VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div),
1963 		     ~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK));
1964 
1965 	/* Give the PLL some time to settle */
1966 	mdelay(15);
1967 
1968 	/* Deassert PLL_RESET */
1969 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
1970 
1971 	mdelay(15);
1972 
1973 	/* Switch from bypass mode to normal mode */
1974 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK);
1975 
1976 	r = si_vce_send_vcepll_ctlreq(adev);
1977 	if (r)
1978 		return r;
1979 
1980 	/* Switch EVCLK and ECCLK selection */
1981 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
1982 		     EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16),
1983 		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
1984 
1985 	mdelay(100);
1986 
1987 	return 0;
1988 }
1989 
1990 static void si_pre_asic_init(struct amdgpu_device *adev)
1991 {
1992 }
1993 
1994 static const struct amdgpu_asic_funcs si_asic_funcs =
1995 {
1996 	.read_disabled_bios = &si_read_disabled_bios,
1997 	.read_bios_from_rom = &si_read_bios_from_rom,
1998 	.read_register = &si_read_register,
1999 	.reset = &si_asic_reset,
2000 	.reset_method = &si_asic_reset_method,
2001 	.set_vga_state = &si_vga_set_state,
2002 	.get_xclk = &si_get_xclk,
2003 	.set_uvd_clocks = &si_set_uvd_clocks,
2004 	.set_vce_clocks = &si_set_vce_clocks,
2005 	.get_pcie_lanes = &si_get_pcie_lanes,
2006 	.set_pcie_lanes = &si_set_pcie_lanes,
2007 	.get_config_memsize = &si_get_config_memsize,
2008 	.flush_hdp = &si_flush_hdp,
2009 	.invalidate_hdp = &si_invalidate_hdp,
2010 	.need_full_reset = &si_need_full_reset,
2011 	.get_pcie_usage = &si_get_pcie_usage,
2012 	.need_reset_on_init = &si_need_reset_on_init,
2013 	.get_pcie_replay_count = &si_get_pcie_replay_count,
2014 	.supports_baco = &si_asic_supports_baco,
2015 	.pre_asic_init = &si_pre_asic_init,
2016 	.query_video_codecs = &si_query_video_codecs,
2017 };
2018 
2019 static uint32_t si_get_rev_id(struct amdgpu_device *adev)
2020 {
2021 	return (RREG32(CC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
2022 		>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
2023 }
2024 
2025 static int si_common_early_init(void *handle)
2026 {
2027 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2028 
2029 	adev->smc_rreg = &si_smc_rreg;
2030 	adev->smc_wreg = &si_smc_wreg;
2031 	adev->pcie_rreg = &si_pcie_rreg;
2032 	adev->pcie_wreg = &si_pcie_wreg;
2033 	adev->pciep_rreg = &si_pciep_rreg;
2034 	adev->pciep_wreg = &si_pciep_wreg;
2035 	adev->uvd_ctx_rreg = si_uvd_ctx_rreg;
2036 	adev->uvd_ctx_wreg = si_uvd_ctx_wreg;
2037 	adev->didt_rreg = NULL;
2038 	adev->didt_wreg = NULL;
2039 
2040 	adev->asic_funcs = &si_asic_funcs;
2041 
2042 	adev->rev_id = si_get_rev_id(adev);
2043 	adev->external_rev_id = 0xFF;
2044 	switch (adev->asic_type) {
2045 	case CHIP_TAHITI:
2046 		adev->cg_flags =
2047 			AMD_CG_SUPPORT_GFX_MGCG |
2048 			AMD_CG_SUPPORT_GFX_MGLS |
2049 			/*AMD_CG_SUPPORT_GFX_CGCG |*/
2050 			AMD_CG_SUPPORT_GFX_CGLS |
2051 			AMD_CG_SUPPORT_GFX_CGTS |
2052 			AMD_CG_SUPPORT_GFX_CP_LS |
2053 			AMD_CG_SUPPORT_MC_MGCG |
2054 			AMD_CG_SUPPORT_SDMA_MGCG |
2055 			AMD_CG_SUPPORT_BIF_LS |
2056 			AMD_CG_SUPPORT_VCE_MGCG |
2057 			AMD_CG_SUPPORT_UVD_MGCG |
2058 			AMD_CG_SUPPORT_HDP_LS |
2059 			AMD_CG_SUPPORT_HDP_MGCG;
2060 		adev->pg_flags = 0;
2061 		adev->external_rev_id = (adev->rev_id == 0) ? 1 :
2062 					(adev->rev_id == 1) ? 5 : 6;
2063 		break;
2064 	case CHIP_PITCAIRN:
2065 		adev->cg_flags =
2066 			AMD_CG_SUPPORT_GFX_MGCG |
2067 			AMD_CG_SUPPORT_GFX_MGLS |
2068 			/*AMD_CG_SUPPORT_GFX_CGCG |*/
2069 			AMD_CG_SUPPORT_GFX_CGLS |
2070 			AMD_CG_SUPPORT_GFX_CGTS |
2071 			AMD_CG_SUPPORT_GFX_CP_LS |
2072 			AMD_CG_SUPPORT_GFX_RLC_LS |
2073 			AMD_CG_SUPPORT_MC_LS |
2074 			AMD_CG_SUPPORT_MC_MGCG |
2075 			AMD_CG_SUPPORT_SDMA_MGCG |
2076 			AMD_CG_SUPPORT_BIF_LS |
2077 			AMD_CG_SUPPORT_VCE_MGCG |
2078 			AMD_CG_SUPPORT_UVD_MGCG |
2079 			AMD_CG_SUPPORT_HDP_LS |
2080 			AMD_CG_SUPPORT_HDP_MGCG;
2081 		adev->pg_flags = 0;
2082 		adev->external_rev_id = adev->rev_id + 20;
2083 		break;
2084 
2085 	case CHIP_VERDE:
2086 		adev->cg_flags =
2087 			AMD_CG_SUPPORT_GFX_MGCG |
2088 			AMD_CG_SUPPORT_GFX_MGLS |
2089 			AMD_CG_SUPPORT_GFX_CGLS |
2090 			AMD_CG_SUPPORT_GFX_CGTS |
2091 			AMD_CG_SUPPORT_GFX_CGTS_LS |
2092 			AMD_CG_SUPPORT_GFX_CP_LS |
2093 			AMD_CG_SUPPORT_MC_LS |
2094 			AMD_CG_SUPPORT_MC_MGCG |
2095 			AMD_CG_SUPPORT_SDMA_MGCG |
2096 			AMD_CG_SUPPORT_SDMA_LS |
2097 			AMD_CG_SUPPORT_BIF_LS |
2098 			AMD_CG_SUPPORT_VCE_MGCG |
2099 			AMD_CG_SUPPORT_UVD_MGCG |
2100 			AMD_CG_SUPPORT_HDP_LS |
2101 			AMD_CG_SUPPORT_HDP_MGCG;
2102 		adev->pg_flags = 0;
2103 		//???
2104 		adev->external_rev_id = adev->rev_id + 40;
2105 		break;
2106 	case CHIP_OLAND:
2107 		adev->cg_flags =
2108 			AMD_CG_SUPPORT_GFX_MGCG |
2109 			AMD_CG_SUPPORT_GFX_MGLS |
2110 			/*AMD_CG_SUPPORT_GFX_CGCG |*/
2111 			AMD_CG_SUPPORT_GFX_CGLS |
2112 			AMD_CG_SUPPORT_GFX_CGTS |
2113 			AMD_CG_SUPPORT_GFX_CP_LS |
2114 			AMD_CG_SUPPORT_GFX_RLC_LS |
2115 			AMD_CG_SUPPORT_MC_LS |
2116 			AMD_CG_SUPPORT_MC_MGCG |
2117 			AMD_CG_SUPPORT_SDMA_MGCG |
2118 			AMD_CG_SUPPORT_BIF_LS |
2119 			AMD_CG_SUPPORT_UVD_MGCG |
2120 			AMD_CG_SUPPORT_HDP_LS |
2121 			AMD_CG_SUPPORT_HDP_MGCG;
2122 		adev->pg_flags = 0;
2123 		adev->external_rev_id = 60;
2124 		break;
2125 	case CHIP_HAINAN:
2126 		adev->cg_flags =
2127 			AMD_CG_SUPPORT_GFX_MGCG |
2128 			AMD_CG_SUPPORT_GFX_MGLS |
2129 			/*AMD_CG_SUPPORT_GFX_CGCG |*/
2130 			AMD_CG_SUPPORT_GFX_CGLS |
2131 			AMD_CG_SUPPORT_GFX_CGTS |
2132 			AMD_CG_SUPPORT_GFX_CP_LS |
2133 			AMD_CG_SUPPORT_GFX_RLC_LS |
2134 			AMD_CG_SUPPORT_MC_LS |
2135 			AMD_CG_SUPPORT_MC_MGCG |
2136 			AMD_CG_SUPPORT_SDMA_MGCG |
2137 			AMD_CG_SUPPORT_BIF_LS |
2138 			AMD_CG_SUPPORT_HDP_LS |
2139 			AMD_CG_SUPPORT_HDP_MGCG;
2140 		adev->pg_flags = 0;
2141 		adev->external_rev_id = 70;
2142 		break;
2143 
2144 	default:
2145 		return -EINVAL;
2146 	}
2147 
2148 	return 0;
2149 }
2150 
2151 static int si_common_sw_init(void *handle)
2152 {
2153 	return 0;
2154 }
2155 
2156 static int si_common_sw_fini(void *handle)
2157 {
2158 	return 0;
2159 }
2160 
2161 
2162 static void si_init_golden_registers(struct amdgpu_device *adev)
2163 {
2164 	switch (adev->asic_type) {
2165 	case CHIP_TAHITI:
2166 		amdgpu_device_program_register_sequence(adev,
2167 							tahiti_golden_registers,
2168 							ARRAY_SIZE(tahiti_golden_registers));
2169 		amdgpu_device_program_register_sequence(adev,
2170 							tahiti_golden_rlc_registers,
2171 							ARRAY_SIZE(tahiti_golden_rlc_registers));
2172 		amdgpu_device_program_register_sequence(adev,
2173 							tahiti_mgcg_cgcg_init,
2174 							ARRAY_SIZE(tahiti_mgcg_cgcg_init));
2175 		amdgpu_device_program_register_sequence(adev,
2176 							tahiti_golden_registers2,
2177 							ARRAY_SIZE(tahiti_golden_registers2));
2178 		break;
2179 	case CHIP_PITCAIRN:
2180 		amdgpu_device_program_register_sequence(adev,
2181 							pitcairn_golden_registers,
2182 							ARRAY_SIZE(pitcairn_golden_registers));
2183 		amdgpu_device_program_register_sequence(adev,
2184 							pitcairn_golden_rlc_registers,
2185 							ARRAY_SIZE(pitcairn_golden_rlc_registers));
2186 		amdgpu_device_program_register_sequence(adev,
2187 							pitcairn_mgcg_cgcg_init,
2188 							ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
2189 		break;
2190 	case CHIP_VERDE:
2191 		amdgpu_device_program_register_sequence(adev,
2192 							verde_golden_registers,
2193 							ARRAY_SIZE(verde_golden_registers));
2194 		amdgpu_device_program_register_sequence(adev,
2195 							verde_golden_rlc_registers,
2196 							ARRAY_SIZE(verde_golden_rlc_registers));
2197 		amdgpu_device_program_register_sequence(adev,
2198 							verde_mgcg_cgcg_init,
2199 							ARRAY_SIZE(verde_mgcg_cgcg_init));
2200 		amdgpu_device_program_register_sequence(adev,
2201 							verde_pg_init,
2202 							ARRAY_SIZE(verde_pg_init));
2203 		break;
2204 	case CHIP_OLAND:
2205 		amdgpu_device_program_register_sequence(adev,
2206 							oland_golden_registers,
2207 							ARRAY_SIZE(oland_golden_registers));
2208 		amdgpu_device_program_register_sequence(adev,
2209 							oland_golden_rlc_registers,
2210 							ARRAY_SIZE(oland_golden_rlc_registers));
2211 		amdgpu_device_program_register_sequence(adev,
2212 							oland_mgcg_cgcg_init,
2213 							ARRAY_SIZE(oland_mgcg_cgcg_init));
2214 		break;
2215 	case CHIP_HAINAN:
2216 		amdgpu_device_program_register_sequence(adev,
2217 							hainan_golden_registers,
2218 							ARRAY_SIZE(hainan_golden_registers));
2219 		amdgpu_device_program_register_sequence(adev,
2220 							hainan_golden_registers2,
2221 							ARRAY_SIZE(hainan_golden_registers2));
2222 		amdgpu_device_program_register_sequence(adev,
2223 							hainan_mgcg_cgcg_init,
2224 							ARRAY_SIZE(hainan_mgcg_cgcg_init));
2225 		break;
2226 
2227 
2228 	default:
2229 		BUG();
2230 	}
2231 }
2232 
2233 static void si_pcie_gen3_enable(struct amdgpu_device *adev)
2234 {
2235 	struct pci_dev *root = adev->pdev->bus->self;
2236 	u32 speed_cntl, current_data_rate;
2237 	int i;
2238 	u16 tmp16;
2239 
2240 	if (pci_is_root_bus(adev->pdev->bus))
2241 		return;
2242 
2243 	if (amdgpu_pcie_gen2 == 0)
2244 		return;
2245 
2246 	if (adev->flags & AMD_IS_APU)
2247 		return;
2248 
2249 	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2250 					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
2251 		return;
2252 
2253 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
2254 	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
2255 		LC_CURRENT_DATA_RATE_SHIFT;
2256 	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
2257 		if (current_data_rate == 2) {
2258 			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
2259 			return;
2260 		}
2261 		DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
2262 	} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
2263 		if (current_data_rate == 1) {
2264 			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
2265 			return;
2266 		}
2267 		DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
2268 	}
2269 
2270 	if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
2271 		return;
2272 
2273 	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
2274 		if (current_data_rate != 2) {
2275 			u16 bridge_cfg, gpu_cfg;
2276 			u16 bridge_cfg2, gpu_cfg2;
2277 			u32 max_lw, current_lw, tmp;
2278 
2279 			pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
2280 			pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
2281 
2282 			tmp = RREG32_PCIE(PCIE_LC_STATUS1);
2283 			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
2284 			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
2285 
2286 			if (current_lw < max_lw) {
2287 				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
2288 				if (tmp & LC_RENEGOTIATION_SUPPORT) {
2289 					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
2290 					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
2291 					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
2292 					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
2293 				}
2294 			}
2295 
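			/* Redo link equalization, preserving the HAWD bit and the
			 * compliance/transmit-margin settings on both the root
			 * port and the GPU.
			 */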
2296 			for (i = 0; i < 10; i++) {
2297 				pcie_capability_read_word(adev->pdev,
2298 							  PCI_EXP_DEVSTA,
2299 							  &tmp16);
2300 				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
2301 					break;
2302 
2303 				pcie_capability_read_word(root, PCI_EXP_LNKCTL,
2304 							  &bridge_cfg);
2305 				pcie_capability_read_word(adev->pdev,
2306 							  PCI_EXP_LNKCTL,
2307 							  &gpu_cfg);
2308 
2309 				pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
2310 							  &bridge_cfg2);
2311 				pcie_capability_read_word(adev->pdev,
2312 							  PCI_EXP_LNKCTL2,
2313 							  &gpu_cfg2);
2314 
2315 				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
2316 				tmp |= LC_SET_QUIESCE;
2317 				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
2318 
2319 				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
2320 				tmp |= LC_REDO_EQ;
2321 				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
2322 
2323 				mdelay(100);
2324 
2325 				pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
2326 								   PCI_EXP_LNKCTL_HAWD,
2327 								   bridge_cfg &
2328 								   PCI_EXP_LNKCTL_HAWD);
2329 				pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL,
2330 								   PCI_EXP_LNKCTL_HAWD,
2331 								   gpu_cfg &
2332 								   PCI_EXP_LNKCTL_HAWD);
2333 
2334 				pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
2335 							  &tmp16);
2336 				tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
2337 					   PCI_EXP_LNKCTL2_TX_MARGIN);
2338 				tmp16 |= (bridge_cfg2 &
2339 					  (PCI_EXP_LNKCTL2_ENTER_COMP |
2340 					   PCI_EXP_LNKCTL2_TX_MARGIN));
2341 				pcie_capability_write_word(root,
2342 							   PCI_EXP_LNKCTL2,
2343 							   tmp16);
2344 
2345 				pcie_capability_read_word(adev->pdev,
2346 							  PCI_EXP_LNKCTL2,
2347 							  &tmp16);
2348 				tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
2349 					   PCI_EXP_LNKCTL2_TX_MARGIN);
2350 				tmp16 |= (gpu_cfg2 &
2351 					  (PCI_EXP_LNKCTL2_ENTER_COMP |
2352 					   PCI_EXP_LNKCTL2_TX_MARGIN));
2353 				pcie_capability_write_word(adev->pdev,
2354 							   PCI_EXP_LNKCTL2,
2355 							   tmp16);
2356 
2357 				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
2358 				tmp &= ~LC_SET_QUIESCE;
2359 				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
2360 			}
2361 		}
2362 	}
2363 
2364 	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
2365 	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
2366 	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
2367 
2368 	pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
2369 	tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
2370 
2371 	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2372 		tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
2373 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
2374 		tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
2375 	else
2376 		tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
2377 	pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
2378 
2379 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
2380 	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
2381 	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
2382 
2383 	for (i = 0; i < adev->usec_timeout; i++) {
2384 		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
2385 		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
2386 			break;
2387 		udelay(1);
2388 	}
2389 }
2390 
2391 static inline u32 si_pif_phy0_rreg(struct amdgpu_device *adev, u32 reg)
2392 {
2393 	unsigned long flags;
2394 	u32 r;
2395 
2396 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
2397 	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
2398 	r = RREG32(EVERGREEN_PIF_PHY0_DATA);
2399 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
2400 	return r;
2401 }
2402 
2403 static inline void si_pif_phy0_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
2404 {
2405 	unsigned long flags;
2406 
2407 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
2408 	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
2409 	WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
2410 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
2411 }
2412 
2413 static inline u32 si_pif_phy1_rreg(struct amdgpu_device *adev, u32 reg)
2414 {
2415 	unsigned long flags;
2416 	u32 r;
2417 
2418 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
2419 	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
2420 	r = RREG32(EVERGREEN_PIF_PHY1_DATA);
2421 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
2422 	return r;
2423 }
2424 
2425 static inline void si_pif_phy1_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
2426 {
2427 	unsigned long flags;
2428 
2429 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
2430 	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
2431 	WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
2432 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
2433 }
2434 static void si_program_aspm(struct amdgpu_device *adev)
2435 {
2436 	u32 data, orig;
2437 	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
2438 	bool disable_clkreq = false;
2439 
2440 	if (!amdgpu_device_should_use_aspm(adev))
2441 		return;
2442 
2443 	if (adev->flags & AMD_IS_APU)
2444 		return;
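
	/* Override the transmitted fast training sequence (N_FTS) count to 0x24 */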
2445 	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
2446 	data &= ~LC_XMIT_N_FTS_MASK;
2447 	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
2448 	if (orig != data)
2449 		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
2450 
2451 	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
2452 	data |= LC_GO_TO_RECOVERY;
2453 	if (orig != data)
2454 		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
2455 
2456 	orig = data = RREG32_PCIE(PCIE_P_CNTL);
2457 	data |= P_IGNORE_EDB_ERR;
2458 	if (orig != data)
2459 		WREG32_PCIE(PCIE_P_CNTL, data);
2460 
2461 	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
2462 	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
2463 	data |= LC_PMI_TO_L1_DIS;
2464 	if (!disable_l0s)
2465 		data |= LC_L0S_INACTIVITY(7);
2466 
2467 	if (!disable_l1) {
2468 		data |= LC_L1_INACTIVITY(7);
2469 		data &= ~LC_PMI_TO_L1_DIS;
2470 		if (orig != data)
2471 			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
2472 
2473 		if (!disable_plloff_in_l1) {
2474 			bool clk_req_support;
2475 
2476 			orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_0);
2477 			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
2478 			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
2479 			if (orig != data)
2480 				si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_0, data);
2481 
2482 			orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_1);
2483 			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
2484 			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
2485 			if (orig != data)
2486 				si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_1, data);
2487 
2488 			orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_0);
2489 			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
2490 			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
2491 			if (orig != data)
2492 				si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_0, data);
2493 
2494 			orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_1);
2495 			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
2496 			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
2497 			if (orig != data)
2498 				si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_1, data);
2499 
2500 			if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
2501 				orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_0);
2502 				data &= ~PLL_RAMP_UP_TIME_0_MASK;
2503 				if (orig != data)
2504 					si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_0, data);
2505 
2506 				orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_1);
2507 				data &= ~PLL_RAMP_UP_TIME_1_MASK;
2508 				if (orig != data)
2509 					si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_1, data);
2510 
2511 				orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_2);
2512 				data &= ~PLL_RAMP_UP_TIME_2_MASK;
2513 				if (orig != data)
2514 					si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_2, data);
2515 
2516 				orig = data = si_pif_phy0_rreg(adev, PB0_PIF_PWRDOWN_3);
2517 				data &= ~PLL_RAMP_UP_TIME_3_MASK;
2518 				if (orig != data)
2519 					si_pif_phy0_wreg(adev, PB0_PIF_PWRDOWN_3, data);
2520 
2521 				orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_0);
2522 				data &= ~PLL_RAMP_UP_TIME_0_MASK;
2523 				if (orig != data)
2524 					si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_0, data);
2525 
2526 				orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_1);
2527 				data &= ~PLL_RAMP_UP_TIME_1_MASK;
2528 				if (orig != data)
2529 					si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_1, data);
2530 
2531 				orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_2);
2532 				data &= ~PLL_RAMP_UP_TIME_2_MASK;
2533 				if (orig != data)
2534 					si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_2, data);
2535 
2536 				orig = data = si_pif_phy1_rreg(adev, PB1_PIF_PWRDOWN_3);
2537 				data &= ~PLL_RAMP_UP_TIME_3_MASK;
2538 				if (orig != data)
2539 					si_pif_phy1_wreg(adev, PB1_PIF_PWRDOWN_3, data);
2540 			}
2541 			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
2542 			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
2543 			data |= LC_DYN_LANES_PWR_STATE(3);
2544 			if (orig != data)
2545 				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
2546 
2547 			orig = data = si_pif_phy0_rreg(adev, PB0_PIF_CNTL);
2548 			data &= ~LS2_EXIT_TIME_MASK;
2549 			if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
2550 				data |= LS2_EXIT_TIME(5);
2551 			if (orig != data)
2552 				si_pif_phy0_wreg(adev, PB0_PIF_CNTL, data);
2553 
2554 			orig = data = si_pif_phy1_rreg(adev, PB1_PIF_CNTL);
2555 			data &= ~LS2_EXIT_TIME_MASK;
2556 			if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
2557 				data |= LS2_EXIT_TIME(5);
2558 			if (orig != data)
2559 				si_pif_phy1_wreg(adev, PB1_PIF_CNTL, data);
2560 
2561 			if (!disable_clkreq &&
2562 			    !pci_is_root_bus(adev->pdev->bus)) {
2563 				struct pci_dev *root = adev->pdev->bus->self;
2564 				u32 lnkcap;
2565 
2566 				clk_req_support = false;
2567 				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
2568 				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
2569 					clk_req_support = true;
2570 			} else {
2571 				clk_req_support = false;
2572 			}
2573 
2574 			if (clk_req_support) {
2575 				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
2576 				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
2577 				if (orig != data)
2578 					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
2579 
2580 				orig = data = RREG32(THM_CLK_CNTL);
2581 				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
2582 				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
2583 				if (orig != data)
2584 					WREG32(THM_CLK_CNTL, data);
2585 
2586 				orig = data = RREG32(MISC_CLK_CNTL);
2587 				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
2588 				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
2589 				if (orig != data)
2590 					WREG32(MISC_CLK_CNTL, data);
2591 
2592 				orig = data = RREG32(CG_CLKPIN_CNTL);
2593 				data &= ~BCLK_AS_XCLK;
2594 				if (orig != data)
2595 					WREG32(CG_CLKPIN_CNTL, data);
2596 
2597 				orig = data = RREG32(CG_CLKPIN_CNTL_2);
2598 				data &= ~FORCE_BIF_REFCLK_EN;
2599 				if (orig != data)
2600 					WREG32(CG_CLKPIN_CNTL_2, data);
2601 
2602 				orig = data = RREG32(MPLL_BYPASSCLK_SEL);
2603 				data &= ~MPLL_CLKOUT_SEL_MASK;
2604 				data |= MPLL_CLKOUT_SEL(4);
2605 				if (orig != data)
2606 					WREG32(MPLL_BYPASSCLK_SEL, data);
2607 
2608 				orig = data = RREG32(SPLL_CNTL_MODE);
2609 				data &= ~SPLL_REFCLK_SEL_MASK;
2610 				if (orig != data)
2611 					WREG32(SPLL_CNTL_MODE, data);
2612 			}
2613 		}
2614 	} else {
2615 		if (orig != data)
2616 			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
2617 	}
2618 
2619 	orig = data = RREG32_PCIE(PCIE_CNTL2);
2620 	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
2621 	if (orig != data)
2622 		WREG32_PCIE(PCIE_CNTL2, data);
2623 
2624 	if (!disable_l0s) {
2625 		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
2626 		if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
2627 			data = RREG32_PCIE(PCIE_LC_STATUS1);
2628 			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
2629 				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
2630 				data &= ~LC_L0S_INACTIVITY_MASK;
2631 				if (orig != data)
2632 					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
2633 			}
2634 		}
2635 	}
2636 }
2637 
2638 static void si_fix_pci_max_read_req_size(struct amdgpu_device *adev)
2639 {
2640 	int readrq;
2641 	u16 v;
2642 
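	/* pcie_get_readrq() returns a power of two, so v == 0 corresponds to
	 * 128 bytes and v == 6 or 7 to out-of-range values above 4096 bytes;
	 * clamp all of those back to a 512-byte max read request size.
	 */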
2643 	readrq = pcie_get_readrq(adev->pdev);
2644 	v = ffs(readrq) - 8;
2645 	if ((v == 0) || (v == 6) || (v == 7))
2646 		pcie_set_readrq(adev->pdev, 512);
2647 }
2648 
2649 static int si_common_hw_init(void *handle)
2650 {
2651 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2652 
2653 	si_fix_pci_max_read_req_size(adev);
2654 	si_init_golden_registers(adev);
2655 	si_pcie_gen3_enable(adev);
2656 	si_program_aspm(adev);
2657 
2658 	return 0;
2659 }
2660 
2661 static int si_common_hw_fini(void *handle)
2662 {
2663 	return 0;
2664 }
2665 
2666 static int si_common_suspend(void *handle)
2667 {
2668 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2669 
2670 	return si_common_hw_fini(adev);
2671 }
2672 
2673 static int si_common_resume(void *handle)
2674 {
2675 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2676 
2677 	return si_common_hw_init(adev);
2678 }
2679 
2680 static bool si_common_is_idle(void *handle)
2681 {
2682 	return true;
2683 }
2684 
2685 static int si_common_wait_for_idle(void *handle)
2686 {
2687 	return 0;
2688 }
2689 
2690 static int si_common_soft_reset(void *handle)
2691 {
2692 	return 0;
2693 }
2694 
2695 static int si_common_set_clockgating_state(void *handle,
2696 					    enum amd_clockgating_state state)
2697 {
2698 	return 0;
2699 }
2700 
2701 static int si_common_set_powergating_state(void *handle,
2702 					    enum amd_powergating_state state)
2703 {
2704 	return 0;
2705 }
2706 
2707 static const struct amd_ip_funcs si_common_ip_funcs = {
2708 	.name = "si_common",
2709 	.early_init = si_common_early_init,
2710 	.late_init = NULL,
2711 	.sw_init = si_common_sw_init,
2712 	.sw_fini = si_common_sw_fini,
2713 	.hw_init = si_common_hw_init,
2714 	.hw_fini = si_common_hw_fini,
2715 	.suspend = si_common_suspend,
2716 	.resume = si_common_resume,
2717 	.is_idle = si_common_is_idle,
2718 	.wait_for_idle = si_common_wait_for_idle,
2719 	.soft_reset = si_common_soft_reset,
2720 	.set_clockgating_state = si_common_set_clockgating_state,
2721 	.set_powergating_state = si_common_set_powergating_state,
2722 };
2723 
2724 static const struct amdgpu_ip_block_version si_common_ip_block =
2725 {
2726 	.type = AMD_IP_BLOCK_TYPE_COMMON,
2727 	.major = 1,
2728 	.minor = 0,
2729 	.rev = 0,
2730 	.funcs = &si_common_ip_funcs,
2731 };
2732 
2733 int si_set_ip_blocks(struct amdgpu_device *adev)
2734 {
2735 	switch (adev->asic_type) {
2736 	case CHIP_VERDE:
2737 	case CHIP_TAHITI:
2738 	case CHIP_PITCAIRN:
2739 		amdgpu_device_ip_block_add(adev, &si_common_ip_block);
2740 		amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
2741 		amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
2742 		amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
2743 		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
2744 		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
2745 		if (adev->enable_virtual_display)
2746 			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2747 #if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI)
2748 		else if (amdgpu_device_has_dc_support(adev))
2749 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
2750 #endif
2751 		else
2752 			amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
2753 		amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
2754 		/* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
2755 		break;
2756 	case CHIP_OLAND:
2757 		amdgpu_device_ip_block_add(adev, &si_common_ip_block);
2758 		amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
2759 		amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
2760 		amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
2761 		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
2762 		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
2763 		if (adev->enable_virtual_display)
2764 			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2765 #if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI)
2766 		else if (amdgpu_device_has_dc_support(adev))
2767 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
2768 #endif
2769 		else
2770 			amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
2771 		amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
2772 		/* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
2773 		break;
2774 	case CHIP_HAINAN:
2775 		amdgpu_device_ip_block_add(adev, &si_common_ip_block);
2776 		amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
2777 		amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
2778 		amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
2779 		amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
2780 		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
2781 		if (adev->enable_virtual_display)
2782 			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2783 		break;
2784 	default:
2785 		BUG();
2786 	}
2787 	return 0;
2788 }
2789 
2790