1 /*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/pci.h>
25 #include <linux/slab.h>
26
27 #include <drm/amdgpu_drm.h>
28
29 #include "amdgpu.h"
30 #include "amdgpu_atombios.h"
31 #include "amdgpu_ih.h"
32 #include "amdgpu_uvd.h"
33 #include "amdgpu_vce.h"
34 #include "amdgpu_ucode.h"
35 #include "atom.h"
36 #include "amd_pcie.h"
37
38 #include "gmc/gmc_8_1_d.h"
39 #include "gmc/gmc_8_1_sh_mask.h"
40
41 #include "oss/oss_3_0_d.h"
42 #include "oss/oss_3_0_sh_mask.h"
43
44 #include "bif/bif_5_0_d.h"
45 #include "bif/bif_5_0_sh_mask.h"
46
47 #include "gca/gfx_8_0_d.h"
48 #include "gca/gfx_8_0_sh_mask.h"
49
50 #include "smu/smu_7_1_1_d.h"
51 #include "smu/smu_7_1_1_sh_mask.h"
52
53 #include "uvd/uvd_5_0_d.h"
54 #include "uvd/uvd_5_0_sh_mask.h"
55
56 #include "vce/vce_3_0_d.h"
57 #include "vce/vce_3_0_sh_mask.h"
58
59 #include "dce/dce_10_0_d.h"
60 #include "dce/dce_10_0_sh_mask.h"
61
62 #include "vid.h"
63 #include "vi.h"
64 #include "gmc_v8_0.h"
65 #include "gmc_v7_0.h"
66 #include "gfx_v8_0.h"
67 #include "sdma_v2_4.h"
68 #include "sdma_v3_0.h"
69 #include "dce_v10_0.h"
70 #include "dce_v11_0.h"
71 #include "iceland_ih.h"
72 #include "tonga_ih.h"
73 #include "cz_ih.h"
74 #include "uvd_v5_0.h"
75 #include "uvd_v6_0.h"
76 #include "vce_v3_0.h"
77 #if defined(CONFIG_DRM_AMD_ACP)
78 #include "amdgpu_acp.h"
79 #endif
80 #include "amdgpu_vkms.h"
81 #include "mxgpu_vi.h"
82 #include "amdgpu_dm.h"
83
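/* Local PCIE/SMU register offsets and bit masks used by the ASPM programming
 * in vi_program_aspm() below; these are defined here instead of coming from
 * the generated VI register headers.
 */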
84 #define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6
85 #define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
86 #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
87 #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK 0x00000004L
88 #define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK 0x00000008L
89 #define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK 0x00000010L
90 #define ixPCIE_L1_PM_SUB_CNTL 0x378
91 #define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK 0x00000004L
92 #define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK 0x00000008L
93 #define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK 0x00000001L
94 #define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK 0x00000002L
95 #define PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK 0x00200000L
96 #define LINK_CAP 0x64
97 #define PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
98 #define ixCPM_CONTROL 0x1400118
99 #define ixPCIE_LC_CNTL7 0x100100BC
100 #define PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK 0x00000400L
101 #define PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT 0x00000007
102 #define PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT 0x00000009
103 #define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK 0x01000000L
104 #define PCIE_L1_PM_SUB_CNTL 0x378
105 #define ASIC_IS_P22(asic_type, rid) ((asic_type >= CHIP_POLARIS10) && \
106 (asic_type <= CHIP_POLARIS12) && \
107 (rid >= 0x6E))
108 /* Topaz */
109 static const struct amdgpu_video_codecs topaz_video_codecs_encode =
110 {
111 .codec_count = 0,
112 .codec_array = NULL,
113 };
114
115 /* Tonga, CZ, ST, Fiji */
116 static const struct amdgpu_video_codec_info tonga_video_codecs_encode_array[] =
117 {
118 {
119 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
120 .max_width = 4096,
121 .max_height = 2304,
122 .max_pixels_per_frame = 4096 * 2304,
123 .max_level = 0,
124 },
125 };
126
127 static const struct amdgpu_video_codecs tonga_video_codecs_encode =
128 {
129 .codec_count = ARRAY_SIZE(tonga_video_codecs_encode_array),
130 .codec_array = tonga_video_codecs_encode_array,
131 };
132
133 /* Polaris */
134 static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[] =
135 {
136 {
137 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
138 .max_width = 4096,
139 .max_height = 2304,
140 .max_pixels_per_frame = 4096 * 2304,
141 .max_level = 0,
142 },
143 {
144 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
145 .max_width = 4096,
146 .max_height = 2304,
147 .max_pixels_per_frame = 4096 * 2304,
148 .max_level = 0,
149 },
150 };
151
152 static const struct amdgpu_video_codecs polaris_video_codecs_encode =
153 {
154 .codec_count = ARRAY_SIZE(polaris_video_codecs_encode_array),
155 .codec_array = polaris_video_codecs_encode_array,
156 };
157
158 /* Topaz */
159 static const struct amdgpu_video_codecs topaz_video_codecs_decode =
160 {
161 .codec_count = 0,
162 .codec_array = NULL,
163 };
164
165 /* Tonga */
166 static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
167 {
168 {
169 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
170 .max_width = 4096,
171 .max_height = 4096,
172 .max_pixels_per_frame = 4096 * 4096,
173 .max_level = 3,
174 },
175 {
176 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
177 .max_width = 4096,
178 .max_height = 4096,
179 .max_pixels_per_frame = 4096 * 4096,
180 .max_level = 5,
181 },
182 {
183 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
184 .max_width = 4096,
185 .max_height = 4096,
186 .max_pixels_per_frame = 4096 * 4096,
187 .max_level = 52,
188 },
189 {
190 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
191 .max_width = 4096,
192 .max_height = 4096,
193 .max_pixels_per_frame = 4096 * 4096,
194 .max_level = 4,
195 },
196 };
197
198 static const struct amdgpu_video_codecs tonga_video_codecs_decode =
199 {
200 .codec_count = ARRAY_SIZE(tonga_video_codecs_decode_array),
201 .codec_array = tonga_video_codecs_decode_array,
202 };
203
204 /* CZ, ST, Fiji, Polaris */
205 static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
206 {
207 {
208 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
209 .max_width = 4096,
210 .max_height = 4096,
211 .max_pixels_per_frame = 4096 * 4096,
212 .max_level = 3,
213 },
214 {
215 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
216 .max_width = 4096,
217 .max_height = 4096,
218 .max_pixels_per_frame = 4096 * 4096,
219 .max_level = 5,
220 },
221 {
222 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
223 .max_width = 4096,
224 .max_height = 4096,
225 .max_pixels_per_frame = 4096 * 4096,
226 .max_level = 52,
227 },
228 {
229 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
230 .max_width = 4096,
231 .max_height = 4096,
232 .max_pixels_per_frame = 4096 * 4096,
233 .max_level = 4,
234 },
235 {
236 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
237 .max_width = 4096,
238 .max_height = 4096,
239 .max_pixels_per_frame = 4096 * 4096,
240 .max_level = 186,
241 },
242 {
243 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
244 .max_width = 4096,
245 .max_height = 4096,
246 .max_pixels_per_frame = 4096 * 4096,
247 .max_level = 0,
248 },
249 };
250
251 static const struct amdgpu_video_codecs cz_video_codecs_decode =
252 {
253 .codec_count = ARRAY_SIZE(cz_video_codecs_decode_array),
254 .codec_array = cz_video_codecs_decode_array,
255 };
256
257 static int vi_query_video_codecs(struct amdgpu_device *adev, bool encode,
258 const struct amdgpu_video_codecs **codecs)
259 {
260 switch (adev->asic_type) {
261 case CHIP_TOPAZ:
262 if (encode)
263 *codecs = &topaz_video_codecs_encode;
264 else
265 *codecs = &topaz_video_codecs_decode;
266 return 0;
267 case CHIP_TONGA:
268 if (encode)
269 *codecs = &tonga_video_codecs_encode;
270 else
271 *codecs = &tonga_video_codecs_decode;
272 return 0;
273 case CHIP_POLARIS10:
274 case CHIP_POLARIS11:
275 case CHIP_POLARIS12:
276 case CHIP_VEGAM:
277 if (encode)
278 *codecs = &polaris_video_codecs_encode;
279 else
280 *codecs = &cz_video_codecs_decode;
281 return 0;
282 case CHIP_FIJI:
283 case CHIP_CARRIZO:
284 case CHIP_STONEY:
285 if (encode)
286 *codecs = &tonga_video_codecs_encode;
287 else
288 *codecs = &cz_video_codecs_decode;
289 return 0;
290 default:
291 return -EINVAL;
292 }
293 }
294
295 /*
296 * Indirect register accessors
297 */
298 static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
299 {
300 unsigned long flags;
301 u32 r;
302
303 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
304 WREG32_NO_KIQ(mmPCIE_INDEX, reg);
305 (void)RREG32_NO_KIQ(mmPCIE_INDEX);
306 r = RREG32_NO_KIQ(mmPCIE_DATA);
307 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
308 return r;
309 }
310
311 static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
312 {
313 unsigned long flags;
314
315 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
316 WREG32_NO_KIQ(mmPCIE_INDEX, reg);
317 (void)RREG32_NO_KIQ(mmPCIE_INDEX);
318 WREG32_NO_KIQ(mmPCIE_DATA, v);
319 (void)RREG32_NO_KIQ(mmPCIE_DATA);
320 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
321 }
322
323 static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
324 {
325 unsigned long flags;
326 u32 r;
327
328 spin_lock_irqsave(&adev->smc_idx_lock, flags);
329 WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
330 r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
331 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
332 return r;
333 }
334
335 static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
336 {
337 unsigned long flags;
338
339 spin_lock_irqsave(&adev->smc_idx_lock, flags);
340 WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
341 WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
342 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
343 }
344
345 /* smu_8_0_d.h */
346 #define mmMP0PUB_IND_INDEX 0x180
347 #define mmMP0PUB_IND_DATA 0x181
348
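/* On APUs (Carrizo/Stoney) the SMC register space is accessed through the
 * MP0PUB index/data pair defined above rather than SMC_IND_INDEX_11/DATA_11.
 */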
349 static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
350 {
351 unsigned long flags;
352 u32 r;
353
354 spin_lock_irqsave(&adev->smc_idx_lock, flags);
355 WREG32(mmMP0PUB_IND_INDEX, (reg));
356 r = RREG32(mmMP0PUB_IND_DATA);
357 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
358 return r;
359 }
360
361 static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
362 {
363 unsigned long flags;
364
365 spin_lock_irqsave(&adev->smc_idx_lock, flags);
366 WREG32(mmMP0PUB_IND_INDEX, (reg));
367 WREG32(mmMP0PUB_IND_DATA, (v));
368 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
369 }
370
371 static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
372 {
373 unsigned long flags;
374 u32 r;
375
376 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
377 WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
378 r = RREG32(mmUVD_CTX_DATA);
379 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
380 return r;
381 }
382
383 static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
384 {
385 unsigned long flags;
386
387 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
388 WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
389 WREG32(mmUVD_CTX_DATA, (v));
390 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
391 }
392
393 static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
394 {
395 unsigned long flags;
396 u32 r;
397
398 spin_lock_irqsave(&adev->didt_idx_lock, flags);
399 WREG32(mmDIDT_IND_INDEX, (reg));
400 r = RREG32(mmDIDT_IND_DATA);
401 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
402 return r;
403 }
404
405 static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
406 {
407 unsigned long flags;
408
409 spin_lock_irqsave(&adev->didt_idx_lock, flags);
410 WREG32(mmDIDT_IND_INDEX, (reg));
411 WREG32(mmDIDT_IND_DATA, (v));
412 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
413 }
414
415 static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
416 {
417 unsigned long flags;
418 u32 r;
419
420 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
421 WREG32(mmGC_CAC_IND_INDEX, (reg));
422 r = RREG32(mmGC_CAC_IND_DATA);
423 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
424 return r;
425 }
426
427 static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
428 {
429 unsigned long flags;
430
431 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
432 WREG32(mmGC_CAC_IND_INDEX, (reg));
433 WREG32(mmGC_CAC_IND_DATA, (v));
434 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
435 }
436
437
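/* Golden register tables: {register, and_mask, or_mask} triplets that are
 * applied by amdgpu_device_program_register_sequence() from
 * vi_init_golden_registers().
 */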
438 static const u32 tonga_mgcg_cgcg_init[] =
439 {
440 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
441 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
442 mmPCIE_DATA, 0x000f0000, 0x00000000,
443 mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
444 mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
445 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
446 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
447 };
448
449 static const u32 fiji_mgcg_cgcg_init[] =
450 {
451 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
452 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
453 mmPCIE_DATA, 0x000f0000, 0x00000000,
454 mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
455 mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
456 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
457 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
458 };
459
460 static const u32 iceland_mgcg_cgcg_init[] =
461 {
462 mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
463 mmPCIE_DATA, 0x000f0000, 0x00000000,
464 mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
465 mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
466 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
467 };
468
469 static const u32 cz_mgcg_cgcg_init[] =
470 {
471 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
472 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
473 mmPCIE_DATA, 0x000f0000, 0x00000000,
474 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
475 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
476 };
477
478 static const u32 stoney_mgcg_cgcg_init[] =
479 {
480 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
481 mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
482 mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
483 };
484
485 static void vi_init_golden_registers(struct amdgpu_device *adev)
486 {
487 /* Some of the registers might be dependent on GRBM_GFX_INDEX */
488 mutex_lock(&adev->grbm_idx_mutex);
489
490 if (amdgpu_sriov_vf(adev)) {
491 xgpu_vi_init_golden_registers(adev);
492 mutex_unlock(&adev->grbm_idx_mutex);
493 return;
494 }
495
496 switch (adev->asic_type) {
497 case CHIP_TOPAZ:
498 amdgpu_device_program_register_sequence(adev,
499 iceland_mgcg_cgcg_init,
500 ARRAY_SIZE(iceland_mgcg_cgcg_init));
501 break;
502 case CHIP_FIJI:
503 amdgpu_device_program_register_sequence(adev,
504 fiji_mgcg_cgcg_init,
505 ARRAY_SIZE(fiji_mgcg_cgcg_init));
506 break;
507 case CHIP_TONGA:
508 amdgpu_device_program_register_sequence(adev,
509 tonga_mgcg_cgcg_init,
510 ARRAY_SIZE(tonga_mgcg_cgcg_init));
511 break;
512 case CHIP_CARRIZO:
513 amdgpu_device_program_register_sequence(adev,
514 cz_mgcg_cgcg_init,
515 ARRAY_SIZE(cz_mgcg_cgcg_init));
516 break;
517 case CHIP_STONEY:
518 amdgpu_device_program_register_sequence(adev,
519 stoney_mgcg_cgcg_init,
520 ARRAY_SIZE(stoney_mgcg_cgcg_init));
521 break;
522 case CHIP_POLARIS10:
523 case CHIP_POLARIS11:
524 case CHIP_POLARIS12:
525 case CHIP_VEGAM:
526 default:
527 break;
528 }
529 mutex_unlock(&adev->grbm_idx_mutex);
530 }
531
532 /**
533 * vi_get_xclk - get the xclk
534 *
535 * @adev: amdgpu_device pointer
536 *
537 * Returns the reference clock used by the gfx engine
538 * (VI).
539 */
540 static u32 vi_get_xclk(struct amdgpu_device *adev)
541 {
542 u32 reference_clock = adev->clock.spll.reference_freq;
543 u32 tmp;
544
545 if (adev->flags & AMD_IS_APU) {
546 switch (adev->asic_type) {
547 case CHIP_STONEY:
548 /* vbios says 48MHz, but the actual freq is 100MHz */
549 return 10000;
550 default:
551 return reference_clock;
552 }
553 }
554
555 tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
556 if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
557 return 1000;
558
559 tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
560 if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
561 return reference_clock / 4;
562
563 return reference_clock;
564 }
565
566 /**
567 * vi_srbm_select - select specific register instances
568 *
569 * @adev: amdgpu_device pointer
570 * @me: selected ME (micro engine)
571 * @pipe: pipe
572 * @queue: queue
573 * @vmid: VMID
574 *
575 * Switches the currently active register instances. Some
576 * registers are instanced per VMID, others are instanced per
577 * me/pipe/queue combination.
578 */
579 void vi_srbm_select(struct amdgpu_device *adev,
580 u32 me, u32 pipe, u32 queue, u32 vmid)
581 {
582 u32 srbm_gfx_cntl = 0;
583 srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
584 srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
585 srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
586 srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
587 WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
588 }
589
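/* Fetch the VBIOS with the ROM normally disabled: temporarily enable the
 * BIOS ROM on the bus and turn off VGA rendering, read the image via
 * amdgpu_read_bios(), then restore the saved register state.
 */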
590 static bool vi_read_disabled_bios(struct amdgpu_device *adev)
591 {
592 u32 bus_cntl;
593 u32 d1vga_control = 0;
594 u32 d2vga_control = 0;
595 u32 vga_render_control = 0;
596 u32 rom_cntl;
597 bool r;
598
599 bus_cntl = RREG32(mmBUS_CNTL);
600 if (adev->mode_info.num_crtc) {
601 d1vga_control = RREG32(mmD1VGA_CONTROL);
602 d2vga_control = RREG32(mmD2VGA_CONTROL);
603 vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
604 }
605 rom_cntl = RREG32_SMC(ixROM_CNTL);
606
607 /* enable the rom */
608 WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
609 if (adev->mode_info.num_crtc) {
610 /* Disable VGA mode */
611 WREG32(mmD1VGA_CONTROL,
612 (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
613 D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
614 WREG32(mmD2VGA_CONTROL,
615 (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
616 D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
617 WREG32(mmVGA_RENDER_CONTROL,
618 (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
619 }
620 WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);
621
622 r = amdgpu_read_bios(adev);
623
624 /* restore regs */
625 WREG32(mmBUS_CNTL, bus_cntl);
626 if (adev->mode_info.num_crtc) {
627 WREG32(mmD1VGA_CONTROL, d1vga_control);
628 WREG32(mmD2VGA_CONTROL, d2vga_control);
629 WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
630 }
631 WREG32_SMC(ixROM_CNTL, rom_cntl);
632 return r;
633 }
634
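/* Read the VBIOS image straight from the on-board ROM through the SMC
 * ROM_INDEX/ROM_DATA indirect registers, one dword at a time.  Not usable
 * on APUs, where the VBIOS is part of the system BIOS image.
 */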
635 static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
636 u8 *bios, u32 length_bytes)
637 {
638 u32 *dw_ptr;
639 unsigned long flags;
640 u32 i, length_dw;
641
642 if (bios == NULL)
643 return false;
644 if (length_bytes == 0)
645 return false;
646 /* APU vbios image is part of sbios image */
647 if (adev->flags & AMD_IS_APU)
648 return false;
649
650 dw_ptr = (u32 *)bios;
651 length_dw = ALIGN(length_bytes, 4) / 4;
652 /* take the smc lock since we are using the smc index */
653 spin_lock_irqsave(&adev->smc_idx_lock, flags);
654 /* set rom index to 0 */
655 WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
656 WREG32(mmSMC_IND_DATA_11, 0);
657 /* set index to data for continuous read */
658 WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
659 for (i = 0; i < length_dw; i++)
660 dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
661 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
662
663 return true;
664 }
665
666 static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
667 {mmGRBM_STATUS},
668 {mmGRBM_STATUS2},
669 {mmGRBM_STATUS_SE0},
670 {mmGRBM_STATUS_SE1},
671 {mmGRBM_STATUS_SE2},
672 {mmGRBM_STATUS_SE3},
673 {mmSRBM_STATUS},
674 {mmSRBM_STATUS2},
675 {mmSRBM_STATUS3},
676 {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
677 {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
678 {mmCP_STAT},
679 {mmCP_STALLED_STAT1},
680 {mmCP_STALLED_STAT2},
681 {mmCP_STALLED_STAT3},
682 {mmCP_CPF_BUSY_STAT},
683 {mmCP_CPF_STALLED_STAT1},
684 {mmCP_CPF_STATUS},
685 {mmCP_CPC_BUSY_STAT},
686 {mmCP_CPC_STALLED_STAT1},
687 {mmCP_CPC_STATUS},
688 {mmGB_ADDR_CONFIG},
689 {mmMC_ARB_RAMCFG},
690 {mmGB_TILE_MODE0},
691 {mmGB_TILE_MODE1},
692 {mmGB_TILE_MODE2},
693 {mmGB_TILE_MODE3},
694 {mmGB_TILE_MODE4},
695 {mmGB_TILE_MODE5},
696 {mmGB_TILE_MODE6},
697 {mmGB_TILE_MODE7},
698 {mmGB_TILE_MODE8},
699 {mmGB_TILE_MODE9},
700 {mmGB_TILE_MODE10},
701 {mmGB_TILE_MODE11},
702 {mmGB_TILE_MODE12},
703 {mmGB_TILE_MODE13},
704 {mmGB_TILE_MODE14},
705 {mmGB_TILE_MODE15},
706 {mmGB_TILE_MODE16},
707 {mmGB_TILE_MODE17},
708 {mmGB_TILE_MODE18},
709 {mmGB_TILE_MODE19},
710 {mmGB_TILE_MODE20},
711 {mmGB_TILE_MODE21},
712 {mmGB_TILE_MODE22},
713 {mmGB_TILE_MODE23},
714 {mmGB_TILE_MODE24},
715 {mmGB_TILE_MODE25},
716 {mmGB_TILE_MODE26},
717 {mmGB_TILE_MODE27},
718 {mmGB_TILE_MODE28},
719 {mmGB_TILE_MODE29},
720 {mmGB_TILE_MODE30},
721 {mmGB_TILE_MODE31},
722 {mmGB_MACROTILE_MODE0},
723 {mmGB_MACROTILE_MODE1},
724 {mmGB_MACROTILE_MODE2},
725 {mmGB_MACROTILE_MODE3},
726 {mmGB_MACROTILE_MODE4},
727 {mmGB_MACROTILE_MODE5},
728 {mmGB_MACROTILE_MODE6},
729 {mmGB_MACROTILE_MODE7},
730 {mmGB_MACROTILE_MODE8},
731 {mmGB_MACROTILE_MODE9},
732 {mmGB_MACROTILE_MODE10},
733 {mmGB_MACROTILE_MODE11},
734 {mmGB_MACROTILE_MODE12},
735 {mmGB_MACROTILE_MODE13},
736 {mmGB_MACROTILE_MODE14},
737 {mmGB_MACROTILE_MODE15},
738 {mmCC_RB_BACKEND_DISABLE, true},
739 {mmGC_USER_RB_BACKEND_DISABLE, true},
740 {mmGB_BACKEND_MAP, false},
741 {mmPA_SC_RASTER_CONFIG, true},
742 {mmPA_SC_RASTER_CONFIG_1, true},
743 };
744
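/* Return the value of a register from the allowed-read list above.  GRBM
 * indexed registers are served from the cached per-SE/per-SH rb_config when
 * possible, otherwise they are read under grbm_idx_mutex with the requested
 * SE/SH selected.  Non-indexed config registers are likewise answered from
 * the cached gfx config instead of touching the hardware.
 */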
745 static uint32_t vi_get_register_value(struct amdgpu_device *adev,
746 bool indexed, u32 se_num,
747 u32 sh_num, u32 reg_offset)
748 {
749 if (indexed) {
750 uint32_t val;
751 unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
752 unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
753
754 switch (reg_offset) {
755 case mmCC_RB_BACKEND_DISABLE:
756 return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
757 case mmGC_USER_RB_BACKEND_DISABLE:
758 return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
759 case mmPA_SC_RASTER_CONFIG:
760 return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
761 case mmPA_SC_RASTER_CONFIG_1:
762 return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
763 }
764
765 mutex_lock(&adev->grbm_idx_mutex);
766 if (se_num != 0xffffffff || sh_num != 0xffffffff)
767 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
768
769 val = RREG32(reg_offset);
770
771 if (se_num != 0xffffffff || sh_num != 0xffffffff)
772 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
773 mutex_unlock(&adev->grbm_idx_mutex);
774 return val;
775 } else {
776 unsigned idx;
777
778 switch (reg_offset) {
779 case mmGB_ADDR_CONFIG:
780 return adev->gfx.config.gb_addr_config;
781 case mmMC_ARB_RAMCFG:
782 return adev->gfx.config.mc_arb_ramcfg;
783 case mmGB_TILE_MODE0:
784 case mmGB_TILE_MODE1:
785 case mmGB_TILE_MODE2:
786 case mmGB_TILE_MODE3:
787 case mmGB_TILE_MODE4:
788 case mmGB_TILE_MODE5:
789 case mmGB_TILE_MODE6:
790 case mmGB_TILE_MODE7:
791 case mmGB_TILE_MODE8:
792 case mmGB_TILE_MODE9:
793 case mmGB_TILE_MODE10:
794 case mmGB_TILE_MODE11:
795 case mmGB_TILE_MODE12:
796 case mmGB_TILE_MODE13:
797 case mmGB_TILE_MODE14:
798 case mmGB_TILE_MODE15:
799 case mmGB_TILE_MODE16:
800 case mmGB_TILE_MODE17:
801 case mmGB_TILE_MODE18:
802 case mmGB_TILE_MODE19:
803 case mmGB_TILE_MODE20:
804 case mmGB_TILE_MODE21:
805 case mmGB_TILE_MODE22:
806 case mmGB_TILE_MODE23:
807 case mmGB_TILE_MODE24:
808 case mmGB_TILE_MODE25:
809 case mmGB_TILE_MODE26:
810 case mmGB_TILE_MODE27:
811 case mmGB_TILE_MODE28:
812 case mmGB_TILE_MODE29:
813 case mmGB_TILE_MODE30:
814 case mmGB_TILE_MODE31:
815 idx = (reg_offset - mmGB_TILE_MODE0);
816 return adev->gfx.config.tile_mode_array[idx];
817 case mmGB_MACROTILE_MODE0:
818 case mmGB_MACROTILE_MODE1:
819 case mmGB_MACROTILE_MODE2:
820 case mmGB_MACROTILE_MODE3:
821 case mmGB_MACROTILE_MODE4:
822 case mmGB_MACROTILE_MODE5:
823 case mmGB_MACROTILE_MODE6:
824 case mmGB_MACROTILE_MODE7:
825 case mmGB_MACROTILE_MODE8:
826 case mmGB_MACROTILE_MODE9:
827 case mmGB_MACROTILE_MODE10:
828 case mmGB_MACROTILE_MODE11:
829 case mmGB_MACROTILE_MODE12:
830 case mmGB_MACROTILE_MODE13:
831 case mmGB_MACROTILE_MODE14:
832 case mmGB_MACROTILE_MODE15:
833 idx = (reg_offset - mmGB_MACROTILE_MODE0);
834 return adev->gfx.config.macrotile_mode_array[idx];
835 default:
836 return RREG32(reg_offset);
837 }
838 }
839 }
840
841 static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
842 u32 sh_num, u32 reg_offset, u32 *value)
843 {
844 uint32_t i;
845
846 *value = 0;
847 for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
848 bool indexed = vi_allowed_read_registers[i].grbm_indexed;
849
850 if (reg_offset != vi_allowed_read_registers[i].reg_offset)
851 continue;
852
853 *value = vi_get_register_value(adev, indexed, se_num, sh_num,
854 reg_offset);
855 return 0;
856 }
857 return -EINVAL;
858 }
859
860 /**
861 * vi_asic_pci_config_reset - soft reset GPU
862 *
863 * @adev: amdgpu_device pointer
864 *
865 * Use PCI Config method to reset the GPU.
866 *
867 * Returns 0 for success.
868 */
869 static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
870 {
871 u32 i;
872 int r = -EINVAL;
873
874 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
875
876 /* disable BM */
877 pci_clear_master(adev->pdev);
878 /* reset */
879 amdgpu_device_pci_config_reset(adev);
880
881 udelay(100);
882
883 /* wait for asic to come out of reset */
884 for (i = 0; i < adev->usec_timeout; i++) {
885 if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
886 /* enable BM */
887 pci_set_master(adev->pdev);
888 adev->has_hw_reset = true;
889 r = 0;
890 break;
891 }
892 udelay(1);
893 }
894
895 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
896
897 return r;
898 }
899
900 static bool vi_asic_supports_baco(struct amdgpu_device *adev)
901 {
902 switch (adev->asic_type) {
903 case CHIP_FIJI:
904 case CHIP_TONGA:
905 case CHIP_POLARIS10:
906 case CHIP_POLARIS11:
907 case CHIP_POLARIS12:
908 case CHIP_TOPAZ:
909 return amdgpu_dpm_is_baco_supported(adev);
910 default:
911 return false;
912 }
913 }
914
915 static enum amd_reset_method
916 vi_asic_reset_method(struct amdgpu_device *adev)
917 {
918 bool baco_reset;
919
920 if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
921 amdgpu_reset_method == AMD_RESET_METHOD_BACO)
922 return amdgpu_reset_method;
923
924 if (amdgpu_reset_method != -1)
925 dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
926 amdgpu_reset_method);
927
928 switch (adev->asic_type) {
929 case CHIP_FIJI:
930 case CHIP_TONGA:
931 case CHIP_POLARIS10:
932 case CHIP_POLARIS11:
933 case CHIP_POLARIS12:
934 case CHIP_TOPAZ:
935 baco_reset = amdgpu_dpm_is_baco_supported(adev);
936 break;
937 default:
938 baco_reset = false;
939 break;
940 }
941
942 if (baco_reset)
943 return AMD_RESET_METHOD_BACO;
944 else
945 return AMD_RESET_METHOD_LEGACY;
946 }
947
948 /**
949 * vi_asic_reset - soft reset GPU
950 *
951 * @adev: amdgpu_device pointer
952 *
953 * Look up which blocks are hung and attempt
954 * to reset them.
955 * Returns 0 for success.
956 */
957 static int vi_asic_reset(struct amdgpu_device *adev)
958 {
959 int r;
960
961 /* APUs don't have full asic reset */
962 if (adev->flags & AMD_IS_APU)
963 return 0;
964
965 if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
966 dev_info(adev->dev, "BACO reset\n");
967 r = amdgpu_dpm_baco_reset(adev);
968 } else {
969 dev_info(adev->dev, "PCI CONFIG reset\n");
970 r = vi_asic_pci_config_reset(adev);
971 }
972
973 return r;
974 }
975
976 static u32 vi_get_config_memsize(struct amdgpu_device *adev)
977 {
978 return RREG32(mmCONFIG_MEMSIZE);
979 }
980
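/* Program a single UVD clock (VCLK or DCLK): look up the post divider via
 * atombios, write it to the given CNTL register and poll the STATUS register
 * (up to ~1s) until the clock reports as stable.
 */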
981 static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
982 u32 cntl_reg, u32 status_reg)
983 {
984 int r, i;
985 struct atom_clock_dividers dividers;
986 uint32_t tmp;
987
988 r = amdgpu_atombios_get_clock_dividers(adev,
989 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
990 clock, false, &dividers);
991 if (r)
992 return r;
993
994 tmp = RREG32_SMC(cntl_reg);
995
996 if (adev->flags & AMD_IS_APU)
997 tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
998 else
999 tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
1000 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
1001 tmp |= dividers.post_divider;
1002 WREG32_SMC(cntl_reg, tmp);
1003
1004 for (i = 0; i < 100; i++) {
1005 tmp = RREG32_SMC(status_reg);
1006 if (adev->flags & AMD_IS_APU) {
1007 if (tmp & 0x10000)
1008 break;
1009 } else {
1010 if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
1011 break;
1012 }
1013 mdelay(10);
1014 }
1015 if (i == 100)
1016 return -ETIMEDOUT;
1017 return 0;
1018 }
1019
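/* GNB (graphics north bridge) DFS control/status registers used for the
 * UVD/VCE clocks on APUs; discrete parts use the CG_*CLK registers instead.
 */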
1020 #define ixGNB_CLK1_DFS_CNTL 0xD82200F0
1021 #define ixGNB_CLK1_STATUS 0xD822010C
1022 #define ixGNB_CLK2_DFS_CNTL 0xD8220110
1023 #define ixGNB_CLK2_STATUS 0xD822012C
1024 #define ixGNB_CLK3_DFS_CNTL 0xD8220130
1025 #define ixGNB_CLK3_STATUS 0xD822014C
1026
1027 static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
1028 {
1029 int r;
1030
1031 if (adev->flags & AMD_IS_APU) {
1032 r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
1033 if (r)
1034 return r;
1035
1036 r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
1037 if (r)
1038 return r;
1039 } else {
1040 r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
1041 if (r)
1042 return r;
1043
1044 r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
1045 if (r)
1046 return r;
1047 }
1048
1049 return 0;
1050 }
1051
1052 static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
1053 {
1054 int r, i;
1055 struct atom_clock_dividers dividers;
1056 u32 tmp;
1057 u32 reg_ctrl;
1058 u32 reg_status;
1059 u32 status_mask;
1060 u32 reg_mask;
1061
1062 if (adev->flags & AMD_IS_APU) {
1063 reg_ctrl = ixGNB_CLK3_DFS_CNTL;
1064 reg_status = ixGNB_CLK3_STATUS;
1065 status_mask = 0x00010000;
1066 reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
1067 } else {
1068 reg_ctrl = ixCG_ECLK_CNTL;
1069 reg_status = ixCG_ECLK_STATUS;
1070 status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
1071 reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
1072 }
1073
1074 r = amdgpu_atombios_get_clock_dividers(adev,
1075 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1076 ecclk, false, &dividers);
1077 if (r)
1078 return r;
1079
1080 for (i = 0; i < 100; i++) {
1081 if (RREG32_SMC(reg_status) & status_mask)
1082 break;
1083 mdelay(10);
1084 }
1085
1086 if (i == 100)
1087 return -ETIMEDOUT;
1088
1089 tmp = RREG32_SMC(reg_ctrl);
1090 tmp &= ~reg_mask;
1091 tmp |= dividers.post_divider;
1092 WREG32_SMC(reg_ctrl, tmp);
1093
1094 for (i = 0; i < 100; i++) {
1095 if (RREG32_SMC(reg_status) & status_mask)
1096 break;
1097 mdelay(10);
1098 }
1099
1100 if (i == 100)
1101 return -ETIMEDOUT;
1102
1103 return 0;
1104 }
1105
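/* Enable ASPM by programming the L0s/L1 inactivity timers and clearing the
 * PMI-to-L1 disable bit in PCIE_LC_CNTL.
 */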
1106 static void vi_enable_aspm(struct amdgpu_device *adev)
1107 {
1108 u32 data, orig;
1109
1110 orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
1111 data |= PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT <<
1112 PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
1113 data |= PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT <<
1114 PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
1115 data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
1116 data |= PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK;
1117 if (orig != data)
1118 WREG32_PCIE(ixPCIE_LC_CNTL, data);
1119 }
1120
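/* Full ASPM programming for dGPUs (Polaris and newer VI parts): configure
 * the PCIe link controller, detect whether L1 substates are enabled (via the
 * LC override bits or the PCI L1 PM substates control register), and, when
 * the link advertises CLKREQ# clock power management, switch several internal
 * clock selects and enable the related gating.  Finishes by calling
 * vi_enable_aspm() and applying Polaris link-training workarounds.
 */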
1121 static void vi_program_aspm(struct amdgpu_device *adev)
1122 {
1123 u32 data, data1, orig;
1124 bool bL1SS = false;
1125 bool bClkReqSupport = true;
1126
1127 if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_pcie_dynamic_switching_supported())
1128 return;
1129
1130 if (adev->flags & AMD_IS_APU ||
1131 adev->asic_type < CHIP_POLARIS10)
1132 return;
1133
1134 orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
1135 data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
1136 data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
1137 data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
1138 if (orig != data)
1139 WREG32_PCIE(ixPCIE_LC_CNTL, data);
1140
1141 orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
1142 data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
1143 data |= 0x0024 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT;
1144 data |= PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
1145 if (orig != data)
1146 WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);
1147
1148 orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
1149 data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
1150 if (orig != data)
1151 WREG32_PCIE(ixPCIE_LC_CNTL3, data);
1152
1153 orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
1154 data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
1155 if (orig != data)
1156 WREG32_PCIE(ixPCIE_P_CNTL, data);
1157
1158 data = RREG32_PCIE(ixPCIE_LC_L1_PM_SUBSTATE);
1159 pci_read_config_dword(adev->pdev, PCIE_L1_PM_SUB_CNTL, &data1);
1160 if (data & PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK &&
1161 (data & (PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK |
1162 PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK |
1163 PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK |
1164 PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK))) {
1165 bL1SS = true;
1166 } else if (data1 & (PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK |
1167 PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK |
1168 PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK |
1169 PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK)) {
1170 bL1SS = true;
1171 }
1172
1173 orig = data = RREG32_PCIE(ixPCIE_LC_CNTL6);
1174 data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK;
1175 if (orig != data)
1176 WREG32_PCIE(ixPCIE_LC_CNTL6, data);
1177
1178 orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
1179 data |= PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
1180 if (orig != data)
1181 WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);
1182
1183 pci_read_config_dword(adev->pdev, LINK_CAP, &data);
1184 if (!(data & PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK))
1185 bClkReqSupport = false;
1186
1187 if (bClkReqSupport) {
1188 orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
1189 data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
1190 data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
1191 (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
1192 if (orig != data)
1193 WREG32_SMC(ixTHM_CLK_CNTL, data);
1194
1195 orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
1196 data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
1197 MISC_CLK_CTRL__ZCLK_SEL_MASK | MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK);
1198 data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
1199 (1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
1200 data |= (0x20 << MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT);
1201 if (orig != data)
1202 WREG32_SMC(ixMISC_CLK_CTRL, data);
1203
1204 orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
1205 data |= CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK;
1206 if (orig != data)
1207 WREG32_SMC(ixCG_CLKPIN_CNTL, data);
1208
1209 orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
1210 data |= CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK;
1211 if (orig != data)
1212 WREG32_SMC(ixCG_CLKPIN_CNTL, data);
1213
1214 orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
1215 data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
1216 data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
1217 if (orig != data)
1218 WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);
1219
1220 orig = data = RREG32_PCIE(ixCPM_CONTROL);
1221 data |= (CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK |
1222 CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK);
1223 if (orig != data)
1224 WREG32_PCIE(ixCPM_CONTROL, data);
1225
1226 orig = data = RREG32_PCIE(ixPCIE_CONFIG_CNTL);
1227 data &= ~PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK;
1228 data |= (0xE << PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT);
1229 if (orig != data)
1230 WREG32_PCIE(ixPCIE_CONFIG_CNTL, data);
1231
1232 orig = data = RREG32(mmBIF_CLK_CTRL);
1233 data |= BIF_CLK_CTRL__BIF_XSTCLK_READY_MASK;
1234 if (orig != data)
1235 WREG32(mmBIF_CLK_CTRL, data);
1236
1237 orig = data = RREG32_PCIE(ixPCIE_LC_CNTL7);
1238 data |= PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK;
1239 if (orig != data)
1240 WREG32_PCIE(ixPCIE_LC_CNTL7, data);
1241
1242 orig = data = RREG32_PCIE(ixPCIE_HW_DEBUG);
1243 data |= PCIE_HW_DEBUG__HW_01_DEBUG_MASK;
1244 if (orig != data)
1245 WREG32_PCIE(ixPCIE_HW_DEBUG, data);
1246
1247 orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
1248 data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
1249 data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
1250 if (bL1SS)
1251 data &= ~PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
1252 if (orig != data)
1253 WREG32_PCIE(ixPCIE_LC_CNTL2, data);
1254
1255 }
1256
1257 vi_enable_aspm(adev);
1258
1259 data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
1260 data1 = RREG32_PCIE(ixPCIE_LC_STATUS1);
1261 if (((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) &&
1262 data1 & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK &&
1263 data1 & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK) {
1264 orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
1265 data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
1266 if (orig != data)
1267 WREG32_PCIE(ixPCIE_LC_CNTL, data);
1268 }
1269
1270 if ((adev->asic_type == CHIP_POLARIS12 &&
1271 !(ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))) ||
1272 ASIC_IS_P22(adev->asic_type, adev->external_rev_id)) {
1273 orig = data = RREG32_PCIE(ixPCIE_LC_TRAINING_CNTL);
1274 data &= ~PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK;
1275 if (orig != data)
1276 WREG32_PCIE(ixPCIE_LC_TRAINING_CNTL, data);
1277 }
1278 }
1279
1280 static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
1281 bool enable)
1282 {
1283 u32 tmp;
1284
1285 /* not necessary on CZ */
1286 if (adev->flags & AMD_IS_APU)
1287 return;
1288
1289 tmp = RREG32(mmBIF_DOORBELL_APER_EN);
1290 if (enable)
1291 tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
1292 else
1293 tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);
1294
1295 WREG32(mmBIF_DOORBELL_APER_EN, tmp);
1296 }
1297
1298 #define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044
1299 #define ATI_REV_ID_FUSE_MACRO__SHIFT 9
1300 #define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00
1301
1302 static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
1303 {
1304 if (adev->flags & AMD_IS_APU)
1305 return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
1306 >> ATI_REV_ID_FUSE_MACRO__SHIFT;
1307 else
1308 return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
1309 >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
1310 }
1311
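/* Flush the HDP write cache, either directly through MMIO or, when a ring
 * that can emit register writes is provided, from within the command stream.
 */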
1312 static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
1313 {
1314 if (!ring || !ring->funcs->emit_wreg) {
1315 WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1316 RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
1317 } else {
1318 amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1319 }
1320 }
1321
1322 static void vi_invalidate_hdp(struct amdgpu_device *adev,
1323 struct amdgpu_ring *ring)
1324 {
1325 if (!ring || !ring->funcs->emit_wreg) {
1326 WREG32(mmHDP_DEBUG0, 1);
1327 RREG32(mmHDP_DEBUG0);
1328 } else {
1329 amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
1330 }
1331 }
1332
1333 static bool vi_need_full_reset(struct amdgpu_device *adev)
1334 {
1335 switch (adev->asic_type) {
1336 case CHIP_CARRIZO:
1337 case CHIP_STONEY:
1338 /* CZ has hang issues with full reset at the moment */
1339 return false;
1340 case CHIP_FIJI:
1341 case CHIP_TONGA:
1342 /* XXX: soft reset should work on fiji and tonga */
1343 return true;
1344 case CHIP_POLARIS10:
1345 case CHIP_POLARIS11:
1346 case CHIP_POLARIS12:
1347 case CHIP_TOPAZ:
1348 default:
1349 /* change this when we support soft reset */
1350 return true;
1351 }
1352 }
1353
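/* Sample the PCIe TXCLK performance counters for ~1s and return the raw
 * counts of received messages (count0) and posted requests sent (count1),
 * including the upper overflow bits.
 */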
1354 static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
1355 uint64_t *count1)
1356 {
1357 uint32_t perfctr = 0;
1358 uint64_t cnt0_of, cnt1_of;
1359 int tmp;
1360
1361 /* This reports 0 on APUs, so return to avoid writing/reading registers
1362 * that may or may not be different from their GPU counterparts
1363 */
1364 if (adev->flags & AMD_IS_APU)
1365 return;
1366
1367 /* Set the 2 events that we wish to watch, defined above */
1368 /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
1369 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
1370 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
1371
1372 /* Write to enable desired perf counters */
1373 WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
1374 /* Zero out and enable the perf counters
1375 * Write 0x5:
1376 * Bit 0 = Start all counters(1)
1377 * Bit 2 = Global counter reset enable(1)
1378 */
1379 WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
1380
1381 msleep(1000);
1382
1383 /* Load the shadow and disable the perf counters
1384 * Write 0x2:
1385 * Bit 0 = Stop counters(0)
1386 * Bit 1 = Load the shadow counters(1)
1387 */
1388 WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
1389
1390 /* Read register values to get any >32bit overflow */
1391 tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
1392 cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
1393 cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
1394
1395 /* Get the values and add the overflow */
1396 *count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
1397 *count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
1398 }
1399
1400 static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
1401 {
1402 uint64_t nak_r, nak_g;
1403
1404 /* Get the number of NAKs received and generated */
1405 nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
1406 nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
1407
1408 /* Add the total number of NAKs, i.e the number of replays */
1409 return (nak_r + nak_g);
1410 }
1411
1412 static bool vi_need_reset_on_init(struct amdgpu_device *adev)
1413 {
1414 u32 clock_cntl, pc;
1415
1416 if (adev->flags & AMD_IS_APU)
1417 return false;
1418
1419 /* check if the SMC is already running */
1420 clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
1421 pc = RREG32_SMC(ixSMC_PC_C);
1422 if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
1423 (0x20100 <= pc))
1424 return true;
1425
1426 return false;
1427 }
1428
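/* Nothing to do before ASIC init on VI parts. */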
1429 static void vi_pre_asic_init(struct amdgpu_device *adev)
1430 {
1431 }
1432
1433 static const struct amdgpu_asic_funcs vi_asic_funcs =
1434 {
1435 .read_disabled_bios = &vi_read_disabled_bios,
1436 .read_bios_from_rom = &vi_read_bios_from_rom,
1437 .read_register = &vi_read_register,
1438 .reset = &vi_asic_reset,
1439 .reset_method = &vi_asic_reset_method,
1440 .get_xclk = &vi_get_xclk,
1441 .set_uvd_clocks = &vi_set_uvd_clocks,
1442 .set_vce_clocks = &vi_set_vce_clocks,
1443 .get_config_memsize = &vi_get_config_memsize,
1444 .flush_hdp = &vi_flush_hdp,
1445 .invalidate_hdp = &vi_invalidate_hdp,
1446 .need_full_reset = &vi_need_full_reset,
1447 .init_doorbell_index = &legacy_doorbell_index_init,
1448 .get_pcie_usage = &vi_get_pcie_usage,
1449 .need_reset_on_init = &vi_need_reset_on_init,
1450 .get_pcie_replay_count = &vi_get_pcie_replay_count,
1451 .supports_baco = &vi_asic_supports_baco,
1452 .pre_asic_init = &vi_pre_asic_init,
1453 .query_video_codecs = &vi_query_video_codecs,
1454 };
1455
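/* Carrizo PCI revision IDs that correspond to Bristol Ridge silicon. */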
1456 #define CZ_REV_BRISTOL(rev) \
1457 ((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
1458
1459 static int vi_common_early_init(void *handle)
1460 {
1461 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1462
1463 if (adev->flags & AMD_IS_APU) {
1464 adev->smc_rreg = &cz_smc_rreg;
1465 adev->smc_wreg = &cz_smc_wreg;
1466 } else {
1467 adev->smc_rreg = &vi_smc_rreg;
1468 adev->smc_wreg = &vi_smc_wreg;
1469 }
1470 adev->pcie_rreg = &vi_pcie_rreg;
1471 adev->pcie_wreg = &vi_pcie_wreg;
1472 adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
1473 adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
1474 adev->didt_rreg = &vi_didt_rreg;
1475 adev->didt_wreg = &vi_didt_wreg;
1476 adev->gc_cac_rreg = &vi_gc_cac_rreg;
1477 adev->gc_cac_wreg = &vi_gc_cac_wreg;
1478
1479 adev->asic_funcs = &vi_asic_funcs;
1480
1481 adev->rev_id = vi_get_rev_id(adev);
1482 adev->external_rev_id = 0xFF;
1483 switch (adev->asic_type) {
1484 case CHIP_TOPAZ:
1485 adev->cg_flags = 0;
1486 adev->pg_flags = 0;
1487 adev->external_rev_id = 0x1;
1488 break;
1489 case CHIP_FIJI:
1490 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1491 AMD_CG_SUPPORT_GFX_MGLS |
1492 AMD_CG_SUPPORT_GFX_RLC_LS |
1493 AMD_CG_SUPPORT_GFX_CP_LS |
1494 AMD_CG_SUPPORT_GFX_CGTS |
1495 AMD_CG_SUPPORT_GFX_CGTS_LS |
1496 AMD_CG_SUPPORT_GFX_CGCG |
1497 AMD_CG_SUPPORT_GFX_CGLS |
1498 AMD_CG_SUPPORT_SDMA_MGCG |
1499 AMD_CG_SUPPORT_SDMA_LS |
1500 AMD_CG_SUPPORT_BIF_LS |
1501 AMD_CG_SUPPORT_HDP_MGCG |
1502 AMD_CG_SUPPORT_HDP_LS |
1503 AMD_CG_SUPPORT_ROM_MGCG |
1504 AMD_CG_SUPPORT_MC_MGCG |
1505 AMD_CG_SUPPORT_MC_LS |
1506 AMD_CG_SUPPORT_UVD_MGCG;
1507 adev->pg_flags = 0;
1508 adev->external_rev_id = adev->rev_id + 0x3c;
1509 break;
1510 case CHIP_TONGA:
1511 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1512 AMD_CG_SUPPORT_GFX_CGCG |
1513 AMD_CG_SUPPORT_GFX_CGLS |
1514 AMD_CG_SUPPORT_SDMA_MGCG |
1515 AMD_CG_SUPPORT_SDMA_LS |
1516 AMD_CG_SUPPORT_BIF_LS |
1517 AMD_CG_SUPPORT_HDP_MGCG |
1518 AMD_CG_SUPPORT_HDP_LS |
1519 AMD_CG_SUPPORT_ROM_MGCG |
1520 AMD_CG_SUPPORT_MC_MGCG |
1521 AMD_CG_SUPPORT_MC_LS |
1522 AMD_CG_SUPPORT_DRM_LS |
1523 AMD_CG_SUPPORT_UVD_MGCG;
1524 adev->pg_flags = 0;
1525 adev->external_rev_id = adev->rev_id + 0x14;
1526 break;
1527 case CHIP_POLARIS11:
1528 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1529 AMD_CG_SUPPORT_GFX_RLC_LS |
1530 AMD_CG_SUPPORT_GFX_CP_LS |
1531 AMD_CG_SUPPORT_GFX_CGCG |
1532 AMD_CG_SUPPORT_GFX_CGLS |
1533 AMD_CG_SUPPORT_GFX_3D_CGCG |
1534 AMD_CG_SUPPORT_GFX_3D_CGLS |
1535 AMD_CG_SUPPORT_SDMA_MGCG |
1536 AMD_CG_SUPPORT_SDMA_LS |
1537 AMD_CG_SUPPORT_BIF_MGCG |
1538 AMD_CG_SUPPORT_BIF_LS |
1539 AMD_CG_SUPPORT_HDP_MGCG |
1540 AMD_CG_SUPPORT_HDP_LS |
1541 AMD_CG_SUPPORT_ROM_MGCG |
1542 AMD_CG_SUPPORT_MC_MGCG |
1543 AMD_CG_SUPPORT_MC_LS |
1544 AMD_CG_SUPPORT_DRM_LS |
1545 AMD_CG_SUPPORT_UVD_MGCG |
1546 AMD_CG_SUPPORT_VCE_MGCG;
1547 adev->pg_flags = 0;
1548 adev->external_rev_id = adev->rev_id + 0x5A;
1549 break;
1550 case CHIP_POLARIS10:
1551 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1552 AMD_CG_SUPPORT_GFX_RLC_LS |
1553 AMD_CG_SUPPORT_GFX_CP_LS |
1554 AMD_CG_SUPPORT_GFX_CGCG |
1555 AMD_CG_SUPPORT_GFX_CGLS |
1556 AMD_CG_SUPPORT_GFX_3D_CGCG |
1557 AMD_CG_SUPPORT_GFX_3D_CGLS |
1558 AMD_CG_SUPPORT_SDMA_MGCG |
1559 AMD_CG_SUPPORT_SDMA_LS |
1560 AMD_CG_SUPPORT_BIF_MGCG |
1561 AMD_CG_SUPPORT_BIF_LS |
1562 AMD_CG_SUPPORT_HDP_MGCG |
1563 AMD_CG_SUPPORT_HDP_LS |
1564 AMD_CG_SUPPORT_ROM_MGCG |
1565 AMD_CG_SUPPORT_MC_MGCG |
1566 AMD_CG_SUPPORT_MC_LS |
1567 AMD_CG_SUPPORT_DRM_LS |
1568 AMD_CG_SUPPORT_UVD_MGCG |
1569 AMD_CG_SUPPORT_VCE_MGCG;
1570 adev->pg_flags = 0;
1571 adev->external_rev_id = adev->rev_id + 0x50;
1572 break;
1573 case CHIP_POLARIS12:
1574 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1575 AMD_CG_SUPPORT_GFX_RLC_LS |
1576 AMD_CG_SUPPORT_GFX_CP_LS |
1577 AMD_CG_SUPPORT_GFX_CGCG |
1578 AMD_CG_SUPPORT_GFX_CGLS |
1579 AMD_CG_SUPPORT_GFX_3D_CGCG |
1580 AMD_CG_SUPPORT_GFX_3D_CGLS |
1581 AMD_CG_SUPPORT_SDMA_MGCG |
1582 AMD_CG_SUPPORT_SDMA_LS |
1583 AMD_CG_SUPPORT_BIF_MGCG |
1584 AMD_CG_SUPPORT_BIF_LS |
1585 AMD_CG_SUPPORT_HDP_MGCG |
1586 AMD_CG_SUPPORT_HDP_LS |
1587 AMD_CG_SUPPORT_ROM_MGCG |
1588 AMD_CG_SUPPORT_MC_MGCG |
1589 AMD_CG_SUPPORT_MC_LS |
1590 AMD_CG_SUPPORT_DRM_LS |
1591 AMD_CG_SUPPORT_UVD_MGCG |
1592 AMD_CG_SUPPORT_VCE_MGCG;
1593 adev->pg_flags = 0;
1594 adev->external_rev_id = adev->rev_id + 0x64;
1595 break;
1596 case CHIP_VEGAM:
1597 adev->cg_flags = 0;
1598 /*AMD_CG_SUPPORT_GFX_MGCG |
1599 AMD_CG_SUPPORT_GFX_RLC_LS |
1600 AMD_CG_SUPPORT_GFX_CP_LS |
1601 AMD_CG_SUPPORT_GFX_CGCG |
1602 AMD_CG_SUPPORT_GFX_CGLS |
1603 AMD_CG_SUPPORT_GFX_3D_CGCG |
1604 AMD_CG_SUPPORT_GFX_3D_CGLS |
1605 AMD_CG_SUPPORT_SDMA_MGCG |
1606 AMD_CG_SUPPORT_SDMA_LS |
1607 AMD_CG_SUPPORT_BIF_MGCG |
1608 AMD_CG_SUPPORT_BIF_LS |
1609 AMD_CG_SUPPORT_HDP_MGCG |
1610 AMD_CG_SUPPORT_HDP_LS |
1611 AMD_CG_SUPPORT_ROM_MGCG |
1612 AMD_CG_SUPPORT_MC_MGCG |
1613 AMD_CG_SUPPORT_MC_LS |
1614 AMD_CG_SUPPORT_DRM_LS |
1615 AMD_CG_SUPPORT_UVD_MGCG |
1616 AMD_CG_SUPPORT_VCE_MGCG;*/
1617 adev->pg_flags = 0;
1618 adev->external_rev_id = adev->rev_id + 0x6E;
1619 break;
1620 case CHIP_CARRIZO:
1621 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
1622 AMD_CG_SUPPORT_GFX_MGCG |
1623 AMD_CG_SUPPORT_GFX_MGLS |
1624 AMD_CG_SUPPORT_GFX_RLC_LS |
1625 AMD_CG_SUPPORT_GFX_CP_LS |
1626 AMD_CG_SUPPORT_GFX_CGTS |
1627 AMD_CG_SUPPORT_GFX_CGTS_LS |
1628 AMD_CG_SUPPORT_GFX_CGCG |
1629 AMD_CG_SUPPORT_GFX_CGLS |
1630 AMD_CG_SUPPORT_BIF_LS |
1631 AMD_CG_SUPPORT_HDP_MGCG |
1632 AMD_CG_SUPPORT_HDP_LS |
1633 AMD_CG_SUPPORT_SDMA_MGCG |
1634 AMD_CG_SUPPORT_SDMA_LS |
1635 AMD_CG_SUPPORT_VCE_MGCG;
1636 /* rev0 hardware requires workarounds to support PG */
1637 adev->pg_flags = 0;
1638 if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
1639 adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
1640 AMD_PG_SUPPORT_GFX_PIPELINE |
1641 AMD_PG_SUPPORT_CP |
1642 AMD_PG_SUPPORT_UVD |
1643 AMD_PG_SUPPORT_VCE;
1644 }
1645 adev->external_rev_id = adev->rev_id + 0x1;
1646 break;
1647 case CHIP_STONEY:
1648 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
1649 AMD_CG_SUPPORT_GFX_MGCG |
1650 AMD_CG_SUPPORT_GFX_MGLS |
1651 AMD_CG_SUPPORT_GFX_RLC_LS |
1652 AMD_CG_SUPPORT_GFX_CP_LS |
1653 AMD_CG_SUPPORT_GFX_CGTS |
1654 AMD_CG_SUPPORT_GFX_CGTS_LS |
1655 AMD_CG_SUPPORT_GFX_CGLS |
1656 AMD_CG_SUPPORT_BIF_LS |
1657 AMD_CG_SUPPORT_HDP_MGCG |
1658 AMD_CG_SUPPORT_HDP_LS |
1659 AMD_CG_SUPPORT_SDMA_MGCG |
1660 AMD_CG_SUPPORT_SDMA_LS |
1661 AMD_CG_SUPPORT_VCE_MGCG;
1662 adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
1663 AMD_PG_SUPPORT_GFX_SMG |
1664 AMD_PG_SUPPORT_GFX_PIPELINE |
1665 AMD_PG_SUPPORT_CP |
1666 AMD_PG_SUPPORT_UVD |
1667 AMD_PG_SUPPORT_VCE;
1668 adev->external_rev_id = adev->rev_id + 0x61;
1669 break;
1670 default:
1671 /* FIXME: not supported yet */
1672 return -EINVAL;
1673 }
1674
1675 if (amdgpu_sriov_vf(adev)) {
1676 amdgpu_virt_init_setting(adev);
1677 xgpu_vi_mailbox_set_irq_funcs(adev);
1678 }
1679
1680 return 0;
1681 }
1682
1683 static int vi_common_late_init(void *handle)
1684 {
1685 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1686
1687 if (amdgpu_sriov_vf(adev))
1688 xgpu_vi_mailbox_get_irq(adev);
1689
1690 return 0;
1691 }
1692
1693 static int vi_common_sw_init(void *handle)
1694 {
1695 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1696
1697 if (amdgpu_sriov_vf(adev))
1698 xgpu_vi_mailbox_add_irq_id(adev);
1699
1700 return 0;
1701 }
1702
1703 static int vi_common_sw_fini(void *handle)
1704 {
1705 return 0;
1706 }
1707
1708 static int vi_common_hw_init(void *handle)
1709 {
1710 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1711
1712 /* move the golden regs per IP block */
1713 vi_init_golden_registers(adev);
1714 /* enable aspm */
1715 vi_program_aspm(adev);
1716 /* enable the doorbell aperture */
1717 vi_enable_doorbell_aperture(adev, true);
1718
1719 return 0;
1720 }
1721
1722 static int vi_common_hw_fini(void *handle)
1723 {
1724 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1725
1726 /* disable the doorbell aperture */
1727 vi_enable_doorbell_aperture(adev, false);
1728
1729 if (amdgpu_sriov_vf(adev))
1730 xgpu_vi_mailbox_put_irq(adev);
1731
1732 return 0;
1733 }
1734
1735 static int vi_common_suspend(void *handle)
1736 {
1737 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1738
1739 return vi_common_hw_fini(adev);
1740 }
1741
1742 static int vi_common_resume(void *handle)
1743 {
1744 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1745
1746 return vi_common_hw_init(adev);
1747 }
1748
1749 static bool vi_common_is_idle(void *handle)
1750 {
1751 return true;
1752 }
1753
1754 static int vi_common_wait_for_idle(void *handle)
1755 {
1756 return 0;
1757 }
1758
1759 static int vi_common_soft_reset(void *handle)
1760 {
1761 return 0;
1762 }
1763
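/* The clockgating helpers below all follow the same pattern: read the
 * current register value, set or clear the relevant enable bits based on the
 * requested state and the cg_flags capability mask, and write the register
 * back only if the value actually changed.
 */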
1764 static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
1765 bool enable)
1766 {
1767 uint32_t temp, data;
1768
1769 temp = data = RREG32_PCIE(ixPCIE_CNTL2);
1770
1771 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
1772 data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1773 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1774 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
1775 else
1776 data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1777 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1778 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
1779
1780 if (temp != data)
1781 WREG32_PCIE(ixPCIE_CNTL2, data);
1782 }
1783
1784 static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
1785 bool enable)
1786 {
1787 uint32_t temp, data;
1788
1789 temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
1790
1791 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
1792 data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1793 else
1794 data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1795
1796 if (temp != data)
1797 WREG32(mmHDP_HOST_PATH_CNTL, data);
1798 }
1799
1800 static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
1801 bool enable)
1802 {
1803 uint32_t temp, data;
1804
1805 temp = data = RREG32(mmHDP_MEM_POWER_LS);
1806
1807 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1808 data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1809 else
1810 data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1811
1812 if (temp != data)
1813 WREG32(mmHDP_MEM_POWER_LS, data);
1814 }
1815
1816 static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
1817 bool enable)
1818 {
1819 uint32_t temp, data;
1820
1821 temp = data = RREG32(0x157a);
1822
1823 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1824 data |= 1;
1825 else
1826 data &= ~1;
1827
1828 if (temp != data)
1829 WREG32(0x157a, data);
1830 }
1831
1832
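/**
 * vi_update_rom_medium_grain_clock_gating - toggle ROM clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable clock gating
 *
 * Clear or set the soft override bits in CGTT_ROM_CLK_CTRL0, honouring
 * the ROM_MGCG clockgating flag (VI).
 */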
static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

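/**
 * vi_common_set_clockgating_state_by_smu - clockgating via the SMU
 *
 * @handle: amdgpu_device pointer
 * @state: clockgating state (gate or ungate)
 *
 * Build PP_CG_MSG_ID messages for the MC, SDMA, HDP, BIF, DRM and ROM
 * system blocks based on the supported cg_flags and hand them to the
 * SMU through amdgpu_dpm_set_clockgating_by_smu() (VI).
 * Returns 0 on success.
 */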
static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}

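/**
 * vi_common_set_clockgating_state - common clockgating callback
 *
 * @handle: amdgpu_device pointer
 * @state: clockgating state (gate or ungate)
 *
 * Program the BIF/HDP/DRM/ROM gating directly on Fiji, Carrizo and
 * Stoney, or route the request through the SMU on Tonga, Polaris and
 * VegaM; clockgating is skipped entirely under SR-IOV (VI).
 * Returns 0 on success.
 */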
static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

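/**
 * vi_common_get_clockgating_state - report current clockgating state
 *
 * @handle: amdgpu_device pointer
 * @flags: AMD_CG_SUPPORT flags to fill in
 *
 * Read back the BIF, HDP and ROM registers and set the matching
 * AMD_CG_SUPPORT flag for every feature found enabled (VI).
 */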
static void vi_common_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

void vi_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_vi_virt_ops;
}

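/**
 * vi_set_ip_blocks - register the IP blocks for a VI asic
 *
 * @adev: amdgpu_device pointer
 *
 * Add the common, GMC, IH, GFX, SDMA, SMU, display and multimedia
 * IP blocks that match the detected asic type (VI).
 * Returns 0 on success, -EINVAL for unsupported asics.
 */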
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	amdgpu_device_set_sriov_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

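/**
 * legacy_doorbell_index_init - init legacy doorbell indices
 *
 * @adev: amdgpu_device pointer
 *
 * Assign the legacy doorbell slots used for the KIQ, MEC, GFX,
 * SDMA and IH rings (VI).
 */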
void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}