xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision 9096d6e5)
1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23e60f8db5SAlex Xie #include <linux/firmware.h>
24fd5fd480SChunming Zhou #include <drm/drm_cache.h>
25e60f8db5SAlex Xie #include "amdgpu.h"
26e60f8db5SAlex Xie #include "gmc_v9_0.h"
278d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
28e60f8db5SAlex Xie 
2975199b8cSFeifei Xu #include "hdp/hdp_4_0_offset.h"
3075199b8cSFeifei Xu #include "hdp/hdp_4_0_sh_mask.h"
31cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
32135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
33135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
34fb960bd2SFeifei Xu #include "vega10_enum.h"
3565417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
366ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
37e60f8db5SAlex Xie 
38946a4d5bSShaoyun Liu #include "soc15.h"
39e60f8db5SAlex Xie #include "soc15_common.h"
4090c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
41e60f8db5SAlex Xie 
42e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
43e60f8db5SAlex Xie #include "mmhub_v1_0.h"
44e60f8db5SAlex Xie 
45e60f8db5SAlex Xie #define mmDF_CS_AON0_DramBaseAddress0                                                                  0x0044
46e60f8db5SAlex Xie #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX                                                         0
47e60f8db5SAlex Xie //DF_CS_AON0_DramBaseAddress0
48e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT                                                        0x0
49e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT                                                    0x1
50e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT                                                      0x4
51e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT                                                      0x8
52e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT                                                      0xc
53e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK                                                          0x00000001L
54e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK                                                      0x00000002L
55e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK                                                        0x000000F0L
56e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK                                                        0x00000700L
57e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK                                                        0xFFFFF000L
58e60f8db5SAlex Xie 
59e60f8db5SAlex Xie /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
60e60f8db5SAlex Xie #define AMDGPU_NUM_OF_VMIDS			8
61e60f8db5SAlex Xie 
/* Vega10 HDP golden register settings, as {offset, and-mask, or-value}
 * triples: each masked register (0xf64..0xf6e, 0xf69 absent) has its low
 * 28 bits cleared.  Consumed by the driver's golden-register programming. */
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
75e60f8db5SAlex Xie 
/* MMHUB 1.0.0 golden register settings (SOC15 masked read-modify-write). */
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
815c583018SEvan Quan 
/* ATHUB 1.0.0 golden register settings for the RPB arbiter control regs. */
static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
875c583018SEvan Quan 
8802bab923SDavid Panariti /* Ecc related register addresses, (BASE + reg offset) */
8902bab923SDavid Panariti /* Universal Memory Controller caps (may be fused). */
9002bab923SDavid Panariti /* UMCCH:UmcLocalCap */
9102bab923SDavid Panariti #define UMCLOCALCAPS_ADDR0	(0x00014306 + 0x00000000)
9202bab923SDavid Panariti #define UMCLOCALCAPS_ADDR1	(0x00014306 + 0x00000800)
9302bab923SDavid Panariti #define UMCLOCALCAPS_ADDR2	(0x00014306 + 0x00001000)
9402bab923SDavid Panariti #define UMCLOCALCAPS_ADDR3	(0x00014306 + 0x00001800)
9502bab923SDavid Panariti #define UMCLOCALCAPS_ADDR4	(0x00054306 + 0x00000000)
9602bab923SDavid Panariti #define UMCLOCALCAPS_ADDR5	(0x00054306 + 0x00000800)
9702bab923SDavid Panariti #define UMCLOCALCAPS_ADDR6	(0x00054306 + 0x00001000)
9802bab923SDavid Panariti #define UMCLOCALCAPS_ADDR7	(0x00054306 + 0x00001800)
9902bab923SDavid Panariti #define UMCLOCALCAPS_ADDR8	(0x00094306 + 0x00000000)
10002bab923SDavid Panariti #define UMCLOCALCAPS_ADDR9	(0x00094306 + 0x00000800)
10102bab923SDavid Panariti #define UMCLOCALCAPS_ADDR10	(0x00094306 + 0x00001000)
10202bab923SDavid Panariti #define UMCLOCALCAPS_ADDR11	(0x00094306 + 0x00001800)
10302bab923SDavid Panariti #define UMCLOCALCAPS_ADDR12	(0x000d4306 + 0x00000000)
10402bab923SDavid Panariti #define UMCLOCALCAPS_ADDR13	(0x000d4306 + 0x00000800)
10502bab923SDavid Panariti #define UMCLOCALCAPS_ADDR14	(0x000d4306 + 0x00001000)
10602bab923SDavid Panariti #define UMCLOCALCAPS_ADDR15	(0x000d4306 + 0x00001800)
10702bab923SDavid Panariti 
10802bab923SDavid Panariti /* Universal Memory Controller Channel config. */
10902bab923SDavid Panariti /* UMCCH:UMC_CONFIG */
11002bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR0	(0x00014040 + 0x00000000)
11102bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR1	(0x00014040 + 0x00000800)
11202bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR2	(0x00014040 + 0x00001000)
11302bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR3	(0x00014040 + 0x00001800)
11402bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR4	(0x00054040 + 0x00000000)
11502bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR5	(0x00054040 + 0x00000800)
11602bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR6	(0x00054040 + 0x00001000)
11702bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR7	(0x00054040 + 0x00001800)
11802bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR8	(0x00094040 + 0x00000000)
11902bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR9	(0x00094040 + 0x00000800)
12002bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR10	(0x00094040 + 0x00001000)
12102bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR11	(0x00094040 + 0x00001800)
12202bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR12	(0x000d4040 + 0x00000000)
12302bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR13	(0x000d4040 + 0x00000800)
12402bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR14	(0x000d4040 + 0x00001000)
12502bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR15	(0x000d4040 + 0x00001800)
12602bab923SDavid Panariti 
12702bab923SDavid Panariti /* Universal Memory Controller Channel Ecc config. */
12802bab923SDavid Panariti /* UMCCH:EccCtrl */
12902bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR0	(0x00014053 + 0x00000000)
13002bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR1	(0x00014053 + 0x00000800)
13102bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR2	(0x00014053 + 0x00001000)
13202bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR3	(0x00014053 + 0x00001800)
13302bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR4	(0x00054053 + 0x00000000)
13402bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR5	(0x00054053 + 0x00000800)
13502bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR6	(0x00054053 + 0x00001000)
13602bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR7	(0x00054053 + 0x00001800)
13702bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR8	(0x00094053 + 0x00000000)
13802bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR9	(0x00094053 + 0x00000800)
13902bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR10	(0x00094053 + 0x00001000)
14002bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR11	(0x00094053 + 0x00001800)
14102bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR12	(0x000d4053 + 0x00000000)
14202bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR13	(0x000d4053 + 0x00000800)
14302bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR14	(0x000d4053 + 0x00001000)
14402bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR15	(0x000d4053 + 0x00001800)
14502bab923SDavid Panariti 
/* Per-channel UMCCH:UmcLocalCap register addresses for all 16 UMC
 * channels; iterated by gmc_v9_0_ecc_available() to read ECC capability. */
static const uint32_t ecc_umclocalcap_addrs[] = {
	UMCLOCALCAPS_ADDR0,
	UMCLOCALCAPS_ADDR1,
	UMCLOCALCAPS_ADDR2,
	UMCLOCALCAPS_ADDR3,
	UMCLOCALCAPS_ADDR4,
	UMCLOCALCAPS_ADDR5,
	UMCLOCALCAPS_ADDR6,
	UMCLOCALCAPS_ADDR7,
	UMCLOCALCAPS_ADDR8,
	UMCLOCALCAPS_ADDR9,
	UMCLOCALCAPS_ADDR10,
	UMCLOCALCAPS_ADDR11,
	UMCLOCALCAPS_ADDR12,
	UMCLOCALCAPS_ADDR13,
	UMCLOCALCAPS_ADDR14,
	UMCLOCALCAPS_ADDR15,
};
16402bab923SDavid Panariti 
/* Per-channel UMCCH:UMC_CONFIG register addresses for all 16 UMC channels;
 * used to check the DramReady field during ECC availability probing. */
static const uint32_t ecc_umcch_umc_config_addrs[] = {
	UMCCH_UMC_CONFIG_ADDR0,
	UMCCH_UMC_CONFIG_ADDR1,
	UMCCH_UMC_CONFIG_ADDR2,
	UMCCH_UMC_CONFIG_ADDR3,
	UMCCH_UMC_CONFIG_ADDR4,
	UMCCH_UMC_CONFIG_ADDR5,
	UMCCH_UMC_CONFIG_ADDR6,
	UMCCH_UMC_CONFIG_ADDR7,
	UMCCH_UMC_CONFIG_ADDR8,
	UMCCH_UMC_CONFIG_ADDR9,
	UMCCH_UMC_CONFIG_ADDR10,
	UMCCH_UMC_CONFIG_ADDR11,
	UMCCH_UMC_CONFIG_ADDR12,
	UMCCH_UMC_CONFIG_ADDR13,
	UMCCH_UMC_CONFIG_ADDR14,
	UMCCH_UMC_CONFIG_ADDR15,
};
18302bab923SDavid Panariti 
/* Per-channel UMCCH:EccCtrl register addresses for all 16 UMC channels;
 * used to check the WrEccEn/RdEccEn fields during ECC availability probing. */
static const uint32_t ecc_umcch_eccctrl_addrs[] = {
	UMCCH_ECCCTRL_ADDR0,
	UMCCH_ECCCTRL_ADDR1,
	UMCCH_ECCCTRL_ADDR2,
	UMCCH_ECCCTRL_ADDR3,
	UMCCH_ECCCTRL_ADDR4,
	UMCCH_ECCCTRL_ADDR5,
	UMCCH_ECCCTRL_ADDR6,
	UMCCH_ECCCTRL_ADDR7,
	UMCCH_ECCCTRL_ADDR8,
	UMCCH_ECCCTRL_ADDR9,
	UMCCH_ECCCTRL_ADDR10,
	UMCCH_ECCCTRL_ADDR11,
	UMCCH_ECCCTRL_ADDR12,
	UMCCH_ECCCTRL_ADDR13,
	UMCCH_ECCCTRL_ADDR14,
	UMCCH_ECCCTRL_ADDR15,
};
20202bab923SDavid Panariti 
203e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
204e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
205e60f8db5SAlex Xie 					unsigned type,
206e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
207e60f8db5SAlex Xie {
208e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
209ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
210e60f8db5SAlex Xie 
21111250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21211250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21311250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21411250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21511250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21611250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21711250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
21811250164SChristian König 
219e60f8db5SAlex Xie 	switch (state) {
220e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
221ae6d1416STom St Denis 		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
222ae6d1416STom St Denis 			hub = &adev->vmhub[j];
223e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
224e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
225e60f8db5SAlex Xie 				tmp = RREG32(reg);
226e60f8db5SAlex Xie 				tmp &= ~bits;
227e60f8db5SAlex Xie 				WREG32(reg, tmp);
228e60f8db5SAlex Xie 			}
229e60f8db5SAlex Xie 		}
230e60f8db5SAlex Xie 		break;
231e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
232ae6d1416STom St Denis 		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
233ae6d1416STom St Denis 			hub = &adev->vmhub[j];
234e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
235e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
236e60f8db5SAlex Xie 				tmp = RREG32(reg);
237e60f8db5SAlex Xie 				tmp |= bits;
238e60f8db5SAlex Xie 				WREG32(reg, tmp);
239e60f8db5SAlex Xie 			}
240e60f8db5SAlex Xie 		}
241e60f8db5SAlex Xie 	default:
242e60f8db5SAlex Xie 		break;
243e60f8db5SAlex Xie 	}
244e60f8db5SAlex Xie 
245e60f8db5SAlex Xie 	return 0;
246e60f8db5SAlex Xie }
247e60f8db5SAlex Xie 
/**
 * gmc_v9_0_process_interrupt - handle a VM protection fault interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source (unused here)
 * @entry: decoded interrupt vector entry describing the fault
 *
 * Reads (and acks) the L2 protection fault status on bare metal and logs a
 * rate-limited description of the faulting client, VMID/PASID and address.
 *
 * Returns 0 always.
 */
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	/* vmid_src selects which hub (0 = gfxhub, 1 = mmhub) raised the fault */
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

	/* Faulting page address: src_data[0] holds bits 47:12, the low
	 * nibble of src_data[1] holds bits 47:44 of the extended address. */
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	/* Fault status register is host-only; VFs must not touch it. */
	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		/* Write bit 0 of the fault CNTL register to clear the status. */
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	/* Rate-limit: a stuck client can fault on every cycle. */
	if (printk_ratelimit()) {
		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid);
		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}
280e60f8db5SAlex Xie 
/* IRQ source callbacks for VM faults: state toggling and fault processing. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
285e60f8db5SAlex Xie 
/* Register the VM-fault interrupt source (a single type) with the GMC. */
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}
291e60f8db5SAlex Xie 
/**
 * gmc_v9_0_get_invalidate_req - build a VM_INVALIDATE_ENG0_REQ value
 *
 * @vmid: VM instance whose translations should be invalidated
 *
 * Returns the register value requesting a legacy-mode invalidation of all
 * L1/L2 PTE and PDE caches for @vmid, without clearing fault status.
 */
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid*/
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	/* flush every translation-cache level for this VMID */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	/* leave the protection fault status untouched */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);

	return req;
}
31003f89febSChristian König 
311e60f8db5SAlex Xie /*
312e60f8db5SAlex Xie  * GART
313e60f8db5SAlex Xie  * VMID 0 is the physical GPU addresses as used by the kernel.
314e60f8db5SAlex Xie  * VMIDs 1-15 are used for userspace clients and are handled
315e60f8db5SAlex Xie  * by the amdgpu vm/hsa code.
316e60f8db5SAlex Xie  */
317e60f8db5SAlex Xie 
318e60f8db5SAlex Xie /**
319132f34e4SChristian König  * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
320e60f8db5SAlex Xie  *
321e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
322e60f8db5SAlex Xie  * @vmid: vm instance to flush
323e60f8db5SAlex Xie  *
324e60f8db5SAlex Xie  * Flush the TLB for the requested page table.
325e60f8db5SAlex Xie  */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	/* Serialize against other users of the invalidation engines. */
	spin_lock(&adev->gmc.invalidate_lock);

	/* Kick the invalidation on every hub (GFX and MM) and wait for it. */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK.*/
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			/* ACK register carries one bit per VMID. */
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;

		/* Wait for ACK with a delay.*/
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		/* Both polls exhausted: the flush never acknowledged. */
		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->gmc.invalidate_lock);
}
368e60f8db5SAlex Xie 
/**
 * gmc_v9_0_emit_flush_gpu_tlb - emit a TLB flush on a ring
 *
 * @ring: ring to emit the flush on
 * @vmid: VM instance to flush
 * @pasid: process address space id (unused in the emitted packets here)
 * @pd_addr: page directory base address
 *
 * Emits register writes that update the VMID's page-directory base on the
 * ring's hub and then request a TLB invalidation on the ring's dedicated
 * invalidation engine.
 *
 * Returns @pd_addr with the PDE flags (valid bit etc.) ORed in.
 */
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, unsigned pasid,
					    uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid);
	uint64_t flags = AMDGPU_PTE_VALID;
	unsigned eng = ring->vm_inv_eng;

	/* Convert pd_addr into PDE form (level -1 = root) and merge flags. */
	amdgpu_gmc_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
	pd_addr |= flags;

	/* Each VMID owns a lo/hi pair of PTB address registers (stride 2). */
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	/* Finally trigger the invalidation for this VMID. */
	amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);

	return pd_addr;
}
3919096d6e5SChristian König 
392e60f8db5SAlex Xie /**
393132f34e4SChristian König  * gmc_v9_0_set_pte_pde - update the page tables using MMIO
394e60f8db5SAlex Xie  *
395e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
396e60f8db5SAlex Xie  * @cpu_pt_addr: cpu address of the page table
397e60f8db5SAlex Xie  * @gpu_page_idx: entry in the page table to update
398e60f8db5SAlex Xie  * @addr: dst addr to write into pte/pde
399e60f8db5SAlex Xie  * @flags: access flags
400e60f8db5SAlex Xie  *
401e60f8db5SAlex Xie  * Update the page tables using the CPU.
402e60f8db5SAlex Xie  */
403132f34e4SChristian König static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
404132f34e4SChristian König 				uint32_t gpu_page_idx, uint64_t addr,
405e60f8db5SAlex Xie 				uint64_t flags)
406e60f8db5SAlex Xie {
407e60f8db5SAlex Xie 	void __iomem *ptr = (void *)cpu_pt_addr;
408e60f8db5SAlex Xie 	uint64_t value;
409e60f8db5SAlex Xie 
410e60f8db5SAlex Xie 	/*
411e60f8db5SAlex Xie 	 * PTE format on VEGA 10:
412e60f8db5SAlex Xie 	 * 63:59 reserved
413e60f8db5SAlex Xie 	 * 58:57 mtype
414e60f8db5SAlex Xie 	 * 56 F
415e60f8db5SAlex Xie 	 * 55 L
416e60f8db5SAlex Xie 	 * 54 P
417e60f8db5SAlex Xie 	 * 53 SW
418e60f8db5SAlex Xie 	 * 52 T
419e60f8db5SAlex Xie 	 * 50:48 reserved
420e60f8db5SAlex Xie 	 * 47:12 4k physical page base address
421e60f8db5SAlex Xie 	 * 11:7 fragment
422e60f8db5SAlex Xie 	 * 6 write
423e60f8db5SAlex Xie 	 * 5 read
424e60f8db5SAlex Xie 	 * 4 exe
425e60f8db5SAlex Xie 	 * 3 Z
426e60f8db5SAlex Xie 	 * 2 snooped
427e60f8db5SAlex Xie 	 * 1 system
428e60f8db5SAlex Xie 	 * 0 valid
429e60f8db5SAlex Xie 	 *
430e60f8db5SAlex Xie 	 * PDE format on VEGA 10:
431e60f8db5SAlex Xie 	 * 63:59 block fragment size
432e60f8db5SAlex Xie 	 * 58:55 reserved
433e60f8db5SAlex Xie 	 * 54 P
434e60f8db5SAlex Xie 	 * 53:48 reserved
435e60f8db5SAlex Xie 	 * 47:6 physical base address of PD or PTE
436e60f8db5SAlex Xie 	 * 5:3 reserved
437e60f8db5SAlex Xie 	 * 2 C
438e60f8db5SAlex Xie 	 * 1 system
439e60f8db5SAlex Xie 	 * 0 valid
440e60f8db5SAlex Xie 	 */
441e60f8db5SAlex Xie 
442e60f8db5SAlex Xie 	/*
443e60f8db5SAlex Xie 	 * The following is for PTE only. GART does not have PDEs.
444e60f8db5SAlex Xie 	*/
445e60f8db5SAlex Xie 	value = addr & 0x0000FFFFFFFFF000ULL;
446e60f8db5SAlex Xie 	value |= flags;
447e60f8db5SAlex Xie 	writeq(value, ptr + (gpu_page_idx * 8));
448e60f8db5SAlex Xie 	return 0;
449e60f8db5SAlex Xie }
450e60f8db5SAlex Xie 
451e60f8db5SAlex Xie static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
452e60f8db5SAlex Xie 						uint32_t flags)
453e60f8db5SAlex Xie 
454e60f8db5SAlex Xie {
455e60f8db5SAlex Xie 	uint64_t pte_flag = 0;
456e60f8db5SAlex Xie 
457e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
458e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_EXECUTABLE;
459e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_READABLE)
460e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_READABLE;
461e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
462e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_WRITEABLE;
463e60f8db5SAlex Xie 
464e60f8db5SAlex Xie 	switch (flags & AMDGPU_VM_MTYPE_MASK) {
465e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
466e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
467e60f8db5SAlex Xie 		break;
468e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
469e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
470e60f8db5SAlex Xie 		break;
471e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
472e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
473e60f8db5SAlex Xie 		break;
474e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
475e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
476e60f8db5SAlex Xie 		break;
477e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
478e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
479e60f8db5SAlex Xie 		break;
480e60f8db5SAlex Xie 	default:
481e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
482e60f8db5SAlex Xie 		break;
483e60f8db5SAlex Xie 	}
484e60f8db5SAlex Xie 
485e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_PRT)
486e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_PRT;
487e60f8db5SAlex Xie 
488e60f8db5SAlex Xie 	return pte_flag;
489e60f8db5SAlex Xie }
490e60f8db5SAlex Xie 
/**
 * gmc_v9_0_get_vm_pde - adjust a PDE address/flags for the hardware
 *
 * @adev: amdgpu_device pointer
 * @level: page-table level the PDE lives at (AMDGPU_VM_PDB0/PDB1/...)
 * @addr: in/out PDE address, translated to an MC address
 * @flags: in/out PDE flags, adjusted per level
 */
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	/* Plain PDEs point into VRAM: rebase from the CPU-visible VRAM
	 * range to the MC address space.  Huge-page entries (PDE_PTE) keep
	 * their address as-is. */
	if (!(*flags & AMDGPU_PDE_PTE))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	/* The result must be 64-byte aligned and within 48 address bits. */
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		/* With translate-further, PDB0 entries either lose the
		 * huge-page marker or gain the translate-further bit. */
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
514f75e237cSChristian König 
/* GMC callback table wired into adev->gmc.gmc_funcs by early init. */
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_set_pte_pde,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};
522e60f8db5SAlex Xie 
523132f34e4SChristian König static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
524e60f8db5SAlex Xie {
525132f34e4SChristian König 	if (adev->gmc.gmc_funcs == NULL)
526132f34e4SChristian König 		adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
527e60f8db5SAlex Xie }
528e60f8db5SAlex Xie 
/**
 * gmc_v9_0_early_init - early IP-block init
 *
 * @handle: amdgpu_device pointer (as void *, per the IP-block interface)
 *
 * Hooks up the GMC and IRQ callback tables and lays out the 4 GiB shared
 * and private apertures starting at 0x2000000000000000.
 *
 * Returns 0 always.
 */
static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	/* Shared aperture: 4 GiB; private aperture: the following 4 GiB. */
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}
546e60f8db5SAlex Xie 
54702bab923SDavid Panariti static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
54802bab923SDavid Panariti {
54902bab923SDavid Panariti 	uint32_t reg_val;
55002bab923SDavid Panariti 	uint32_t reg_addr;
55102bab923SDavid Panariti 	uint32_t field_val;
55202bab923SDavid Panariti 	size_t i;
55302bab923SDavid Panariti 	uint32_t fv2;
55402bab923SDavid Panariti 	size_t lost_sheep;
55502bab923SDavid Panariti 
55602bab923SDavid Panariti 	DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");
55702bab923SDavid Panariti 
55802bab923SDavid Panariti 	lost_sheep = 0;
55902bab923SDavid Panariti 	for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
56002bab923SDavid Panariti 		reg_addr = ecc_umclocalcap_addrs[i];
56102bab923SDavid Panariti 		DRM_DEBUG("ecc: "
56202bab923SDavid Panariti 			  "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
56302bab923SDavid Panariti 			  i, reg_addr);
56402bab923SDavid Panariti 		reg_val = RREG32(reg_addr);
56502bab923SDavid Panariti 		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
56602bab923SDavid Panariti 					  EccDis);
56702bab923SDavid Panariti 		DRM_DEBUG("ecc: "
56802bab923SDavid Panariti 			  "reg_val: 0x%08x, "
56902bab923SDavid Panariti 			  "EccDis: 0x%08x, ",
57002bab923SDavid Panariti 			  reg_val, field_val);
57102bab923SDavid Panariti 		if (field_val) {
57202bab923SDavid Panariti 			DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
57302bab923SDavid Panariti 			++lost_sheep;
57402bab923SDavid Panariti 		}
57502bab923SDavid Panariti 	}
57602bab923SDavid Panariti 
57702bab923SDavid Panariti 	for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
57802bab923SDavid Panariti 		reg_addr = ecc_umcch_umc_config_addrs[i];
57902bab923SDavid Panariti 		DRM_DEBUG("ecc: "
58002bab923SDavid Panariti 			  "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
58102bab923SDavid Panariti 			  i, reg_addr);
58202bab923SDavid Panariti 		reg_val = RREG32(reg_addr);
58302bab923SDavid Panariti 		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
58402bab923SDavid Panariti 					  DramReady);
58502bab923SDavid Panariti 		DRM_DEBUG("ecc: "
58602bab923SDavid Panariti 			  "reg_val: 0x%08x, "
58702bab923SDavid Panariti 			  "DramReady: 0x%08x\n",
58802bab923SDavid Panariti 			  reg_val, field_val);
58902bab923SDavid Panariti 
59002bab923SDavid Panariti 		if (!field_val) {
59102bab923SDavid Panariti 			DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
59202bab923SDavid Panariti 			++lost_sheep;
59302bab923SDavid Panariti 		}
59402bab923SDavid Panariti 	}
59502bab923SDavid Panariti 
59602bab923SDavid Panariti 	for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
59702bab923SDavid Panariti 		reg_addr = ecc_umcch_eccctrl_addrs[i];
59802bab923SDavid Panariti 		DRM_DEBUG("ecc: "
59902bab923SDavid Panariti 			  "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
60002bab923SDavid Panariti 			  i, reg_addr);
60102bab923SDavid Panariti 		reg_val = RREG32(reg_addr);
60202bab923SDavid Panariti 		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
60302bab923SDavid Panariti 					  WrEccEn);
60402bab923SDavid Panariti 		fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
60502bab923SDavid Panariti 				    RdEccEn);
60602bab923SDavid Panariti 		DRM_DEBUG("ecc: "
60702bab923SDavid Panariti 			  "reg_val: 0x%08x, "
60802bab923SDavid Panariti 			  "WrEccEn: 0x%08x, "
60902bab923SDavid Panariti 			  "RdEccEn: 0x%08x\n",
61002bab923SDavid Panariti 			  reg_val, field_val, fv2);
61102bab923SDavid Panariti 
61202bab923SDavid Panariti 		if (!field_val) {
6135a16008fSAlex Deucher 			DRM_DEBUG("ecc: WrEccEn is not set\n");
61402bab923SDavid Panariti 			++lost_sheep;
61502bab923SDavid Panariti 		}
61602bab923SDavid Panariti 		if (!fv2) {
6175a16008fSAlex Deucher 			DRM_DEBUG("ecc: RdEccEn is not set\n");
61802bab923SDavid Panariti 			++lost_sheep;
61902bab923SDavid Panariti 		}
62002bab923SDavid Panariti 	}
62102bab923SDavid Panariti 
62202bab923SDavid Panariti 	DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
62302bab923SDavid Panariti 	return lost_sheep == 0;
62402bab923SDavid Panariti }
62502bab923SDavid Panariti 
/*
 * Late init: assign a TLB invalidation engine to every ring, probe ECC on
 * Vega10, then enable the VM fault interrupt.
 */
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/*
	 * The latest engine allocation on gfx9 is:
	 * Engine 0, 1: idle
	 * Engine 2, 3: firmware
	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
	 * Engine 14~15: idle
	 * Engine 16: kfd tlb invalidation
	 * Engine 17: Gart flushes
	 */
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };	/* next free engine per hub */
	unsigned i;
	int r;

	/* Hand each ring the next free invalidation engine on its VM hub. */
	for(i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 16 is used for KFD and 17 for GART flushes */
	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 16);

	/*
	 * ECC probing is Vega10-only.  Per the caller contract here,
	 * gmc_v9_0_ecc_available() yields 1 (active), 0 (not present),
	 * or a negative error code.
	 */
	if (adev->asic_type == CHIP_VEGA10) {
		r = gmc_v9_0_ecc_available(adev);
		if (r == 1) {
			DRM_INFO("ECC is active.\n");
		} else if (r == 0) {
			DRM_INFO("ECC is not present.\n");
		} else {
			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
			return r;
		}
	}

	/* Finally arm the VM fault interrupt source. */
	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
670e60f8db5SAlex Xie 
671e60f8db5SAlex Xie static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
672770d13b1SChristian König 					struct amdgpu_gmc *mc)
673e60f8db5SAlex Xie {
674eeb2487dSMonk Liu 	u64 base = 0;
675eeb2487dSMonk Liu 	if (!amdgpu_sriov_vf(adev))
676eeb2487dSMonk Liu 		base = mmhub_v1_0_get_fb_location(adev);
677770d13b1SChristian König 	amdgpu_device_vram_location(adev, &adev->gmc, base);
6782543e28aSAlex Deucher 	amdgpu_device_gart_location(adev, mc);
679bc099ee9SChunming Zhou 	/* base offset of vram pages */
680bc099ee9SChunming Zhou 	if (adev->flags & AMD_IS_APU)
681bc099ee9SChunming Zhou 		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
682bc099ee9SChunming Zhou 	else
683bc099ee9SChunming Zhou 		adev->vm_manager.vram_base_offset = 0;
684e60f8db5SAlex Xie }
685e60f8db5SAlex Xie 
686e60f8db5SAlex Xie /**
687e60f8db5SAlex Xie  * gmc_v9_0_mc_init - initialize the memory controller driver params
688e60f8db5SAlex Xie  *
689e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
690e60f8db5SAlex Xie  *
691e60f8db5SAlex Xie  * Look up the amount of vram, vram width, and decide how to place
692e60f8db5SAlex Xie  * vram and gart within the GPU's physical address space.
693e60f8db5SAlex Xie  * Returns 0 for success.
694e60f8db5SAlex Xie  */
695e60f8db5SAlex Xie static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
696e60f8db5SAlex Xie {
697e60f8db5SAlex Xie 	u32 tmp;
698e60f8db5SAlex Xie 	int chansize, numchan;
699d6895ad3SChristian König 	int r;
700e60f8db5SAlex Xie 
701770d13b1SChristian König 	adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
702770d13b1SChristian König 	if (!adev->gmc.vram_width) {
703e60f8db5SAlex Xie 		/* hbm memory channel size */
704e60f8db5SAlex Xie 		chansize = 128;
705e60f8db5SAlex Xie 
706b9509c80SHuang Rui 		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
707e60f8db5SAlex Xie 		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
708e60f8db5SAlex Xie 		tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
709e60f8db5SAlex Xie 		switch (tmp) {
710e60f8db5SAlex Xie 		case 0:
711e60f8db5SAlex Xie 		default:
712e60f8db5SAlex Xie 			numchan = 1;
713e60f8db5SAlex Xie 			break;
714e60f8db5SAlex Xie 		case 1:
715e60f8db5SAlex Xie 			numchan = 2;
716e60f8db5SAlex Xie 			break;
717e60f8db5SAlex Xie 		case 2:
718e60f8db5SAlex Xie 			numchan = 0;
719e60f8db5SAlex Xie 			break;
720e60f8db5SAlex Xie 		case 3:
721e60f8db5SAlex Xie 			numchan = 4;
722e60f8db5SAlex Xie 			break;
723e60f8db5SAlex Xie 		case 4:
724e60f8db5SAlex Xie 			numchan = 0;
725e60f8db5SAlex Xie 			break;
726e60f8db5SAlex Xie 		case 5:
727e60f8db5SAlex Xie 			numchan = 8;
728e60f8db5SAlex Xie 			break;
729e60f8db5SAlex Xie 		case 6:
730e60f8db5SAlex Xie 			numchan = 0;
731e60f8db5SAlex Xie 			break;
732e60f8db5SAlex Xie 		case 7:
733e60f8db5SAlex Xie 			numchan = 16;
734e60f8db5SAlex Xie 			break;
735e60f8db5SAlex Xie 		case 8:
736e60f8db5SAlex Xie 			numchan = 2;
737e60f8db5SAlex Xie 			break;
738e60f8db5SAlex Xie 		}
739770d13b1SChristian König 		adev->gmc.vram_width = numchan * chansize;
7408d6a5230SAlex Deucher 	}
741e60f8db5SAlex Xie 
742e60f8db5SAlex Xie 	/* size in MB on si */
743770d13b1SChristian König 	adev->gmc.mc_vram_size =
744bf383fb6SAlex Deucher 		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
745770d13b1SChristian König 	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
746d6895ad3SChristian König 
747d6895ad3SChristian König 	if (!(adev->flags & AMD_IS_APU)) {
748d6895ad3SChristian König 		r = amdgpu_device_resize_fb_bar(adev);
749d6895ad3SChristian König 		if (r)
750d6895ad3SChristian König 			return r;
751d6895ad3SChristian König 	}
752770d13b1SChristian König 	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
753770d13b1SChristian König 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
754e60f8db5SAlex Xie 
755156a81beSChunming Zhou #ifdef CONFIG_X86_64
756156a81beSChunming Zhou 	if (adev->flags & AMD_IS_APU) {
757156a81beSChunming Zhou 		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
758156a81beSChunming Zhou 		adev->gmc.aper_size = adev->gmc.real_vram_size;
759156a81beSChunming Zhou 	}
760156a81beSChunming Zhou #endif
761e60f8db5SAlex Xie 	/* In case the PCI BAR is larger than the actual amount of vram */
762770d13b1SChristian König 	adev->gmc.visible_vram_size = adev->gmc.aper_size;
763770d13b1SChristian König 	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
764770d13b1SChristian König 		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
765e60f8db5SAlex Xie 
766c3db7b5aSAlex Deucher 	/* set the gart size */
767c3db7b5aSAlex Deucher 	if (amdgpu_gart_size == -1) {
768c3db7b5aSAlex Deucher 		switch (adev->asic_type) {
769c3db7b5aSAlex Deucher 		case CHIP_VEGA10:  /* all engines support GPUVM */
770c3db7b5aSAlex Deucher 		default:
771770d13b1SChristian König 			adev->gmc.gart_size = 256ULL << 20;
772c3db7b5aSAlex Deucher 			break;
773c3db7b5aSAlex Deucher 		case CHIP_RAVEN:   /* DCE SG support */
774770d13b1SChristian König 			adev->gmc.gart_size = 1024ULL << 20;
775c3db7b5aSAlex Deucher 			break;
776c3db7b5aSAlex Deucher 		}
777c3db7b5aSAlex Deucher 	} else {
778770d13b1SChristian König 		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
779c3db7b5aSAlex Deucher 	}
780c3db7b5aSAlex Deucher 
781770d13b1SChristian König 	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
782e60f8db5SAlex Xie 
783e60f8db5SAlex Xie 	return 0;
784e60f8db5SAlex Xie }
785e60f8db5SAlex Xie 
786e60f8db5SAlex Xie static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
787e60f8db5SAlex Xie {
788e60f8db5SAlex Xie 	int r;
789e60f8db5SAlex Xie 
790e60f8db5SAlex Xie 	if (adev->gart.robj) {
791e60f8db5SAlex Xie 		WARN(1, "VEGA10 PCIE GART already initialized\n");
792e60f8db5SAlex Xie 		return 0;
793e60f8db5SAlex Xie 	}
794e60f8db5SAlex Xie 	/* Initialize common gart structure */
795e60f8db5SAlex Xie 	r = amdgpu_gart_init(adev);
796e60f8db5SAlex Xie 	if (r)
797e60f8db5SAlex Xie 		return r;
798e60f8db5SAlex Xie 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
799e60f8db5SAlex Xie 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
800e60f8db5SAlex Xie 				 AMDGPU_PTE_EXECUTABLE;
801e60f8db5SAlex Xie 	return amdgpu_gart_table_vram_alloc(adev);
802e60f8db5SAlex Xie }
803e60f8db5SAlex Xie 
804e60f8db5SAlex Xie static int gmc_v9_0_sw_init(void *handle)
805e60f8db5SAlex Xie {
806e60f8db5SAlex Xie 	int r;
807e60f8db5SAlex Xie 	int dma_bits;
808e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
809e60f8db5SAlex Xie 
8100c8c0847SHuang Rui 	gfxhub_v1_0_init(adev);
81177f6c763SHuang Rui 	mmhub_v1_0_init(adev);
8120c8c0847SHuang Rui 
813770d13b1SChristian König 	spin_lock_init(&adev->gmc.invalidate_lock);
814e60f8db5SAlex Xie 
815fd66560bSHawking Zhang 	switch (adev->asic_type) {
816fd66560bSHawking Zhang 	case CHIP_RAVEN:
817770d13b1SChristian König 		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
8186a42fd6fSChristian König 		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
819f3368128SChristian König 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
8206a42fd6fSChristian König 		} else {
8216a42fd6fSChristian König 			/* vm_size is 128TB + 512GB for legacy 3-level page support */
8226a42fd6fSChristian König 			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
823770d13b1SChristian König 			adev->gmc.translate_further =
8246a42fd6fSChristian König 				adev->vm_manager.num_level > 1;
8256a42fd6fSChristian König 		}
826fd66560bSHawking Zhang 		break;
827fd66560bSHawking Zhang 	case CHIP_VEGA10:
828e60f8db5SAlex Xie 		/* XXX Don't know how to get VRAM type yet. */
829770d13b1SChristian König 		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
83036b32a68SZhang, Jerry 		/*
83136b32a68SZhang, Jerry 		 * To fulfill 4-level page support,
83236b32a68SZhang, Jerry 		 * vm size is 256TB (48bit), maximum size of Vega10,
83336b32a68SZhang, Jerry 		 * block size 512 (9bit)
83436b32a68SZhang, Jerry 		 */
835f3368128SChristian König 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
836fd66560bSHawking Zhang 		break;
837fd66560bSHawking Zhang 	default:
838fd66560bSHawking Zhang 		break;
839fd66560bSHawking Zhang 	}
840fd66560bSHawking Zhang 
841e60f8db5SAlex Xie 	/* This interrupt is VMC page fault.*/
842e60f8db5SAlex Xie 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
843770d13b1SChristian König 				&adev->gmc.vm_fault);
844d7c434d3SFelix Kuehling 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
845770d13b1SChristian König 				&adev->gmc.vm_fault);
846e60f8db5SAlex Xie 
847e60f8db5SAlex Xie 	if (r)
848e60f8db5SAlex Xie 		return r;
849e60f8db5SAlex Xie 
850e60f8db5SAlex Xie 	/* Set the internal MC address mask
851e60f8db5SAlex Xie 	 * This is the max address of the GPU's
852e60f8db5SAlex Xie 	 * internal address space.
853e60f8db5SAlex Xie 	 */
854770d13b1SChristian König 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
855e60f8db5SAlex Xie 
856916910adSHuang Rui 	/*
857916910adSHuang Rui 	 * It needs to reserve 8M stolen memory for vega10
858916910adSHuang Rui 	 * TODO: Figure out how to avoid that...
859916910adSHuang Rui 	 */
860770d13b1SChristian König 	adev->gmc.stolen_size = 8 * 1024 * 1024;
861916910adSHuang Rui 
862e60f8db5SAlex Xie 	/* set DMA mask + need_dma32 flags.
863e60f8db5SAlex Xie 	 * PCIE - can handle 44-bits.
864e60f8db5SAlex Xie 	 * IGP - can handle 44-bits
865e60f8db5SAlex Xie 	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
866e60f8db5SAlex Xie 	 */
867e60f8db5SAlex Xie 	adev->need_dma32 = false;
868e60f8db5SAlex Xie 	dma_bits = adev->need_dma32 ? 32 : 44;
869e60f8db5SAlex Xie 	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
870e60f8db5SAlex Xie 	if (r) {
871e60f8db5SAlex Xie 		adev->need_dma32 = true;
872e60f8db5SAlex Xie 		dma_bits = 32;
873e60f8db5SAlex Xie 		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
874e60f8db5SAlex Xie 	}
875e60f8db5SAlex Xie 	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
876e60f8db5SAlex Xie 	if (r) {
877e60f8db5SAlex Xie 		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
878e60f8db5SAlex Xie 		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
879e60f8db5SAlex Xie 	}
880fd5fd480SChunming Zhou 	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
881e60f8db5SAlex Xie 
882e60f8db5SAlex Xie 	r = gmc_v9_0_mc_init(adev);
883e60f8db5SAlex Xie 	if (r)
884e60f8db5SAlex Xie 		return r;
885e60f8db5SAlex Xie 
886e60f8db5SAlex Xie 	/* Memory manager */
887e60f8db5SAlex Xie 	r = amdgpu_bo_init(adev);
888e60f8db5SAlex Xie 	if (r)
889e60f8db5SAlex Xie 		return r;
890e60f8db5SAlex Xie 
891e60f8db5SAlex Xie 	r = gmc_v9_0_gart_init(adev);
892e60f8db5SAlex Xie 	if (r)
893e60f8db5SAlex Xie 		return r;
894e60f8db5SAlex Xie 
89505ec3edaSChristian König 	/*
89605ec3edaSChristian König 	 * number of VMs
89705ec3edaSChristian König 	 * VMID 0 is reserved for System
89805ec3edaSChristian König 	 * amdgpu graphics/compute will use VMIDs 1-7
89905ec3edaSChristian König 	 * amdkfd will use VMIDs 8-15
90005ec3edaSChristian König 	 */
90105ec3edaSChristian König 	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
90205ec3edaSChristian König 	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
90305ec3edaSChristian König 
90405ec3edaSChristian König 	amdgpu_vm_manager_init(adev);
90505ec3edaSChristian König 
90605ec3edaSChristian König 	return 0;
907e60f8db5SAlex Xie }
908e60f8db5SAlex Xie 
/**
 * gmc_v9_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup: frees the VRAM-backed page table,
 * then the common GART structures.
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}
921e60f8db5SAlex Xie 
/*
 * Software fini: teardown mirrors gmc_v9_0_sw_init() — release GEM
 * objects first, then the VM manager, the GART, and finally the buffer
 * manager.  Order matters here.
 */
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	gmc_v9_0_gart_fini(adev);
	amdgpu_bo_fini(adev);

	return 0;
}
933e60f8db5SAlex Xie 
934e60f8db5SAlex Xie static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
935e60f8db5SAlex Xie {
936946a4d5bSShaoyun Liu 
937e60f8db5SAlex Xie 	switch (adev->asic_type) {
938e60f8db5SAlex Xie 	case CHIP_VEGA10:
939946a4d5bSShaoyun Liu 		soc15_program_register_sequence(adev,
9405c583018SEvan Quan 						golden_settings_mmhub_1_0_0,
941c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
942946a4d5bSShaoyun Liu 		soc15_program_register_sequence(adev,
9435c583018SEvan Quan 						golden_settings_athub_1_0_0,
944c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_athub_1_0_0));
945e60f8db5SAlex Xie 		break;
946e4f3abaaSChunming Zhou 	case CHIP_RAVEN:
947946a4d5bSShaoyun Liu 		soc15_program_register_sequence(adev,
9485c583018SEvan Quan 						golden_settings_athub_1_0_0,
949c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_athub_1_0_0));
950e4f3abaaSChunming Zhou 		break;
951e60f8db5SAlex Xie 	default:
952e60f8db5SAlex Xie 		break;
953e60f8db5SAlex Xie 	}
954e60f8db5SAlex Xie }
955e60f8db5SAlex Xie 
/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * Brings up the GART: programs the HDP golden registers, pins the page
 * table in VRAM, enables the gfxhub and mmhub, flushes HDP and the GPU
 * TLB, and sets the default VM fault behavior.  The statement order is
 * a hardware bring-up sequence — do not reorder.
 * Returns 0 on success, negative error code on failure.
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	/* keep the table resident in VRAM while the GART is enabled */
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		/* Raven-only: set up mmhub powergating before hub enable */
		mmhub_v1_0_initialize_power_gating(adev);
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	/* read-modify-write: write HOST_PATH_CNTL back as read to latch it */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* After HDP is initialized, flush HDP.*/
	adev->nbio_funcs->hdp_flush(adev);

	/* default fault behavior handed to both hubs below */
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
1019e60f8db5SAlex Xie 
1020e60f8db5SAlex Xie static int gmc_v9_0_hw_init(void *handle)
1021e60f8db5SAlex Xie {
1022e60f8db5SAlex Xie 	int r;
1023e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1024e60f8db5SAlex Xie 
1025e60f8db5SAlex Xie 	/* The sequence of these two function calls matters.*/
1026e60f8db5SAlex Xie 	gmc_v9_0_init_golden_registers(adev);
1027e60f8db5SAlex Xie 
1028edca2d05SAlex Deucher 	if (adev->mode_info.num_crtc) {
1029edca2d05SAlex Deucher 		/* Lockout access through VGA aperture*/
10304d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1031edca2d05SAlex Deucher 
1032edca2d05SAlex Deucher 		/* disable VGA render */
10334d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1034edca2d05SAlex Deucher 	}
1035edca2d05SAlex Deucher 
1036e60f8db5SAlex Xie 	r = gmc_v9_0_gart_enable(adev);
1037e60f8db5SAlex Xie 
1038e60f8db5SAlex Xie 	return r;
1039e60f8db5SAlex Xie }
1040e60f8db5SAlex Xie 
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table access: both the gfxhub and the mmhub
 * are shut down first, then the GART table is unpinned from VRAM.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}
1054e60f8db5SAlex Xie 
/*
 * Hardware fini: release the VM fault interrupt and disable the GART.
 * Skipped entirely under SR-IOV, where the host owns the GMC registers.
 */
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
1070e60f8db5SAlex Xie 
/* Suspend is a plain GMC hw teardown; state is rebuilt by resume. */
static int gmc_v9_0_suspend(void *handle)
{
	return gmc_v9_0_hw_fini(handle);
}
1077e60f8db5SAlex Xie 
/* Resume: re-run hw init, then reset all VM ids. */
static int gmc_v9_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = gmc_v9_0_hw_init(adev);

	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}
1091e60f8db5SAlex Xie 
/* GMC v9 has no busy state to poll; report idle unconditionally. */
static bool gmc_v9_0_is_idle(void *handle)
{
	(void)handle;
	return true;
}
1097e60f8db5SAlex Xie 
/* Nothing to wait for: the GMC v9 memory controller is always idle. */
static int gmc_v9_0_wait_for_idle(void *handle)
{
	(void)handle;
	return 0;
}
1103e60f8db5SAlex Xie 
/* XXX stubbed out for emulation; soft reset is currently a no-op. */
static int gmc_v9_0_soft_reset(void *handle)
{
	(void)handle;
	return 0;
}
1109e60f8db5SAlex Xie 
1110e60f8db5SAlex Xie static int gmc_v9_0_set_clockgating_state(void *handle,
1111e60f8db5SAlex Xie 					enum amd_clockgating_state state)
1112e60f8db5SAlex Xie {
1113d5583d4fSHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1114d5583d4fSHuang Rui 
1115d5583d4fSHuang Rui 	return mmhub_v1_0_set_clockgating(adev, state);
1116e60f8db5SAlex Xie }
1117e60f8db5SAlex Xie 
111813052be5SHuang Rui static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
111913052be5SHuang Rui {
112013052be5SHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
112113052be5SHuang Rui 
112213052be5SHuang Rui 	mmhub_v1_0_get_clockgating(adev, flags);
112313052be5SHuang Rui }
112413052be5SHuang Rui 
1125e60f8db5SAlex Xie static int gmc_v9_0_set_powergating_state(void *handle,
1126e60f8db5SAlex Xie 					enum amd_powergating_state state)
1127e60f8db5SAlex Xie {
1128e60f8db5SAlex Xie 	return 0;
1129e60f8db5SAlex Xie }
1130e60f8db5SAlex Xie 
/*
 * IP-block callbacks for the GMC v9.0 memory controller, referenced by
 * gmc_v9_0_ip_block below.
 */
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};
1148e60f8db5SAlex Xie 
/* GMC 9.0 IP block registration info (type, version, callback table). */
const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};
1156e60f8db5SAlex Xie };
1157