xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision bf383fb6)
1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23e60f8db5SAlex Xie #include <linux/firmware.h>
24e60f8db5SAlex Xie #include "amdgpu.h"
25e60f8db5SAlex Xie #include "gmc_v9_0.h"
268d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
27e60f8db5SAlex Xie 
2875199b8cSFeifei Xu #include "hdp/hdp_4_0_offset.h"
2975199b8cSFeifei Xu #include "hdp/hdp_4_0_sh_mask.h"
30cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
31135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
32135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
33fb960bd2SFeifei Xu #include "vega10_enum.h"
3465417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
356ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
36e60f8db5SAlex Xie 
37946a4d5bSShaoyun Liu #include "soc15.h"
38e60f8db5SAlex Xie #include "soc15_common.h"
3990c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
40e60f8db5SAlex Xie 
41e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
42e60f8db5SAlex Xie #include "mmhub_v1_0.h"
43e60f8db5SAlex Xie 
/* Data Fabric DRAM base-address register; not yet in a released header,
 * so the offset and field layout are defined locally here.
 */
#define mmDF_CS_AON0_DramBaseAddress0                                                                  0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX                                                         0
//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT                                                        0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT                                                    0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT                                                      0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT                                                      0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT                                                      0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK                                                          0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK                                                      0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK                                                        0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK                                                        0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK                                                        0xFFFFF000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
#define AMDGPU_NUM_OF_VMIDS			8
60e60f8db5SAlex Xie 
/* HDP golden register settings for Vega10.
 * NOTE(review): entries appear to be <reg offset, and-mask, or-value>
 * triples as consumed by amdgpu_program_register_sequence() — confirm
 * against the caller before relying on this layout.
 */
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
74e60f8db5SAlex Xie 
/* MMHUB 1.0.0 golden register settings (masked read-modify-write). */
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
805c583018SEvan Quan 
/* ATHUB 1.0.0 golden register settings (masked read-modify-write). */
static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
865c583018SEvan Quan 
/* Ecc related register addresses, (BASE + reg offset) */
/* Four UMC instance bases (0x00014xxx/0x00054xxx/0x00094xxx/0x000dxxxx),
 * each with four channels spaced 0x800 apart — 16 channels total.
 */
/* Universal Memory Controller caps (may be fused). */
/* UMCCH:UmcLocalCap */
#define UMCLOCALCAPS_ADDR0	(0x00014306 + 0x00000000)
#define UMCLOCALCAPS_ADDR1	(0x00014306 + 0x00000800)
#define UMCLOCALCAPS_ADDR2	(0x00014306 + 0x00001000)
#define UMCLOCALCAPS_ADDR3	(0x00014306 + 0x00001800)
#define UMCLOCALCAPS_ADDR4	(0x00054306 + 0x00000000)
#define UMCLOCALCAPS_ADDR5	(0x00054306 + 0x00000800)
#define UMCLOCALCAPS_ADDR6	(0x00054306 + 0x00001000)
#define UMCLOCALCAPS_ADDR7	(0x00054306 + 0x00001800)
#define UMCLOCALCAPS_ADDR8	(0x00094306 + 0x00000000)
#define UMCLOCALCAPS_ADDR9	(0x00094306 + 0x00000800)
#define UMCLOCALCAPS_ADDR10	(0x00094306 + 0x00001000)
#define UMCLOCALCAPS_ADDR11	(0x00094306 + 0x00001800)
#define UMCLOCALCAPS_ADDR12	(0x000d4306 + 0x00000000)
#define UMCLOCALCAPS_ADDR13	(0x000d4306 + 0x00000800)
#define UMCLOCALCAPS_ADDR14	(0x000d4306 + 0x00001000)
#define UMCLOCALCAPS_ADDR15	(0x000d4306 + 0x00001800)

/* Universal Memory Controller Channel config. */
/* UMCCH:UMC_CONFIG */
#define UMCCH_UMC_CONFIG_ADDR0	(0x00014040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR1	(0x00014040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR2	(0x00014040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR3	(0x00014040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR4	(0x00054040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR5	(0x00054040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR6	(0x00054040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR7	(0x00054040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR8	(0x00094040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR9	(0x00094040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR10	(0x00094040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR11	(0x00094040 + 0x00001800)
#define UMCCH_UMC_CONFIG_ADDR12	(0x000d4040 + 0x00000000)
#define UMCCH_UMC_CONFIG_ADDR13	(0x000d4040 + 0x00000800)
#define UMCCH_UMC_CONFIG_ADDR14	(0x000d4040 + 0x00001000)
#define UMCCH_UMC_CONFIG_ADDR15	(0x000d4040 + 0x00001800)

/* Universal Memory Controller Channel Ecc config. */
/* UMCCH:EccCtrl */
#define UMCCH_ECCCTRL_ADDR0	(0x00014053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR1	(0x00014053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR2	(0x00014053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR3	(0x00014053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR4	(0x00054053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR5	(0x00054053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR6	(0x00054053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR7	(0x00054053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR8	(0x00094053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR9	(0x00094053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR10	(0x00094053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR11	(0x00094053 + 0x00001800)
#define UMCCH_ECCCTRL_ADDR12	(0x000d4053 + 0x00000000)
#define UMCCH_ECCCTRL_ADDR13	(0x000d4053 + 0x00000800)
#define UMCCH_ECCCTRL_ADDR14	(0x000d4053 + 0x00001000)
#define UMCCH_ECCCTRL_ADDR15	(0x000d4053 + 0x00001800)
14402bab923SDavid Panariti 
/* Per-channel UmcLocalCap register addresses, for iterating all 16
 * UMC channels in gmc_v9_0_ecc_available().
 */
static const uint32_t ecc_umclocalcap_addrs[] = {
	UMCLOCALCAPS_ADDR0,
	UMCLOCALCAPS_ADDR1,
	UMCLOCALCAPS_ADDR2,
	UMCLOCALCAPS_ADDR3,
	UMCLOCALCAPS_ADDR4,
	UMCLOCALCAPS_ADDR5,
	UMCLOCALCAPS_ADDR6,
	UMCLOCALCAPS_ADDR7,
	UMCLOCALCAPS_ADDR8,
	UMCLOCALCAPS_ADDR9,
	UMCLOCALCAPS_ADDR10,
	UMCLOCALCAPS_ADDR11,
	UMCLOCALCAPS_ADDR12,
	UMCLOCALCAPS_ADDR13,
	UMCLOCALCAPS_ADDR14,
	UMCLOCALCAPS_ADDR15,
};
16302bab923SDavid Panariti 
/* Per-channel UMC_CONFIG register addresses (DramReady probing). */
static const uint32_t ecc_umcch_umc_config_addrs[] = {
	UMCCH_UMC_CONFIG_ADDR0,
	UMCCH_UMC_CONFIG_ADDR1,
	UMCCH_UMC_CONFIG_ADDR2,
	UMCCH_UMC_CONFIG_ADDR3,
	UMCCH_UMC_CONFIG_ADDR4,
	UMCCH_UMC_CONFIG_ADDR5,
	UMCCH_UMC_CONFIG_ADDR6,
	UMCCH_UMC_CONFIG_ADDR7,
	UMCCH_UMC_CONFIG_ADDR8,
	UMCCH_UMC_CONFIG_ADDR9,
	UMCCH_UMC_CONFIG_ADDR10,
	UMCCH_UMC_CONFIG_ADDR11,
	UMCCH_UMC_CONFIG_ADDR12,
	UMCCH_UMC_CONFIG_ADDR13,
	UMCCH_UMC_CONFIG_ADDR14,
	UMCCH_UMC_CONFIG_ADDR15,
};
18202bab923SDavid Panariti 
/* Per-channel EccCtrl register addresses (Wr/RdEccEn probing). */
static const uint32_t ecc_umcch_eccctrl_addrs[] = {
	UMCCH_ECCCTRL_ADDR0,
	UMCCH_ECCCTRL_ADDR1,
	UMCCH_ECCCTRL_ADDR2,
	UMCCH_ECCCTRL_ADDR3,
	UMCCH_ECCCTRL_ADDR4,
	UMCCH_ECCCTRL_ADDR5,
	UMCCH_ECCCTRL_ADDR6,
	UMCCH_ECCCTRL_ADDR7,
	UMCCH_ECCCTRL_ADDR8,
	UMCCH_ECCCTRL_ADDR9,
	UMCCH_ECCCTRL_ADDR10,
	UMCCH_ECCCTRL_ADDR11,
	UMCCH_ECCCTRL_ADDR12,
	UMCCH_ECCCTRL_ADDR13,
	UMCCH_ECCCTRL_ADDR14,
	UMCCH_ECCCTRL_ADDR15,
};
20102bab923SDavid Panariti 
202e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
203e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
204e60f8db5SAlex Xie 					unsigned type,
205e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
206e60f8db5SAlex Xie {
207e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
208ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
209e60f8db5SAlex Xie 
21011250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21111250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21211250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21311250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21411250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21511250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21611250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
21711250164SChristian König 
218e60f8db5SAlex Xie 	switch (state) {
219e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
220ae6d1416STom St Denis 		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
221ae6d1416STom St Denis 			hub = &adev->vmhub[j];
222e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
223e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
224e60f8db5SAlex Xie 				tmp = RREG32(reg);
225e60f8db5SAlex Xie 				tmp &= ~bits;
226e60f8db5SAlex Xie 				WREG32(reg, tmp);
227e60f8db5SAlex Xie 			}
228e60f8db5SAlex Xie 		}
229e60f8db5SAlex Xie 		break;
230e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
231ae6d1416STom St Denis 		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
232ae6d1416STom St Denis 			hub = &adev->vmhub[j];
233e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
234e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
235e60f8db5SAlex Xie 				tmp = RREG32(reg);
236e60f8db5SAlex Xie 				tmp |= bits;
237e60f8db5SAlex Xie 				WREG32(reg, tmp);
238e60f8db5SAlex Xie 			}
239e60f8db5SAlex Xie 		}
240e60f8db5SAlex Xie 	default:
241e60f8db5SAlex Xie 		break;
242e60f8db5SAlex Xie 	}
243e60f8db5SAlex Xie 
244e60f8db5SAlex Xie 	return 0;
245e60f8db5SAlex Xie }
246e60f8db5SAlex Xie 
247e60f8db5SAlex Xie static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
248e60f8db5SAlex Xie 				struct amdgpu_irq_src *source,
249e60f8db5SAlex Xie 				struct amdgpu_iv_entry *entry)
250e60f8db5SAlex Xie {
2515a9b8e8aSChristian König 	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
2524d6cbde3SFelix Kuehling 	uint32_t status = 0;
253e60f8db5SAlex Xie 	u64 addr;
254e60f8db5SAlex Xie 
255e60f8db5SAlex Xie 	addr = (u64)entry->src_data[0] << 12;
256e60f8db5SAlex Xie 	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
257e60f8db5SAlex Xie 
25879a0c465SMonk Liu 	if (!amdgpu_sriov_vf(adev)) {
2595a9b8e8aSChristian König 		status = RREG32(hub->vm_l2_pro_fault_status);
2605a9b8e8aSChristian König 		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
2614d6cbde3SFelix Kuehling 	}
262e60f8db5SAlex Xie 
2634d6cbde3SFelix Kuehling 	if (printk_ratelimit()) {
2644d6cbde3SFelix Kuehling 		dev_err(adev->dev,
2654d6cbde3SFelix Kuehling 			"[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
266e60f8db5SAlex Xie 			entry->vm_id_src ? "mmhub" : "gfxhub",
2674d6cbde3SFelix Kuehling 			entry->src_id, entry->ring_id, entry->vm_id,
2684d6cbde3SFelix Kuehling 			entry->pas_id);
2694d6cbde3SFelix Kuehling 		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
27079a0c465SMonk Liu 			addr, entry->client_id);
2714d6cbde3SFelix Kuehling 		if (!amdgpu_sriov_vf(adev))
2724d6cbde3SFelix Kuehling 			dev_err(adev->dev,
2734d6cbde3SFelix Kuehling 				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
2744d6cbde3SFelix Kuehling 				status);
27579a0c465SMonk Liu 	}
276e60f8db5SAlex Xie 
277e60f8db5SAlex Xie 	return 0;
278e60f8db5SAlex Xie }
279e60f8db5SAlex Xie 
/* IRQ source callbacks for VM protection faults. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
284e60f8db5SAlex Xie 
285e60f8db5SAlex Xie static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
286e60f8db5SAlex Xie {
287e60f8db5SAlex Xie 	adev->mc.vm_fault.num_types = 1;
288e60f8db5SAlex Xie 	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
289e60f8db5SAlex Xie }
290e60f8db5SAlex Xie 
29103f89febSChristian König static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
29203f89febSChristian König {
29303f89febSChristian König 	u32 req = 0;
29403f89febSChristian König 
29503f89febSChristian König 	/* invalidate using legacy mode on vm_id*/
29603f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
29703f89febSChristian König 			    PER_VMID_INVALIDATE_REQ, 1 << vm_id);
29803f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
29903f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
30003f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
30103f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
30203f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
30303f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
30403f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
30503f89febSChristian König 			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
30603f89febSChristian König 
30703f89febSChristian König 	return req;
30803f89febSChristian König }
30903f89febSChristian König 
310e60f8db5SAlex Xie /*
311e60f8db5SAlex Xie  * GART
312e60f8db5SAlex Xie  * VMID 0 is the physical GPU addresses as used by the kernel.
313e60f8db5SAlex Xie  * VMIDs 1-15 are used for userspace clients and are handled
314e60f8db5SAlex Xie  * by the amdgpu vm/hsa code.
315e60f8db5SAlex Xie  */
316e60f8db5SAlex Xie 
317e60f8db5SAlex Xie /**
318e60f8db5SAlex Xie  * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
319e60f8db5SAlex Xie  *
320e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
321e60f8db5SAlex Xie  * @vmid: vm instance to flush
322e60f8db5SAlex Xie  *
323e60f8db5SAlex Xie  * Flush the TLB for the requested page table.
324e60f8db5SAlex Xie  */
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	/* flush hdp cache */
	adev->nbio_funcs->hdp_flush(adev);

	/* Serialize against other users of the shared invalidate engine. */
	spin_lock(&adev->mc.invalidate_lock);

	/* Kick an invalidation on every hub (GFX and MM) and wait for it. */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK.*/
		for (j = 0; j < 100; j++) {
			/* ACK register sets bit <vmid> when the flush lands. */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;

		/* Wait for ACK with a delay.*/
		/* Second, slower poll with udelay() up to usec_timeout. */
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		/* Timed out: log and move on to the next hub anyway. */
		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->mc.invalidate_lock);
}
370e60f8db5SAlex Xie 
371e60f8db5SAlex Xie /**
372e60f8db5SAlex Xie  * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
373e60f8db5SAlex Xie  *
374e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
375e60f8db5SAlex Xie  * @cpu_pt_addr: cpu address of the page table
376e60f8db5SAlex Xie  * @gpu_page_idx: entry in the page table to update
377e60f8db5SAlex Xie  * @addr: dst addr to write into pte/pde
378e60f8db5SAlex Xie  * @flags: access flags
379e60f8db5SAlex Xie  *
380e60f8db5SAlex Xie  * Update the page tables using the CPU.
381e60f8db5SAlex Xie  */
382e60f8db5SAlex Xie static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
383e60f8db5SAlex Xie 					void *cpu_pt_addr,
384e60f8db5SAlex Xie 					uint32_t gpu_page_idx,
385e60f8db5SAlex Xie 					uint64_t addr,
386e60f8db5SAlex Xie 					uint64_t flags)
387e60f8db5SAlex Xie {
388e60f8db5SAlex Xie 	void __iomem *ptr = (void *)cpu_pt_addr;
389e60f8db5SAlex Xie 	uint64_t value;
390e60f8db5SAlex Xie 
391e60f8db5SAlex Xie 	/*
392e60f8db5SAlex Xie 	 * PTE format on VEGA 10:
393e60f8db5SAlex Xie 	 * 63:59 reserved
394e60f8db5SAlex Xie 	 * 58:57 mtype
395e60f8db5SAlex Xie 	 * 56 F
396e60f8db5SAlex Xie 	 * 55 L
397e60f8db5SAlex Xie 	 * 54 P
398e60f8db5SAlex Xie 	 * 53 SW
399e60f8db5SAlex Xie 	 * 52 T
400e60f8db5SAlex Xie 	 * 50:48 reserved
401e60f8db5SAlex Xie 	 * 47:12 4k physical page base address
402e60f8db5SAlex Xie 	 * 11:7 fragment
403e60f8db5SAlex Xie 	 * 6 write
404e60f8db5SAlex Xie 	 * 5 read
405e60f8db5SAlex Xie 	 * 4 exe
406e60f8db5SAlex Xie 	 * 3 Z
407e60f8db5SAlex Xie 	 * 2 snooped
408e60f8db5SAlex Xie 	 * 1 system
409e60f8db5SAlex Xie 	 * 0 valid
410e60f8db5SAlex Xie 	 *
411e60f8db5SAlex Xie 	 * PDE format on VEGA 10:
412e60f8db5SAlex Xie 	 * 63:59 block fragment size
413e60f8db5SAlex Xie 	 * 58:55 reserved
414e60f8db5SAlex Xie 	 * 54 P
415e60f8db5SAlex Xie 	 * 53:48 reserved
416e60f8db5SAlex Xie 	 * 47:6 physical base address of PD or PTE
417e60f8db5SAlex Xie 	 * 5:3 reserved
418e60f8db5SAlex Xie 	 * 2 C
419e60f8db5SAlex Xie 	 * 1 system
420e60f8db5SAlex Xie 	 * 0 valid
421e60f8db5SAlex Xie 	 */
422e60f8db5SAlex Xie 
423e60f8db5SAlex Xie 	/*
424e60f8db5SAlex Xie 	 * The following is for PTE only. GART does not have PDEs.
425e60f8db5SAlex Xie 	*/
426e60f8db5SAlex Xie 	value = addr & 0x0000FFFFFFFFF000ULL;
427e60f8db5SAlex Xie 	value |= flags;
428e60f8db5SAlex Xie 	writeq(value, ptr + (gpu_page_idx * 8));
429e60f8db5SAlex Xie 	return 0;
430e60f8db5SAlex Xie }
431e60f8db5SAlex Xie 
432e60f8db5SAlex Xie static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
433e60f8db5SAlex Xie 						uint32_t flags)
434e60f8db5SAlex Xie 
435e60f8db5SAlex Xie {
436e60f8db5SAlex Xie 	uint64_t pte_flag = 0;
437e60f8db5SAlex Xie 
438e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
439e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_EXECUTABLE;
440e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_READABLE)
441e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_READABLE;
442e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
443e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_WRITEABLE;
444e60f8db5SAlex Xie 
445e60f8db5SAlex Xie 	switch (flags & AMDGPU_VM_MTYPE_MASK) {
446e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
447e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
448e60f8db5SAlex Xie 		break;
449e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
450e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
451e60f8db5SAlex Xie 		break;
452e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
453e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
454e60f8db5SAlex Xie 		break;
455e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
456e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
457e60f8db5SAlex Xie 		break;
458e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
459e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
460e60f8db5SAlex Xie 		break;
461e60f8db5SAlex Xie 	default:
462e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
463e60f8db5SAlex Xie 		break;
464e60f8db5SAlex Xie 	}
465e60f8db5SAlex Xie 
466e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_PRT)
467e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_PRT;
468e60f8db5SAlex Xie 
469e60f8db5SAlex Xie 	return pte_flag;
470e60f8db5SAlex Xie }
471e60f8db5SAlex Xie 
/* Convert a PDE address/flags pair for the hardware.  For a PDE that
 * points at a page table (i.e. not a PTE-style huge-page entry),
 * rebase the address from the MC aperture into the GPU physical
 * address space.
 */
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->mc.vram_start;
	/* Bits 47:6 hold the base address; anything outside must be 0. */
	BUG_ON(*addr & 0xFFFF00000000003FULL);
}
480f75e237cSChristian König 
/* GART callback table exposed through adev->gart.gart_funcs. */
static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};
488e60f8db5SAlex Xie 
489e60f8db5SAlex Xie static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
490e60f8db5SAlex Xie {
491e60f8db5SAlex Xie 	if (adev->gart.gart_funcs == NULL)
492e60f8db5SAlex Xie 		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
493e60f8db5SAlex Xie }
494e60f8db5SAlex Xie 
/* IP-block early init: wire up the GART and VM-fault IRQ callbacks
 * before any other GMC setup runs.  Always returns 0.
 */
static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_gart_funcs(adev);

	return 0;
}
504e60f8db5SAlex Xie 
/* Probe all 16 UMC channels to decide whether ECC is usable.
 * A channel counts against ECC ("lost sheep") if its local cap fuses
 * ECC off, DRAM is not ready, or write/read ECC is disabled.
 * Returns 1 when every check passes (ECC available), 0 otherwise;
 * never returns a negative error here.
 */
static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
{
	uint32_t reg_val;
	uint32_t reg_addr;
	uint32_t field_val;
	size_t i;
	uint32_t fv2;
	size_t lost_sheep;	/* count of failed per-channel checks */

	DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");

	lost_sheep = 0;
	/* Pass 1: UmcLocalCap — EccDis set means ECC fused off. */
	for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
		reg_addr = ecc_umclocalcap_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
					  EccDis);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "EccDis: 0x%08x, ",
			  reg_val, field_val);
		if (field_val) {
			DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
			++lost_sheep;
		}
	}

	/* Pass 2: UMC_CONFIG — DramReady must be set. */
	for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
		reg_addr = ecc_umcch_umc_config_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
					  DramReady);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "DramReady: 0x%08x\n",
			  reg_val, field_val);

		if (!field_val) {
			DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
			++lost_sheep;
		}
	}

	/* Pass 3: EccCtrl — both WrEccEn and RdEccEn must be set. */
	for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
		reg_addr = ecc_umcch_eccctrl_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
					  WrEccEn);
		fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
				    RdEccEn);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "WrEccEn: 0x%08x, "
			  "RdEccEn: 0x%08x\n",
			  reg_val, field_val, fv2);

		if (!field_val) {
			DRM_DEBUG("ecc: WrEccEn is not set\n");
			++lost_sheep;
		}
		if (!fv2) {
			DRM_DEBUG("ecc: RdEccEn is not set\n");
			++lost_sheep;
		}
	}

	DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
	return lost_sheep == 0;
}
58302bab923SDavid Panariti 
/* IP-block late init: assign a VM invalidation engine to every ring,
 * report ECC availability, and enable the VM-fault interrupt.
 * Returns 0 on success or a negative error code.
 */
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/*
	 * The latest engine allocation on gfx9 is:
	 * Engine 0, 1: idle
	 * Engine 2, 3: firmware
	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
	 * Engine 14~15: idle
	 * Engine 16: kfd tlb invalidation
	 * Engine 17: Gart flushes
	 */
	/* Next free engine per hub; ring engines start at 4 (see above). */
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
	unsigned i;
	int r;

	for(i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 16 is used for KFD and 17 for GART flushes */
	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 16);

	/* gmc_v9_0_ecc_available() returns 1/0 (available/not), or,
	 * per the error branch below, could report a negative failure.
	 */
	r = gmc_v9_0_ecc_available(adev);
	if (r == 1) {
		DRM_INFO("ECC is active.\n");
	} else if (r == 0) {
		DRM_INFO("ECC is not present.\n");
	} else {
		DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
		return r;
	}

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
626e60f8db5SAlex Xie 
627e60f8db5SAlex Xie static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
628e60f8db5SAlex Xie 					struct amdgpu_mc *mc)
629e60f8db5SAlex Xie {
630eeb2487dSMonk Liu 	u64 base = 0;
631eeb2487dSMonk Liu 	if (!amdgpu_sriov_vf(adev))
632eeb2487dSMonk Liu 		base = mmhub_v1_0_get_fb_location(adev);
633e60f8db5SAlex Xie 	amdgpu_vram_location(adev, &adev->mc, base);
6346f02a696SChristian König 	amdgpu_gart_location(adev, mc);
635bc099ee9SChunming Zhou 	/* base offset of vram pages */
636bc099ee9SChunming Zhou 	if (adev->flags & AMD_IS_APU)
637bc099ee9SChunming Zhou 		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
638bc099ee9SChunming Zhou 	else
639bc099ee9SChunming Zhou 		adev->vm_manager.vram_base_offset = 0;
640e60f8db5SAlex Xie }
641e60f8db5SAlex Xie 
642e60f8db5SAlex Xie /**
643e60f8db5SAlex Xie  * gmc_v9_0_mc_init - initialize the memory controller driver params
644e60f8db5SAlex Xie  *
645e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
646e60f8db5SAlex Xie  *
647e60f8db5SAlex Xie  * Look up the amount of vram, vram width, and decide how to place
648e60f8db5SAlex Xie  * vram and gart within the GPU's physical address space.
649e60f8db5SAlex Xie  * Returns 0 for success.
650e60f8db5SAlex Xie  */
651e60f8db5SAlex Xie static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
652e60f8db5SAlex Xie {
653e60f8db5SAlex Xie 	u32 tmp;
654e60f8db5SAlex Xie 	int chansize, numchan;
655d6895ad3SChristian König 	int r;
656e60f8db5SAlex Xie 
6578d6a5230SAlex Deucher 	adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
6588d6a5230SAlex Deucher 	if (!adev->mc.vram_width) {
659e60f8db5SAlex Xie 		/* hbm memory channel size */
660e60f8db5SAlex Xie 		chansize = 128;
661e60f8db5SAlex Xie 
662b9509c80SHuang Rui 		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
663e60f8db5SAlex Xie 		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
664e60f8db5SAlex Xie 		tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
665e60f8db5SAlex Xie 		switch (tmp) {
666e60f8db5SAlex Xie 		case 0:
667e60f8db5SAlex Xie 		default:
668e60f8db5SAlex Xie 			numchan = 1;
669e60f8db5SAlex Xie 			break;
670e60f8db5SAlex Xie 		case 1:
671e60f8db5SAlex Xie 			numchan = 2;
672e60f8db5SAlex Xie 			break;
673e60f8db5SAlex Xie 		case 2:
674e60f8db5SAlex Xie 			numchan = 0;
675e60f8db5SAlex Xie 			break;
676e60f8db5SAlex Xie 		case 3:
677e60f8db5SAlex Xie 			numchan = 4;
678e60f8db5SAlex Xie 			break;
679e60f8db5SAlex Xie 		case 4:
680e60f8db5SAlex Xie 			numchan = 0;
681e60f8db5SAlex Xie 			break;
682e60f8db5SAlex Xie 		case 5:
683e60f8db5SAlex Xie 			numchan = 8;
684e60f8db5SAlex Xie 			break;
685e60f8db5SAlex Xie 		case 6:
686e60f8db5SAlex Xie 			numchan = 0;
687e60f8db5SAlex Xie 			break;
688e60f8db5SAlex Xie 		case 7:
689e60f8db5SAlex Xie 			numchan = 16;
690e60f8db5SAlex Xie 			break;
691e60f8db5SAlex Xie 		case 8:
692e60f8db5SAlex Xie 			numchan = 2;
693e60f8db5SAlex Xie 			break;
694e60f8db5SAlex Xie 		}
695e60f8db5SAlex Xie 		adev->mc.vram_width = numchan * chansize;
6968d6a5230SAlex Deucher 	}
697e60f8db5SAlex Xie 
698e60f8db5SAlex Xie 	/* size in MB on si */
699e60f8db5SAlex Xie 	adev->mc.mc_vram_size =
700bf383fb6SAlex Deucher 		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
701e60f8db5SAlex Xie 	adev->mc.real_vram_size = adev->mc.mc_vram_size;
702d6895ad3SChristian König 
703d6895ad3SChristian König 	if (!(adev->flags & AMD_IS_APU)) {
704d6895ad3SChristian König 		r = amdgpu_device_resize_fb_bar(adev);
705d6895ad3SChristian König 		if (r)
706d6895ad3SChristian König 			return r;
707d6895ad3SChristian König 	}
708d6895ad3SChristian König 	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
709d6895ad3SChristian König 	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
710e60f8db5SAlex Xie 
711e60f8db5SAlex Xie 	/* In case the PCI BAR is larger than the actual amount of vram */
712d6895ad3SChristian König 	adev->mc.visible_vram_size = adev->mc.aper_size;
713e60f8db5SAlex Xie 	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
714e60f8db5SAlex Xie 		adev->mc.visible_vram_size = adev->mc.real_vram_size;
715e60f8db5SAlex Xie 
716c3db7b5aSAlex Deucher 	/* set the gart size */
717c3db7b5aSAlex Deucher 	if (amdgpu_gart_size == -1) {
718c3db7b5aSAlex Deucher 		switch (adev->asic_type) {
719c3db7b5aSAlex Deucher 		case CHIP_VEGA10:  /* all engines support GPUVM */
720c3db7b5aSAlex Deucher 		default:
721c3db7b5aSAlex Deucher 			adev->mc.gart_size = 256ULL << 20;
722c3db7b5aSAlex Deucher 			break;
723c3db7b5aSAlex Deucher 		case CHIP_RAVEN:   /* DCE SG support */
724c3db7b5aSAlex Deucher 			adev->mc.gart_size = 1024ULL << 20;
725c3db7b5aSAlex Deucher 			break;
726c3db7b5aSAlex Deucher 		}
727c3db7b5aSAlex Deucher 	} else {
728c3db7b5aSAlex Deucher 		adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
729c3db7b5aSAlex Deucher 	}
730c3db7b5aSAlex Deucher 
731e60f8db5SAlex Xie 	gmc_v9_0_vram_gtt_location(adev, &adev->mc);
732e60f8db5SAlex Xie 
733e60f8db5SAlex Xie 	return 0;
734e60f8db5SAlex Xie }
735e60f8db5SAlex Xie 
736e60f8db5SAlex Xie static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
737e60f8db5SAlex Xie {
738e60f8db5SAlex Xie 	int r;
739e60f8db5SAlex Xie 
740e60f8db5SAlex Xie 	if (adev->gart.robj) {
741e60f8db5SAlex Xie 		WARN(1, "VEGA10 PCIE GART already initialized\n");
742e60f8db5SAlex Xie 		return 0;
743e60f8db5SAlex Xie 	}
744e60f8db5SAlex Xie 	/* Initialize common gart structure */
745e60f8db5SAlex Xie 	r = amdgpu_gart_init(adev);
746e60f8db5SAlex Xie 	if (r)
747e60f8db5SAlex Xie 		return r;
748e60f8db5SAlex Xie 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
749e60f8db5SAlex Xie 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
750e60f8db5SAlex Xie 				 AMDGPU_PTE_EXECUTABLE;
751e60f8db5SAlex Xie 	return amdgpu_gart_table_vram_alloc(adev);
752e60f8db5SAlex Xie }
753e60f8db5SAlex Xie 
754e60f8db5SAlex Xie static int gmc_v9_0_sw_init(void *handle)
755e60f8db5SAlex Xie {
756e60f8db5SAlex Xie 	int r;
757e60f8db5SAlex Xie 	int dma_bits;
758e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
759e60f8db5SAlex Xie 
7600c8c0847SHuang Rui 	gfxhub_v1_0_init(adev);
76177f6c763SHuang Rui 	mmhub_v1_0_init(adev);
7620c8c0847SHuang Rui 
763e60f8db5SAlex Xie 	spin_lock_init(&adev->mc.invalidate_lock);
764e60f8db5SAlex Xie 
765fd66560bSHawking Zhang 	switch (adev->asic_type) {
766fd66560bSHawking Zhang 	case CHIP_RAVEN:
767e60f8db5SAlex Xie 		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
768b38f41ebSChristian König 		if (adev->rev_id == 0x0 || adev->rev_id == 0x1)
769f3368128SChristian König 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
770b38f41ebSChristian König 		else
771fd66560bSHawking Zhang 			/* vm_size is 64GB for legacy 2-level page support */
772f3368128SChristian König 			amdgpu_vm_adjust_size(adev, 64, 9, 1, 48);
773fd66560bSHawking Zhang 		break;
774fd66560bSHawking Zhang 	case CHIP_VEGA10:
775e60f8db5SAlex Xie 		/* XXX Don't know how to get VRAM type yet. */
776e60f8db5SAlex Xie 		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
77736b32a68SZhang, Jerry 		/*
77836b32a68SZhang, Jerry 		 * To fulfill 4-level page support,
77936b32a68SZhang, Jerry 		 * vm size is 256TB (48bit), maximum size of Vega10,
78036b32a68SZhang, Jerry 		 * block size 512 (9bit)
78136b32a68SZhang, Jerry 		 */
782f3368128SChristian König 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
783fd66560bSHawking Zhang 		break;
784fd66560bSHawking Zhang 	default:
785fd66560bSHawking Zhang 		break;
786fd66560bSHawking Zhang 	}
787fd66560bSHawking Zhang 
788e60f8db5SAlex Xie 	/* This interrupt is VMC page fault.*/
789e60f8db5SAlex Xie 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
790e60f8db5SAlex Xie 				&adev->mc.vm_fault);
791d7c434d3SFelix Kuehling 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
792d7c434d3SFelix Kuehling 				&adev->mc.vm_fault);
793e60f8db5SAlex Xie 
794e60f8db5SAlex Xie 	if (r)
795e60f8db5SAlex Xie 		return r;
796e60f8db5SAlex Xie 
797e60f8db5SAlex Xie 	/* Set the internal MC address mask
798e60f8db5SAlex Xie 	 * This is the max address of the GPU's
799e60f8db5SAlex Xie 	 * internal address space.
800e60f8db5SAlex Xie 	 */
801e60f8db5SAlex Xie 	adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
802e60f8db5SAlex Xie 
803916910adSHuang Rui 	/*
804916910adSHuang Rui 	 * It needs to reserve 8M stolen memory for vega10
805916910adSHuang Rui 	 * TODO: Figure out how to avoid that...
806916910adSHuang Rui 	 */
807916910adSHuang Rui 	adev->mc.stolen_size = 8 * 1024 * 1024;
808916910adSHuang Rui 
809e60f8db5SAlex Xie 	/* set DMA mask + need_dma32 flags.
810e60f8db5SAlex Xie 	 * PCIE - can handle 44-bits.
811e60f8db5SAlex Xie 	 * IGP - can handle 44-bits
812e60f8db5SAlex Xie 	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
813e60f8db5SAlex Xie 	 */
814e60f8db5SAlex Xie 	adev->need_dma32 = false;
815e60f8db5SAlex Xie 	dma_bits = adev->need_dma32 ? 32 : 44;
816e60f8db5SAlex Xie 	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
817e60f8db5SAlex Xie 	if (r) {
818e60f8db5SAlex Xie 		adev->need_dma32 = true;
819e60f8db5SAlex Xie 		dma_bits = 32;
820e60f8db5SAlex Xie 		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
821e60f8db5SAlex Xie 	}
822e60f8db5SAlex Xie 	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
823e60f8db5SAlex Xie 	if (r) {
824e60f8db5SAlex Xie 		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
825e60f8db5SAlex Xie 		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
826e60f8db5SAlex Xie 	}
827e60f8db5SAlex Xie 
828e60f8db5SAlex Xie 	r = gmc_v9_0_mc_init(adev);
829e60f8db5SAlex Xie 	if (r)
830e60f8db5SAlex Xie 		return r;
831e60f8db5SAlex Xie 
832e60f8db5SAlex Xie 	/* Memory manager */
833e60f8db5SAlex Xie 	r = amdgpu_bo_init(adev);
834e60f8db5SAlex Xie 	if (r)
835e60f8db5SAlex Xie 		return r;
836e60f8db5SAlex Xie 
837e60f8db5SAlex Xie 	r = gmc_v9_0_gart_init(adev);
838e60f8db5SAlex Xie 	if (r)
839e60f8db5SAlex Xie 		return r;
840e60f8db5SAlex Xie 
84105ec3edaSChristian König 	/*
84205ec3edaSChristian König 	 * number of VMs
84305ec3edaSChristian König 	 * VMID 0 is reserved for System
84405ec3edaSChristian König 	 * amdgpu graphics/compute will use VMIDs 1-7
84505ec3edaSChristian König 	 * amdkfd will use VMIDs 8-15
84605ec3edaSChristian König 	 */
84705ec3edaSChristian König 	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
84805ec3edaSChristian König 	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
84905ec3edaSChristian König 
85005ec3edaSChristian König 	amdgpu_vm_manager_init(adev);
85105ec3edaSChristian König 
85205ec3edaSChristian König 	return 0;
853e60f8db5SAlex Xie }
854e60f8db5SAlex Xie 
855e60f8db5SAlex Xie /**
856c79ee7d8SMonk Liu  * gmc_v9_0_gart_fini - vm fini callback
857e60f8db5SAlex Xie  *
858e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
859e60f8db5SAlex Xie  *
860e60f8db5SAlex Xie  * Tears down the driver GART/VM setup (CIK).
861e60f8db5SAlex Xie  */
862e60f8db5SAlex Xie static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
863e60f8db5SAlex Xie {
864e60f8db5SAlex Xie 	amdgpu_gart_table_vram_free(adev);
865e60f8db5SAlex Xie 	amdgpu_gart_fini(adev);
866e60f8db5SAlex Xie }
867e60f8db5SAlex Xie 
868e60f8db5SAlex Xie static int gmc_v9_0_sw_fini(void *handle)
869e60f8db5SAlex Xie {
870e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
871e60f8db5SAlex Xie 
872f59548c8SMonk Liu 	amdgpu_gem_force_release(adev);
873e60f8db5SAlex Xie 	amdgpu_vm_manager_fini(adev);
874e60f8db5SAlex Xie 	gmc_v9_0_gart_fini(adev);
875e60f8db5SAlex Xie 	amdgpu_bo_fini(adev);
876e60f8db5SAlex Xie 
877e60f8db5SAlex Xie 	return 0;
878e60f8db5SAlex Xie }
879e60f8db5SAlex Xie 
880e60f8db5SAlex Xie static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
881e60f8db5SAlex Xie {
882946a4d5bSShaoyun Liu 
883e60f8db5SAlex Xie 	switch (adev->asic_type) {
884e60f8db5SAlex Xie 	case CHIP_VEGA10:
885946a4d5bSShaoyun Liu 		soc15_program_register_sequence(adev,
8865c583018SEvan Quan 						golden_settings_mmhub_1_0_0,
887c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
888946a4d5bSShaoyun Liu 		soc15_program_register_sequence(adev,
8895c583018SEvan Quan 						golden_settings_athub_1_0_0,
890c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_athub_1_0_0));
891e60f8db5SAlex Xie 		break;
892e4f3abaaSChunming Zhou 	case CHIP_RAVEN:
893946a4d5bSShaoyun Liu 		soc15_program_register_sequence(adev,
8945c583018SEvan Quan 						golden_settings_athub_1_0_0,
895c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_athub_1_0_0));
896e4f3abaaSChunming Zhou 		break;
897e60f8db5SAlex Xie 	default:
898e60f8db5SAlex Xie 		break;
899e60f8db5SAlex Xie 	}
900e60f8db5SAlex Xie }
901e60f8db5SAlex Xie 
902e60f8db5SAlex Xie /**
903e60f8db5SAlex Xie  * gmc_v9_0_gart_enable - gart enable
904e60f8db5SAlex Xie  *
905e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
906e60f8db5SAlex Xie  */
907e60f8db5SAlex Xie static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
908e60f8db5SAlex Xie {
909e60f8db5SAlex Xie 	int r;
910e60f8db5SAlex Xie 	bool value;
911e60f8db5SAlex Xie 	u32 tmp;
912e60f8db5SAlex Xie 
913e60f8db5SAlex Xie 	amdgpu_program_register_sequence(adev,
914e60f8db5SAlex Xie 		golden_settings_vega10_hdp,
915c47b41a7SChristian König 		ARRAY_SIZE(golden_settings_vega10_hdp));
916e60f8db5SAlex Xie 
917e60f8db5SAlex Xie 	if (adev->gart.robj == NULL) {
918e60f8db5SAlex Xie 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
919e60f8db5SAlex Xie 		return -EINVAL;
920e60f8db5SAlex Xie 	}
921ce1b1b66SMonk Liu 	r = amdgpu_gart_table_vram_pin(adev);
922ce1b1b66SMonk Liu 	if (r)
923ce1b1b66SMonk Liu 		return r;
924e60f8db5SAlex Xie 
9252fcd43ceSHawking Zhang 	switch (adev->asic_type) {
9262fcd43ceSHawking Zhang 	case CHIP_RAVEN:
9272fcd43ceSHawking Zhang 		mmhub_v1_0_initialize_power_gating(adev);
928f8386b35SHawking Zhang 		mmhub_v1_0_update_power_gating(adev, true);
9292fcd43ceSHawking Zhang 		break;
9302fcd43ceSHawking Zhang 	default:
9312fcd43ceSHawking Zhang 		break;
9322fcd43ceSHawking Zhang 	}
9332fcd43ceSHawking Zhang 
934e60f8db5SAlex Xie 	r = gfxhub_v1_0_gart_enable(adev);
935e60f8db5SAlex Xie 	if (r)
936e60f8db5SAlex Xie 		return r;
937e60f8db5SAlex Xie 
938e60f8db5SAlex Xie 	r = mmhub_v1_0_gart_enable(adev);
939e60f8db5SAlex Xie 	if (r)
940e60f8db5SAlex Xie 		return r;
941e60f8db5SAlex Xie 
942846347c9STom St Denis 	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
943e60f8db5SAlex Xie 
944b9509c80SHuang Rui 	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
945b9509c80SHuang Rui 	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
946e60f8db5SAlex Xie 
9471d4e0a8cSMonk Liu 	/* After HDP is initialized, flush HDP.*/
948bf383fb6SAlex Deucher 	adev->nbio_funcs->hdp_flush(adev);
9491d4e0a8cSMonk Liu 
950e60f8db5SAlex Xie 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
951e60f8db5SAlex Xie 		value = false;
952e60f8db5SAlex Xie 	else
953e60f8db5SAlex Xie 		value = true;
954e60f8db5SAlex Xie 
955e60f8db5SAlex Xie 	gfxhub_v1_0_set_fault_enable_default(adev, value);
956e60f8db5SAlex Xie 	mmhub_v1_0_set_fault_enable_default(adev, value);
957e60f8db5SAlex Xie 	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
958e60f8db5SAlex Xie 
959e60f8db5SAlex Xie 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
9606f02a696SChristian König 		 (unsigned)(adev->mc.gart_size >> 20),
961e60f8db5SAlex Xie 		 (unsigned long long)adev->gart.table_addr);
962e60f8db5SAlex Xie 	adev->gart.ready = true;
963e60f8db5SAlex Xie 	return 0;
964e60f8db5SAlex Xie }
965e60f8db5SAlex Xie 
966e60f8db5SAlex Xie static int gmc_v9_0_hw_init(void *handle)
967e60f8db5SAlex Xie {
968e60f8db5SAlex Xie 	int r;
969e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
970e60f8db5SAlex Xie 
971e60f8db5SAlex Xie 	/* The sequence of these two function calls matters.*/
972e60f8db5SAlex Xie 	gmc_v9_0_init_golden_registers(adev);
973e60f8db5SAlex Xie 
974edca2d05SAlex Deucher 	if (adev->mode_info.num_crtc) {
975edca2d05SAlex Deucher 		/* Lockout access through VGA aperture*/
9764d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
977edca2d05SAlex Deucher 
978edca2d05SAlex Deucher 		/* disable VGA render */
9794d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
980edca2d05SAlex Deucher 	}
981edca2d05SAlex Deucher 
982e60f8db5SAlex Xie 	r = gmc_v9_0_gart_enable(adev);
983e60f8db5SAlex Xie 
984e60f8db5SAlex Xie 	return r;
985e60f8db5SAlex Xie }
986e60f8db5SAlex Xie 
987e60f8db5SAlex Xie /**
988e60f8db5SAlex Xie  * gmc_v9_0_gart_disable - gart disable
989e60f8db5SAlex Xie  *
990e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
991e60f8db5SAlex Xie  *
992e60f8db5SAlex Xie  * This disables all VM page table.
993e60f8db5SAlex Xie  */
994e60f8db5SAlex Xie static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
995e60f8db5SAlex Xie {
996e60f8db5SAlex Xie 	gfxhub_v1_0_gart_disable(adev);
997e60f8db5SAlex Xie 	mmhub_v1_0_gart_disable(adev);
998ce1b1b66SMonk Liu 	amdgpu_gart_table_vram_unpin(adev);
999e60f8db5SAlex Xie }
1000e60f8db5SAlex Xie 
1001e60f8db5SAlex Xie static int gmc_v9_0_hw_fini(void *handle)
1002e60f8db5SAlex Xie {
1003e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1004e60f8db5SAlex Xie 
10055dd696aeSTrigger Huang 	if (amdgpu_sriov_vf(adev)) {
10065dd696aeSTrigger Huang 		/* full access mode, so don't touch any GMC register */
10075dd696aeSTrigger Huang 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
10085dd696aeSTrigger Huang 		return 0;
10095dd696aeSTrigger Huang 	}
10105dd696aeSTrigger Huang 
1011e60f8db5SAlex Xie 	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
1012e60f8db5SAlex Xie 	gmc_v9_0_gart_disable(adev);
1013e60f8db5SAlex Xie 
1014e60f8db5SAlex Xie 	return 0;
1015e60f8db5SAlex Xie }
1016e60f8db5SAlex Xie 
1017e60f8db5SAlex Xie static int gmc_v9_0_suspend(void *handle)
1018e60f8db5SAlex Xie {
1019e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1020e60f8db5SAlex Xie 
1021f053cd47STom St Denis 	return gmc_v9_0_hw_fini(adev);
1022e60f8db5SAlex Xie }
1023e60f8db5SAlex Xie 
1024e60f8db5SAlex Xie static int gmc_v9_0_resume(void *handle)
1025e60f8db5SAlex Xie {
1026e60f8db5SAlex Xie 	int r;
1027e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1028e60f8db5SAlex Xie 
1029e60f8db5SAlex Xie 	r = gmc_v9_0_hw_init(adev);
1030e60f8db5SAlex Xie 	if (r)
1031e60f8db5SAlex Xie 		return r;
1032e60f8db5SAlex Xie 
103332601d48SChristian König 	amdgpu_vm_reset_all_ids(adev);
1034e60f8db5SAlex Xie 
103532601d48SChristian König 	return 0;
1036e60f8db5SAlex Xie }
1037e60f8db5SAlex Xie 
1038e60f8db5SAlex Xie static bool gmc_v9_0_is_idle(void *handle)
1039e60f8db5SAlex Xie {
1040e60f8db5SAlex Xie 	/* MC is always ready in GMC v9.*/
1041e60f8db5SAlex Xie 	return true;
1042e60f8db5SAlex Xie }
1043e60f8db5SAlex Xie 
1044e60f8db5SAlex Xie static int gmc_v9_0_wait_for_idle(void *handle)
1045e60f8db5SAlex Xie {
1046e60f8db5SAlex Xie 	/* There is no need to wait for MC idle in GMC v9.*/
1047e60f8db5SAlex Xie 	return 0;
1048e60f8db5SAlex Xie }
1049e60f8db5SAlex Xie 
1050e60f8db5SAlex Xie static int gmc_v9_0_soft_reset(void *handle)
1051e60f8db5SAlex Xie {
1052e60f8db5SAlex Xie 	/* XXX for emulation.*/
1053e60f8db5SAlex Xie 	return 0;
1054e60f8db5SAlex Xie }
1055e60f8db5SAlex Xie 
1056e60f8db5SAlex Xie static int gmc_v9_0_set_clockgating_state(void *handle,
1057e60f8db5SAlex Xie 					enum amd_clockgating_state state)
1058e60f8db5SAlex Xie {
1059d5583d4fSHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1060d5583d4fSHuang Rui 
1061d5583d4fSHuang Rui 	return mmhub_v1_0_set_clockgating(adev, state);
1062e60f8db5SAlex Xie }
1063e60f8db5SAlex Xie 
106413052be5SHuang Rui static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
106513052be5SHuang Rui {
106613052be5SHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
106713052be5SHuang Rui 
106813052be5SHuang Rui 	mmhub_v1_0_get_clockgating(adev, flags);
106913052be5SHuang Rui }
107013052be5SHuang Rui 
1071e60f8db5SAlex Xie static int gmc_v9_0_set_powergating_state(void *handle,
1072e60f8db5SAlex Xie 					enum amd_powergating_state state)
1073e60f8db5SAlex Xie {
1074e60f8db5SAlex Xie 	return 0;
1075e60f8db5SAlex Xie }
1076e60f8db5SAlex Xie 
1077e60f8db5SAlex Xie const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
1078e60f8db5SAlex Xie 	.name = "gmc_v9_0",
1079e60f8db5SAlex Xie 	.early_init = gmc_v9_0_early_init,
1080e60f8db5SAlex Xie 	.late_init = gmc_v9_0_late_init,
1081e60f8db5SAlex Xie 	.sw_init = gmc_v9_0_sw_init,
1082e60f8db5SAlex Xie 	.sw_fini = gmc_v9_0_sw_fini,
1083e60f8db5SAlex Xie 	.hw_init = gmc_v9_0_hw_init,
1084e60f8db5SAlex Xie 	.hw_fini = gmc_v9_0_hw_fini,
1085e60f8db5SAlex Xie 	.suspend = gmc_v9_0_suspend,
1086e60f8db5SAlex Xie 	.resume = gmc_v9_0_resume,
1087e60f8db5SAlex Xie 	.is_idle = gmc_v9_0_is_idle,
1088e60f8db5SAlex Xie 	.wait_for_idle = gmc_v9_0_wait_for_idle,
1089e60f8db5SAlex Xie 	.soft_reset = gmc_v9_0_soft_reset,
1090e60f8db5SAlex Xie 	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
1091e60f8db5SAlex Xie 	.set_powergating_state = gmc_v9_0_set_powergating_state,
109213052be5SHuang Rui 	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
1093e60f8db5SAlex Xie };
1094e60f8db5SAlex Xie 
1095e60f8db5SAlex Xie const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
1096e60f8db5SAlex Xie {
1097e60f8db5SAlex Xie 	.type = AMD_IP_BLOCK_TYPE_GMC,
1098e60f8db5SAlex Xie 	.major = 9,
1099e60f8db5SAlex Xie 	.minor = 0,
1100e60f8db5SAlex Xie 	.rev = 0,
1101e60f8db5SAlex Xie 	.funcs = &gmc_v9_0_ip_funcs,
1102e60f8db5SAlex Xie };
1103