14d77b7e5SLe Ma /*
24d77b7e5SLe Ma  * Copyright 2022 Advanced Micro Devices, Inc.
34d77b7e5SLe Ma  *
44d77b7e5SLe Ma  * Permission is hereby granted, free of charge, to any person obtaining a
54d77b7e5SLe Ma  * copy of this software and associated documentation files (the "Software"),
64d77b7e5SLe Ma  * to deal in the Software without restriction, including without limitation
74d77b7e5SLe Ma  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
84d77b7e5SLe Ma  * and/or sell copies of the Software, and to permit persons to whom the
94d77b7e5SLe Ma  * Software is furnished to do so, subject to the following conditions:
104d77b7e5SLe Ma  *
114d77b7e5SLe Ma  * The above copyright notice and this permission notice shall be included in
124d77b7e5SLe Ma  * all copies or substantial portions of the Software.
134d77b7e5SLe Ma  *
144d77b7e5SLe Ma  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
154d77b7e5SLe Ma  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
164d77b7e5SLe Ma  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
174d77b7e5SLe Ma  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
184d77b7e5SLe Ma  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
194d77b7e5SLe Ma  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
204d77b7e5SLe Ma  * OTHER DEALINGS IN THE SOFTWARE.
214d77b7e5SLe Ma  *
224d77b7e5SLe Ma  */
234d77b7e5SLe Ma #include "amdgpu.h"
244d77b7e5SLe Ma #include "mmhub_v1_8.h"
254d77b7e5SLe Ma 
264d77b7e5SLe Ma #include "mmhub/mmhub_1_8_0_offset.h"
274d77b7e5SLe Ma #include "mmhub/mmhub_1_8_0_sh_mask.h"
284d77b7e5SLe Ma #include "vega10_enum.h"
294d77b7e5SLe Ma 
304d77b7e5SLe Ma #include "soc15_common.h"
314d77b7e5SLe Ma #include "soc15.h"
32bc069d82SHawking Zhang #include "amdgpu_ras.h"
334d77b7e5SLe Ma 
344d77b7e5SLe Ma #define regVM_L2_CNTL3_DEFAULT	0x80100007
354d77b7e5SLe Ma #define regVM_L2_CNTL4_DEFAULT	0x000000c1
364d77b7e5SLe Ma 
374d77b7e5SLe Ma static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
384d77b7e5SLe Ma {
394d77b7e5SLe Ma 	u64 base = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE);
404d77b7e5SLe Ma 	u64 top = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP);
414d77b7e5SLe Ma 
424d77b7e5SLe Ma 	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
434d77b7e5SLe Ma 	base <<= 24;
444d77b7e5SLe Ma 
454d77b7e5SLe Ma 	top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
464d77b7e5SLe Ma 	top <<= 24;
474d77b7e5SLe Ma 
484d77b7e5SLe Ma 	adev->gmc.fb_start = base;
494d77b7e5SLe Ma 	adev->gmc.fb_end = top;
504d77b7e5SLe Ma 
514d77b7e5SLe Ma 	return base;
524d77b7e5SLe Ma }
534d77b7e5SLe Ma 
544d77b7e5SLe Ma static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
554d77b7e5SLe Ma 				uint64_t page_table_base)
564d77b7e5SLe Ma {
573a108387SLe Ma 	struct amdgpu_vmhub *hub;
587a1efad0SLijo Lazar 	u32 inst_mask;
593a108387SLe Ma 	int i;
604d77b7e5SLe Ma 
617a1efad0SLijo Lazar 	inst_mask = adev->aid_mask;
627a1efad0SLijo Lazar 	for_each_inst(i, inst_mask) {
633a108387SLe Ma 		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
643a108387SLe Ma 		WREG32_SOC15_OFFSET(MMHUB, i,
653a108387SLe Ma 				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
663a108387SLe Ma 				    hub->ctx_addr_distance * vmid,
673a108387SLe Ma 				    lower_32_bits(page_table_base));
684d77b7e5SLe Ma 
693a108387SLe Ma 		WREG32_SOC15_OFFSET(MMHUB, i,
703a108387SLe Ma 				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
713a108387SLe Ma 				    hub->ctx_addr_distance * vmid,
723a108387SLe Ma 				    upper_32_bits(page_table_base));
733a108387SLe Ma 	}
744d77b7e5SLe Ma }
754d77b7e5SLe Ma 
764d77b7e5SLe Ma static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
774d77b7e5SLe Ma {
784d77b7e5SLe Ma 	uint64_t pt_base;
797a1efad0SLijo Lazar 	u32 inst_mask;
803a108387SLe Ma 	int i;
814d77b7e5SLe Ma 
824d77b7e5SLe Ma 	if (adev->gmc.pdb0_bo)
834d77b7e5SLe Ma 		pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
844d77b7e5SLe Ma 	else
854d77b7e5SLe Ma 		pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
864d77b7e5SLe Ma 
874d77b7e5SLe Ma 	mmhub_v1_8_setup_vm_pt_regs(adev, 0, pt_base);
884d77b7e5SLe Ma 
894d77b7e5SLe Ma 	/* If use GART for FB translation, vmid0 page table covers both
904d77b7e5SLe Ma 	 * vram and system memory (gart)
914d77b7e5SLe Ma 	 */
927a1efad0SLijo Lazar 	inst_mask = adev->aid_mask;
937a1efad0SLijo Lazar 	for_each_inst(i, inst_mask) {
944d77b7e5SLe Ma 		if (adev->gmc.pdb0_bo) {
953a108387SLe Ma 			WREG32_SOC15(MMHUB, i,
963a108387SLe Ma 				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
974d77b7e5SLe Ma 				     (u32)(adev->gmc.fb_start >> 12));
983a108387SLe Ma 			WREG32_SOC15(MMHUB, i,
993a108387SLe Ma 				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
1004d77b7e5SLe Ma 				     (u32)(adev->gmc.fb_start >> 44));
1014d77b7e5SLe Ma 
1023a108387SLe Ma 			WREG32_SOC15(MMHUB, i,
1033a108387SLe Ma 				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
1044d77b7e5SLe Ma 				     (u32)(adev->gmc.gart_end >> 12));
1053a108387SLe Ma 			WREG32_SOC15(MMHUB, i,
1063a108387SLe Ma 				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
1074d77b7e5SLe Ma 				     (u32)(adev->gmc.gart_end >> 44));
1084d77b7e5SLe Ma 
1094d77b7e5SLe Ma 		} else {
1103a108387SLe Ma 			WREG32_SOC15(MMHUB, i,
1113a108387SLe Ma 				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
1124d77b7e5SLe Ma 				     (u32)(adev->gmc.gart_start >> 12));
1133a108387SLe Ma 			WREG32_SOC15(MMHUB, i,
1143a108387SLe Ma 				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
1154d77b7e5SLe Ma 				     (u32)(adev->gmc.gart_start >> 44));
1164d77b7e5SLe Ma 
1173a108387SLe Ma 			WREG32_SOC15(MMHUB, i,
1183a108387SLe Ma 				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
1194d77b7e5SLe Ma 				     (u32)(adev->gmc.gart_end >> 12));
1203a108387SLe Ma 			WREG32_SOC15(MMHUB, i,
1213a108387SLe Ma 				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
1224d77b7e5SLe Ma 				     (u32)(adev->gmc.gart_end >> 44));
1234d77b7e5SLe Ma 		}
1244d77b7e5SLe Ma 	}
1253a108387SLe Ma }
1264d77b7e5SLe Ma 
/* Program the system aperture on every MMHUB instance: AGP BAR, low/high
 * logical page numbers, default page address and the protection fault
 * default address.
 */
static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	uint64_t value;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		/* Program the AGP BAR */
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BASE, 0);
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT,
			     adev->gmc.agp_start >> 24);
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP,
			     adev->gmc.agp_end >> 24);

		/* NOTE(review): this returns from inside the per-instance
		 * loop, so on SRIOV VFs only the first instance gets its AGP
		 * BAR programmed — confirm that is intended rather than a
		 * `continue`/hoisted check.
		 */
		if (amdgpu_sriov_vf(adev))
			return;

		/* Program the system aperture low logical page number. */
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

		/* In the case squeezing vram into GART aperture, we don't use
		 * FB aperture and AGP aperture. Disable them.
		 */
		if (adev->gmc.pdb0_bo) {
			/* BOT > TOP / BASE > TOP leave the apertures empty */
			WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT, 0xFFFFFF);
			WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP, 0);
			WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_TOP, 0);
			WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_BASE,
				     0x00FFFFFF);
			WREG32_SOC15(MMHUB, i,
				     regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
				     0x3FFFFFFF);
			WREG32_SOC15(MMHUB, i,
				     regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
		}

		/* Set default page address. */
		value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			     (u32)(value >> 12));
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			     (u32)(value >> 44));

		/* Program "protection fault": faulting accesses are redirected
		 * to the dummy page.
		 */
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			     (u32)(adev->dummy_page_addr >> 12));
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			     (u32)((u64)adev->dummy_page_addr >> 44));

		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
				    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
		WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
	}
}
1894d77b7e5SLe Ma 
1904d77b7e5SLe Ma static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
1914d77b7e5SLe Ma {
1927a1efad0SLijo Lazar 	uint32_t tmp, inst_mask;
1933a108387SLe Ma 	int i;
1944d77b7e5SLe Ma 
1954d77b7e5SLe Ma 	/* Setup TLB control */
1967a1efad0SLijo Lazar 	inst_mask = adev->aid_mask;
1977a1efad0SLijo Lazar 	for_each_inst(i, inst_mask) {
1983a108387SLe Ma 		tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);
1994d77b7e5SLe Ma 
2003a108387SLe Ma 		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
2013a108387SLe Ma 				    1);
2023a108387SLe Ma 		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
2033a108387SLe Ma 				    SYSTEM_ACCESS_MODE, 3);
2044d77b7e5SLe Ma 		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
2054d77b7e5SLe Ma 				    ENABLE_ADVANCED_DRIVER_MODEL, 1);
2064d77b7e5SLe Ma 		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
2074d77b7e5SLe Ma 				    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
2084d77b7e5SLe Ma 		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
2094d77b7e5SLe Ma 				    MTYPE, MTYPE_UC);/* XXX for emulation. */
2104d77b7e5SLe Ma 		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
2114d77b7e5SLe Ma 
2123a108387SLe Ma 		WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
2133a108387SLe Ma 	}
2144d77b7e5SLe Ma }
2154d77b7e5SLe Ma 
/* Configure the MMHUB L2 page table cache (VM_L2_CNTL..VM_L2_CNTL4) on
 * every instance.  Skipped entirely on SRIOV VFs.
 */
static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    ENABLE_L2_FRAGMENT_PROCESSING, 1);
		/* XXX for emulation, Refer to closed source code.*/
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION,
				    0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    CONTEXT1_IDENTITY_ACCESS_MODE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    IDENTITY_MODE_FRAGMENT_SIZE, 0);
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL, tmp);

		/* Invalidate all TLBs and the whole L2 cache once at setup. */
		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL2, tmp);

		/* Bank select and big-K fragment size differ depending on
		 * whether the extra translation level is enabled.
		 */
		tmp = regVM_L2_CNTL3_DEFAULT;
		if (adev->gmc.translate_further) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
		}
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL3, tmp);

		tmp = regVM_L2_CNTL4_DEFAULT;
		/* For AMD APP APUs setup WC memory */
		if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
		}
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL4, tmp);
	}
}
2764d77b7e5SLe Ma 
2774d77b7e5SLe Ma static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev)
2784d77b7e5SLe Ma {
2797a1efad0SLijo Lazar 	uint32_t tmp, inst_mask;
2803a108387SLe Ma 	int i;
2814d77b7e5SLe Ma 
2827a1efad0SLijo Lazar 	inst_mask = adev->aid_mask;
2837a1efad0SLijo Lazar 	for_each_inst(i, inst_mask) {
2843a108387SLe Ma 		tmp = RREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL);
2854d77b7e5SLe Ma 		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
2864d77b7e5SLe Ma 		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
2874d77b7e5SLe Ma 				adev->gmc.vmid0_page_table_depth);
2883a108387SLe Ma 		tmp = REG_SET_FIELD(tmp,
2893a108387SLe Ma 				    VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
2904d77b7e5SLe Ma 				    adev->gmc.vmid0_page_table_block_size);
2914d77b7e5SLe Ma 		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
2924d77b7e5SLe Ma 				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
2933a108387SLe Ma 		WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL, tmp);
2943a108387SLe Ma 	}
2954d77b7e5SLe Ma }
2964d77b7e5SLe Ma 
2974d77b7e5SLe Ma static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev)
2984d77b7e5SLe Ma {
2997a1efad0SLijo Lazar 	u32 inst_mask;
3003a108387SLe Ma 	int i;
3013a108387SLe Ma 
3024d77b7e5SLe Ma 	if (amdgpu_sriov_vf(adev))
3034d77b7e5SLe Ma 		return;
3044d77b7e5SLe Ma 
3057a1efad0SLijo Lazar 	inst_mask = adev->aid_mask;
3067a1efad0SLijo Lazar 	for_each_inst(i, inst_mask) {
3073a108387SLe Ma 		WREG32_SOC15(MMHUB, i,
3083a108387SLe Ma 			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
3093a108387SLe Ma 			     0XFFFFFFFF);
3103a108387SLe Ma 		WREG32_SOC15(MMHUB, i,
3113a108387SLe Ma 			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
3123a108387SLe Ma 			     0x0000000F);
3134d77b7e5SLe Ma 
3143a108387SLe Ma 		WREG32_SOC15(MMHUB, i,
3153a108387SLe Ma 			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
3163a108387SLe Ma 			     0);
3173a108387SLe Ma 		WREG32_SOC15(MMHUB, i,
3183a108387SLe Ma 			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
3193a108387SLe Ma 			     0);
3204d77b7e5SLe Ma 
3213a108387SLe Ma 		WREG32_SOC15(MMHUB, i,
3223a108387SLe Ma 			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
3233a108387SLe Ma 		WREG32_SOC15(MMHUB, i,
3243a108387SLe Ma 			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
3253a108387SLe Ma 	}
3264d77b7e5SLe Ma }
3274d77b7e5SLe Ma 
/* Configure VM contexts 1..15 (the user VMIDs) on every MMHUB instance:
 * enable each context, program page table depth/block size, turn on all
 * protection fault "enable default" bits and retry faults, and open the
 * context aperture to the full [0, max_pfn) range.
 */
static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	unsigned int num_level, block_size;
	uint32_t tmp, inst_mask;
	int i, j;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	/* Adjust depth/block size when the extra translation level is in
	 * use (translate_further): one level fewer, otherwise 9 fewer
	 * block-size bits.
	 */
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		/* contexts 1..15 live at regVM_CONTEXT1_* + i * distance */
		for (i = 0; i <= 14; i++) {
			tmp = RREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
						  i);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    ENABLE_CONTEXT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_DEPTH, num_level);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_BLOCK_SIZE,
					    block_size);
			/* On 9.4.3, XNACK can be enabled in the SQ
			 * per-process. Retry faults need to be enabled for
			 * that to work.
			 */
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
			WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
					    i * hub->ctx_distance, tmp);
			/* aperture: start at 0 ... */
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				i * hub->ctx_addr_distance, 0);
			/* ... and end at the last manageable page frame */
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				i * hub->ctx_addr_distance,
				lower_32_bits(adev->vm_manager.max_pfn - 1));
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				i * hub->ctx_addr_distance,
				upper_32_bits(adev->vm_manager.max_pfn - 1));
		}
	}
}
3944d77b7e5SLe Ma 
3954d77b7e5SLe Ma static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev)
3964d77b7e5SLe Ma {
3973a108387SLe Ma 	struct amdgpu_vmhub *hub;
3987a1efad0SLijo Lazar 	u32 i, j, inst_mask;
3994d77b7e5SLe Ma 
4007a1efad0SLijo Lazar 	inst_mask = adev->aid_mask;
4017a1efad0SLijo Lazar 	for_each_inst(j, inst_mask) {
4023a108387SLe Ma 		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
4034d77b7e5SLe Ma 		for (i = 0; i < 18; ++i) {
4043a108387SLe Ma 			WREG32_SOC15_OFFSET(MMHUB, j,
4053a108387SLe Ma 					regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
4064d77b7e5SLe Ma 					i * hub->eng_addr_distance, 0xffffffff);
4073a108387SLe Ma 			WREG32_SOC15_OFFSET(MMHUB, j,
4083a108387SLe Ma 					regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
4094d77b7e5SLe Ma 					i * hub->eng_addr_distance, 0x1f);
4104d77b7e5SLe Ma 		}
4114d77b7e5SLe Ma 	}
4123a108387SLe Ma }
4134d77b7e5SLe Ma 
/* Bring up GART/VM translation on all MMHUB instances: program the
 * apertures, TLB and L2 cache, then enable the system domain and the
 * per-VMID contexts and invalidation engines.  Always returns 0.
 */
static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
		 * VF copy registers so vbios post doesn't program them, for
		 * SRIOV driver need to program them
		 */
		WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE,
			     adev->gmc.vram_start >> 24);
		WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP,
			     adev->gmc.vram_end >> 24);
	}

	/* GART Enable. */
	mmhub_v1_8_init_gart_aperture_regs(adev);
	mmhub_v1_8_init_system_aperture_regs(adev);
	mmhub_v1_8_init_tlb_regs(adev);
	mmhub_v1_8_init_cache_regs(adev);

	mmhub_v1_8_enable_system_domain(adev);
	mmhub_v1_8_disable_identity_aperture(adev);
	mmhub_v1_8_setup_vmid_config(adev);
	mmhub_v1_8_program_invalidation(adev);

	return 0;
}
4414d77b7e5SLe Ma 
4424d77b7e5SLe Ma static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
4434d77b7e5SLe Ma {
4443a108387SLe Ma 	struct amdgpu_vmhub *hub;
4454d77b7e5SLe Ma 	u32 tmp;
4467a1efad0SLijo Lazar 	u32 i, j, inst_mask;
4474d77b7e5SLe Ma 
4484d77b7e5SLe Ma 	/* Disable all tables */
4497a1efad0SLijo Lazar 	inst_mask = adev->aid_mask;
4507a1efad0SLijo Lazar 	for_each_inst(j, inst_mask) {
4513a108387SLe Ma 		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
4524d77b7e5SLe Ma 		for (i = 0; i < 16; i++)
4533a108387SLe Ma 			WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL,
4544d77b7e5SLe Ma 					    i * hub->ctx_distance, 0);
4554d77b7e5SLe Ma 
4564d77b7e5SLe Ma 		/* Setup TLB control */
4573a108387SLe Ma 		tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL);
4583a108387SLe Ma 		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
4593a108387SLe Ma 				    0);
4604d77b7e5SLe Ma 		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
4614d77b7e5SLe Ma 				    ENABLE_ADVANCED_DRIVER_MODEL, 0);
4623a108387SLe Ma 		WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp);
4634d77b7e5SLe Ma 
4644d77b7e5SLe Ma 		if (!amdgpu_sriov_vf(adev)) {
4654d77b7e5SLe Ma 			/* Setup L2 cache */
4663a108387SLe Ma 			tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL);
4673a108387SLe Ma 			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE,
4683a108387SLe Ma 					    0);
4693a108387SLe Ma 			WREG32_SOC15(MMHUB, j, regVM_L2_CNTL, tmp);
4703a108387SLe Ma 			WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0);
4713a108387SLe Ma 		}
4724d77b7e5SLe Ma 	}
4734d77b7e5SLe Ma }
4744d77b7e5SLe Ma 
/**
 * mmhub_v1_8_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 *
 * Applied to every MMHUB instance in adev->aid_mask; no-op on SRIOV VFs.
 */
static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp, inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		/* When faults are not redirected, make both retry and
		 * no-retry faults crash instead.
		 */
		if (!value) {
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_NO_RETRY_FAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_RETRY_FAULT, 1);
		}

		WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
	}
}
5254d77b7e5SLe Ma 
5264d77b7e5SLe Ma static void mmhub_v1_8_init(struct amdgpu_device *adev)
5274d77b7e5SLe Ma {
5283a108387SLe Ma 	struct amdgpu_vmhub *hub;
5297a1efad0SLijo Lazar 	u32 inst_mask;
5303a108387SLe Ma 	int i;
5314d77b7e5SLe Ma 
5327a1efad0SLijo Lazar 	inst_mask = adev->aid_mask;
5337a1efad0SLijo Lazar 	for_each_inst(i, inst_mask) {
5343a108387SLe Ma 		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
5353a108387SLe Ma 
5364bc615a5SLe Ma 		hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, i,
5374d77b7e5SLe Ma 			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
5384bc615a5SLe Ma 		hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, i,
5394d77b7e5SLe Ma 			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
5404d77b7e5SLe Ma 		hub->vm_inv_eng0_req =
5414bc615a5SLe Ma 			SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_REQ);
5424d77b7e5SLe Ma 		hub->vm_inv_eng0_ack =
5434bc615a5SLe Ma 			SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_ACK);
5444d77b7e5SLe Ma 		hub->vm_context0_cntl =
5454bc615a5SLe Ma 			SOC15_REG_OFFSET(MMHUB, i, regVM_CONTEXT0_CNTL);
5464bc615a5SLe Ma 		hub->vm_l2_pro_fault_status = SOC15_REG_OFFSET(MMHUB, i,
5473a108387SLe Ma 			regVM_L2_PROTECTION_FAULT_STATUS);
5484bc615a5SLe Ma 		hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, i,
5493a108387SLe Ma 			regVM_L2_PROTECTION_FAULT_CNTL);
5504d77b7e5SLe Ma 
5514d77b7e5SLe Ma 		hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
5523a108387SLe Ma 		hub->ctx_addr_distance =
5533a108387SLe Ma 			regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
5544d77b7e5SLe Ma 			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
5553a108387SLe Ma 		hub->eng_distance = regVM_INVALIDATE_ENG1_REQ -
5563a108387SLe Ma 			regVM_INVALIDATE_ENG0_REQ;
5574d77b7e5SLe Ma 		hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
5584d77b7e5SLe Ma 			regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
5593a108387SLe Ma 	}
5604d77b7e5SLe Ma }
5614d77b7e5SLe Ma 
5624d77b7e5SLe Ma static int mmhub_v1_8_set_clockgating(struct amdgpu_device *adev,
5634d77b7e5SLe Ma 				      enum amd_clockgating_state state)
5644d77b7e5SLe Ma {
5654d77b7e5SLe Ma 	return 0;
5664d77b7e5SLe Ma }
5674d77b7e5SLe Ma 
/* No MMHUB clockgating flags are reported for this IP version;
 * @flags is left untouched.
 */
static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{

}
5724d77b7e5SLe Ma 
/* MMHUB callback table for IP version 1.8, wired up by the GMC code. */
const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
	.get_fb_location = mmhub_v1_8_get_fb_location,
	.init = mmhub_v1_8_init,
	.gart_enable = mmhub_v1_8_gart_enable,
	.set_fault_enable_default = mmhub_v1_8_set_fault_enable_default,
	.gart_disable = mmhub_v1_8_gart_disable,
	.setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs,
	.set_clockgating = mmhub_v1_8_set_clockgating,
	.get_clockgating = mmhub_v1_8_get_clockgating,
};
583bc069d82SHawking Zhang 
/* Correctable-error status register pairs: one per MMEA instance plus
 * MM_CANE.  Only the MMEA entries carry the INFO/STATUS valid flags.
 */
static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = {
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_CE_ERR_STATUS_LO, regMMEA0_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_CE_ERR_STATUS_LO, regMMEA1_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_CE_ERR_STATUS_LO, regMMEA2_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_CE_ERR_STATUS_LO, regMMEA3_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_CE_ERR_STATUS_LO, regMMEA4_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_CE_ERR_STATUS_LO, regMM_CANE_CE_ERR_STATUS_HI),
	1, 0, "MM_CANE"},
};
598bc069d82SHawking Zhang 
/* Uncorrectable-error status register pairs; mirrors the CE list above
 * with the UE register variants.
 */
static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ue_reg_list[] = {
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_UE_ERR_STATUS_LO, regMMEA0_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_UE_ERR_STATUS_LO, regMMEA1_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_UE_ERR_STATUS_LO, regMMEA2_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_UE_ERR_STATUS_LO, regMMEA3_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_UE_ERR_STATUS_LO, regMMEA4_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_UE_ERR_STATUS_LO, regMM_CANE_UE_ERR_STATUS_HI),
	1, 0, "MM_CANE"},
};
613bc069d82SHawking Zhang 
/* Map of MMHUB RAS memory-block IDs to the human-readable names used
 * when decoding error status into log output.  Passed alongside the
 * CE/UE register lists to amdgpu_ras_inst_query_ras_error_count().
 */
static const struct amdgpu_ras_memory_id_entry mmhub_v1_8_ras_memory_list[] = {
	{AMDGPU_MMHUB_WGMI_PAGEMEM, "MMEA_WGMI_PAGEMEM"},
	{AMDGPU_MMHUB_RGMI_PAGEMEM, "MMEA_RGMI_PAGEMEM"},
	{AMDGPU_MMHUB_WDRAM_PAGEMEM, "MMEA_WDRAM_PAGEMEM"},
	{AMDGPU_MMHUB_RDRAM_PAGEMEM, "MMEA_RDRAM_PAGEMEM"},
	{AMDGPU_MMHUB_WIO_CMDMEM, "MMEA_WIO_CMDMEM"},
	{AMDGPU_MMHUB_RIO_CMDMEM, "MMEA_RIO_CMDMEM"},
	{AMDGPU_MMHUB_WGMI_CMDMEM, "MMEA_WGMI_CMDMEM"},
	{AMDGPU_MMHUB_RGMI_CMDMEM, "MMEA_RGMI_CMDMEM"},
	{AMDGPU_MMHUB_WDRAM_CMDMEM, "MMEA_WDRAM_CMDMEM"},
	{AMDGPU_MMHUB_RDRAM_CMDMEM, "MMEA_RDRAM_CMDMEM"},
	{AMDGPU_MMHUB_MAM_DMEM0, "MMEA_MAM_DMEM0"},
	{AMDGPU_MMHUB_MAM_DMEM1, "MMEA_MAM_DMEM1"},
	{AMDGPU_MMHUB_MAM_DMEM2, "MMEA_MAM_DMEM2"},
	{AMDGPU_MMHUB_MAM_DMEM3, "MMEA_MAM_DMEM3"},
	{AMDGPU_MMHUB_WRET_TAGMEM, "MMEA_WRET_TAGMEM"},
	{AMDGPU_MMHUB_RRET_TAGMEM, "MMEA_RRET_TAGMEM"},
	{AMDGPU_MMHUB_WIO_DATAMEM, "MMEA_WIO_DATAMEM"},
	{AMDGPU_MMHUB_WGMI_DATAMEM, "MMEA_WGMI_DATAMEM"},
	{AMDGPU_MMHUB_WDRAM_DATAMEM, "MMEA_WDRAM_DATAMEM"},
};
635bc069d82SHawking Zhang 
636bc069d82SHawking Zhang static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev,
637bc069d82SHawking Zhang 						  uint32_t mmhub_inst,
638bc069d82SHawking Zhang 						  void *ras_err_status)
639bc069d82SHawking Zhang {
640bc069d82SHawking Zhang 	struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;
641bc069d82SHawking Zhang 
642bc069d82SHawking Zhang 	amdgpu_ras_inst_query_ras_error_count(adev,
643bc069d82SHawking Zhang 					mmhub_v1_8_ce_reg_list,
644bc069d82SHawking Zhang 					ARRAY_SIZE(mmhub_v1_8_ce_reg_list),
645bc069d82SHawking Zhang 					mmhub_v1_8_ras_memory_list,
646bc069d82SHawking Zhang 					ARRAY_SIZE(mmhub_v1_8_ras_memory_list),
647bc069d82SHawking Zhang 					mmhub_inst,
648bc069d82SHawking Zhang 					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
649bc069d82SHawking Zhang 					&err_data->ce_count);
650bc069d82SHawking Zhang 	amdgpu_ras_inst_query_ras_error_count(adev,
651bc069d82SHawking Zhang 					mmhub_v1_8_ue_reg_list,
652bc069d82SHawking Zhang 					ARRAY_SIZE(mmhub_v1_8_ue_reg_list),
653bc069d82SHawking Zhang 					mmhub_v1_8_ras_memory_list,
654bc069d82SHawking Zhang 					ARRAY_SIZE(mmhub_v1_8_ras_memory_list),
655bc069d82SHawking Zhang 					mmhub_inst,
656bc069d82SHawking Zhang 					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
657bc069d82SHawking Zhang 					&err_data->ue_count);
658bc069d82SHawking Zhang }
659bc069d82SHawking Zhang 
660bc069d82SHawking Zhang static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev,
661bc069d82SHawking Zhang 					     void *ras_err_status)
662bc069d82SHawking Zhang {
663bc069d82SHawking Zhang 	uint32_t inst_mask;
664bc069d82SHawking Zhang 	uint32_t i;
665bc069d82SHawking Zhang 
666bc069d82SHawking Zhang 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
667bc069d82SHawking Zhang 		dev_warn(adev->dev, "MMHUB RAS is not supported\n");
668bc069d82SHawking Zhang 		return;
669bc069d82SHawking Zhang 	}
670bc069d82SHawking Zhang 
671bc069d82SHawking Zhang 	inst_mask = adev->aid_mask;
672bc069d82SHawking Zhang 	for_each_inst(i, inst_mask)
673bc069d82SHawking Zhang 		mmhub_v1_8_inst_query_ras_error_count(adev, i, ras_err_status);
674bc069d82SHawking Zhang }
675a0cdb3d0SHawking Zhang 
676a0cdb3d0SHawking Zhang static void mmhub_v1_8_inst_reset_ras_error_count(struct amdgpu_device *adev,
677a0cdb3d0SHawking Zhang 						  uint32_t mmhub_inst)
678a0cdb3d0SHawking Zhang {
679a0cdb3d0SHawking Zhang 	amdgpu_ras_inst_reset_ras_error_count(adev,
680a0cdb3d0SHawking Zhang 					mmhub_v1_8_ce_reg_list,
681a0cdb3d0SHawking Zhang 					ARRAY_SIZE(mmhub_v1_8_ce_reg_list),
682a0cdb3d0SHawking Zhang 					mmhub_inst);
683a0cdb3d0SHawking Zhang 	amdgpu_ras_inst_reset_ras_error_count(adev,
684a0cdb3d0SHawking Zhang 					mmhub_v1_8_ue_reg_list,
685a0cdb3d0SHawking Zhang 					ARRAY_SIZE(mmhub_v1_8_ue_reg_list),
686a0cdb3d0SHawking Zhang 					mmhub_inst);
687a0cdb3d0SHawking Zhang }
688a0cdb3d0SHawking Zhang 
689a0cdb3d0SHawking Zhang static void mmhub_v1_8_reset_ras_error_count(struct amdgpu_device *adev)
690a0cdb3d0SHawking Zhang {
691a0cdb3d0SHawking Zhang 	uint32_t inst_mask;
692a0cdb3d0SHawking Zhang 	uint32_t i;
693a0cdb3d0SHawking Zhang 
694a0cdb3d0SHawking Zhang 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
695a0cdb3d0SHawking Zhang 		dev_warn(adev->dev, "MMHUB RAS is not supported\n");
696a0cdb3d0SHawking Zhang 		return;
697a0cdb3d0SHawking Zhang 	}
698a0cdb3d0SHawking Zhang 
699a0cdb3d0SHawking Zhang 	inst_mask = adev->aid_mask;
700a0cdb3d0SHawking Zhang 	for_each_inst(i, inst_mask)
701a0cdb3d0SHawking Zhang 		mmhub_v1_8_inst_reset_ras_error_count(adev, i);
702a0cdb3d0SHawking Zhang }
70300c14522SHawking Zhang 
/* ERR_STATUS register of each MMEA instance.  In the code visible here
 * only the element count is consumed (via ARRAY_SIZE); the registers
 * themselves are read through regMMEA0_ERR_STATUS plus a computed
 * per-instance offset.
 */
static const uint32_t mmhub_v1_8_mmea_err_status_reg[] = {
	regMMEA0_ERR_STATUS,
	regMMEA1_ERR_STATUS,
	regMMEA2_ERR_STATUS,
	regMMEA3_ERR_STATUS,
	regMMEA4_ERR_STATUS,
};
71100c14522SHawking Zhang 
71200c14522SHawking Zhang static void mmhub_v1_8_inst_query_ras_err_status(struct amdgpu_device *adev,
71300c14522SHawking Zhang 						 uint32_t mmhub_inst)
71400c14522SHawking Zhang {
71500c14522SHawking Zhang 	uint32_t reg_value;
71600c14522SHawking Zhang 	uint32_t mmea_err_status_addr_dist;
71700c14522SHawking Zhang 	uint32_t i;
71800c14522SHawking Zhang 
71900c14522SHawking Zhang 	/* query mmea ras err status */
72000c14522SHawking Zhang 	mmea_err_status_addr_dist = regMMEA1_ERR_STATUS - regMMEA0_ERR_STATUS;
72100c14522SHawking Zhang 	for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i++) {
72200c14522SHawking Zhang 		reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
72300c14522SHawking Zhang 						regMMEA0_ERR_STATUS,
72400c14522SHawking Zhang 						i * mmea_err_status_addr_dist);
72500c14522SHawking Zhang 		if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) ||
72600c14522SHawking Zhang 		    REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) ||
72700c14522SHawking Zhang 		    REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
72800c14522SHawking Zhang 			dev_warn(adev->dev,
72900c14522SHawking Zhang 				 "Detected MMEA%d err in MMHUB%d, status: 0x%x\n",
73000c14522SHawking Zhang 				 i, mmhub_inst, reg_value);
73100c14522SHawking Zhang 		}
73200c14522SHawking Zhang 	}
73300c14522SHawking Zhang 
73400c14522SHawking Zhang 	/* query mm_cane ras err status */
73500c14522SHawking Zhang 	reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS);
73600c14522SHawking Zhang 	if (REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_RDRSP_STATUS) ||
73700c14522SHawking Zhang 	    REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_WRRSP_STATUS) ||
73800c14522SHawking Zhang 	    REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_RDRSP_DATAPARITY_ERROR)) {
73900c14522SHawking Zhang 		dev_warn(adev->dev,
74000c14522SHawking Zhang 			 "Detected MM CANE err in MMHUB%d, status: 0x%x\n",
74100c14522SHawking Zhang 			 mmhub_inst, reg_value);
74200c14522SHawking Zhang 	}
74300c14522SHawking Zhang }
74400c14522SHawking Zhang 
74500c14522SHawking Zhang static void mmhub_v1_8_query_ras_error_status(struct amdgpu_device *adev)
74600c14522SHawking Zhang {
74700c14522SHawking Zhang 	uint32_t inst_mask;
74800c14522SHawking Zhang 	uint32_t i;
74900c14522SHawking Zhang 
75000c14522SHawking Zhang 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
75100c14522SHawking Zhang 		dev_warn(adev->dev, "MMHUB RAS is not supported\n");
75200c14522SHawking Zhang 		return;
75300c14522SHawking Zhang 	}
75400c14522SHawking Zhang 
75500c14522SHawking Zhang 	inst_mask = adev->aid_mask;
75600c14522SHawking Zhang 	for_each_inst(i, inst_mask)
75700c14522SHawking Zhang 		mmhub_v1_8_inst_query_ras_err_status(adev, i);
75800c14522SHawking Zhang }
759ccfdbd4bSHawking Zhang 
/* CGTT_CLK_CTRL register of each MMEA instance.
 * NOTE(review): this table appears unreferenced in the code visible
 * here -- the reset path derives the per-instance offset directly from
 * regMMEA1_CGTT_CLK_CTRL - regMMEA0_CGTT_CLK_CTRL.  Confirm against the
 * rest of the file before removing.
 */
static const uint32_t mmhub_v1_8_mmea_cgtt_clk_cntl_reg[] = {
	regMMEA0_CGTT_CLK_CTRL,
	regMMEA1_CGTT_CLK_CTRL,
	regMMEA2_CGTT_CLK_CTRL,
	regMMEA3_CGTT_CLK_CTRL,
	regMMEA4_CGTT_CLK_CTRL,
};
767ccfdbd4bSHawking Zhang 
/* Clear the error status bits of every MMEA instance and of MM_CANE on
 * one MMHUB instance.  For each block the sequence is: force the
 * response-path clock branch on (so the write reaches possibly
 * clock-gated logic), write CLEAR_ERROR_STATUS, then release the clock
 * override.  The override-on / clear / override-off ordering of these
 * read-modify-write accesses is deliberate and must be preserved.
 */
static void mmhub_v1_8_inst_reset_ras_err_status(struct amdgpu_device *adev,
						 uint32_t mmhub_inst)
{
	uint32_t mmea_cgtt_clk_cntl_addr_dist;
	uint32_t mmea_err_status_addr_dist;
	uint32_t reg_value;
	uint32_t i;

	/* reset mmea ras err status: registers of successive MMEA
	 * instances are evenly spaced, so address them by stride
	 */
	mmea_cgtt_clk_cntl_addr_dist = regMMEA1_CGTT_CLK_CTRL - regMMEA0_CGTT_CLK_CTRL;
	mmea_err_status_addr_dist = regMMEA1_ERR_STATUS - regMMEA0_ERR_STATUS;
	for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i++) {
		/* force clk branch on for response path
		 * set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 1
		 */
		reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
						regMMEA0_CGTT_CLK_CTRL,
						i * mmea_cgtt_clk_cntl_addr_dist);
		reg_value = REG_SET_FIELD(reg_value, MMEA0_CGTT_CLK_CTRL,
					  SOFT_OVERRIDE_RETURN, 1);
		WREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
				    regMMEA0_CGTT_CLK_CTRL,
				    i * mmea_cgtt_clk_cntl_addr_dist,
				    reg_value);

		/* set MMEA0_ERR_STATUS.CLEAR_ERROR_STATUS = 1 */
		reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
						regMMEA0_ERR_STATUS,
						i * mmea_err_status_addr_dist);
		reg_value = REG_SET_FIELD(reg_value, MMEA0_ERR_STATUS,
					  CLEAR_ERROR_STATUS, 1);
		WREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
				    regMMEA0_ERR_STATUS,
				    i * mmea_err_status_addr_dist,
				    reg_value);

		/* restore clock gating:
		 * set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 0
		 */
		reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
						regMMEA0_CGTT_CLK_CTRL,
						i * mmea_cgtt_clk_cntl_addr_dist);
		reg_value = REG_SET_FIELD(reg_value, MMEA0_CGTT_CLK_CTRL,
					  SOFT_OVERRIDE_RETURN, 0);
		WREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
				    regMMEA0_CGTT_CLK_CTRL,
				    i * mmea_cgtt_clk_cntl_addr_dist,
				    reg_value);
	}

	/* reset mm_cane ras err status
	 * force clk branch on for response path
	 * set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 1
	 */
	reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL);
	reg_value = REG_SET_FIELD(reg_value, MM_CANE_ICG_CTRL,
				  SOFT_OVERRIDE_ATRET, 1);
	WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL, reg_value);

	/* set MM_CANE_ERR_STATUS.CLEAR_ERROR_STATUS = 1 */
	reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS);
	reg_value = REG_SET_FIELD(reg_value, MM_CANE_ERR_STATUS,
				  CLEAR_ERROR_STATUS, 1);
	WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS, reg_value);

	/* restore clock gating:
	 * set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 0
	 */
	reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL);
	reg_value = REG_SET_FIELD(reg_value, MM_CANE_ICG_CTRL,
				  SOFT_OVERRIDE_ATRET, 0);
	WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL, reg_value);
}
837ccfdbd4bSHawking Zhang 
838ccfdbd4bSHawking Zhang static void mmhub_v1_8_reset_ras_error_status(struct amdgpu_device *adev)
839ccfdbd4bSHawking Zhang {
840ccfdbd4bSHawking Zhang 	uint32_t inst_mask;
841ccfdbd4bSHawking Zhang 	uint32_t i;
842ccfdbd4bSHawking Zhang 
843ccfdbd4bSHawking Zhang 	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
844ccfdbd4bSHawking Zhang 		dev_warn(adev->dev, "MMHUB RAS is not supported\n");
845ccfdbd4bSHawking Zhang 		return;
846ccfdbd4bSHawking Zhang 	}
847ccfdbd4bSHawking Zhang 
848ccfdbd4bSHawking Zhang 	inst_mask = adev->aid_mask;
849ccfdbd4bSHawking Zhang 	for_each_inst(i, inst_mask)
850ccfdbd4bSHawking Zhang 		mmhub_v1_8_inst_reset_ras_err_status(adev, i);
851ccfdbd4bSHawking Zhang }
85273c2b3fdSHawking Zhang 
/* RAS hardware ops vtable wiring the MMHUB 1.8 query/reset handlers
 * into the common amdgpu RAS framework.
 */
static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = {
	.query_ras_error_count = mmhub_v1_8_query_ras_error_count,
	.reset_ras_error_count = mmhub_v1_8_reset_ras_error_count,
	.query_ras_error_status = mmhub_v1_8_query_ras_error_status,
	.reset_ras_error_status = mmhub_v1_8_reset_ras_error_status,
};
85973c2b3fdSHawking Zhang 
/* Exported MMHUB 1.8 RAS block descriptor (non-static: registered by
 * code elsewhere in the driver).
 */
struct amdgpu_mmhub_ras mmhub_v1_8_ras = {
	.ras_block = {
		.hw_ops = &mmhub_v1_8_ras_hw_ops,
	},
};
865