/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "mmhub_v1_8.h"

#include "mmhub/mmhub_1_8_0_offset.h"
#include "mmhub/mmhub_1_8_0_sh_mask.h"
#include "vega10_enum.h"

#include "soc15_common.h"
#include "soc15.h"
#include "amdgpu_ras.h"

#define regVM_L2_CNTL3_DEFAULT	0x80100007
#define regVM_L2_CNTL4_DEFAULT	0x000000c1

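/* Read the framebuffer aperture from MC_VM_FB_LOCATION_BASE/TOP, shift the
 * register values left by 24 bits to form byte addresses, and cache them in
 * adev->gmc.fb_start/fb_end.
 */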
static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
{
	u64 base = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE);
	u64 top = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP);

	base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
	top <<= 24;

	adev->gmc.fb_start = base;
	adev->gmc.fb_end = top;

	return base;
}

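/* Program the page table base address (low/high 32 bits) of the given VMID
 * on every MMHUB instance present in adev->aid_mask.
 */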
static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
				uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub;
	u32 inst_mask;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];
		WREG32_SOC15_OFFSET(MMHUB, i,
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				    hub->ctx_addr_distance * vmid,
				    lower_32_bits(page_table_base));

		WREG32_SOC15_OFFSET(MMHUB, i,
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				    hub->ctx_addr_distance * vmid,
				    upper_32_bits(page_table_base));
	}
}

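/* Set up the VMID0 (GART) aperture: point context0 at the PDB0 or GART page
 * table and program the aperture start/end on each MMHUB instance.
 */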
static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base;
	u32 inst_mask;
	int i;

	if (adev->gmc.pdb0_bo)
		pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
	else
		pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v1_8_setup_vm_pt_regs(adev, 0, pt_base);

	/* If GART is used for FB translation, the vmid0 page table covers
	 * both VRAM and system memory (GART).
	 */
	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.fb_start >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.fb_start >> 44));

			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));

		} else {
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.gart_start >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.gart_start >> 44));

			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(MMHUB, i,
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		}
	}
}

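/* Program the AGP and system apertures, the default page address and the
 * protection fault default address on each MMHUB instance. Not programmed
 * when running as an SR-IOV VF.
 */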
static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	uint64_t value;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		/* Program the AGP BAR */
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BASE, 0);
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT,
			     adev->gmc.agp_start >> 24);
		WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP,
			     adev->gmc.agp_end >> 24);

		/* Program the system aperture low logical page number. */
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

		/* When VRAM is squeezed into the GART aperture, the FB and
		 * AGP apertures are not used. Disable them.
		 */
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT, 0xFFFFFF);
			WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP, 0);
			WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_TOP, 0);
			WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_BASE,
				     0x00FFFFFF);
			WREG32_SOC15(MMHUB, i,
				     regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
				     0x3FFFFFFF);
			WREG32_SOC15(MMHUB, i,
				     regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
		}

		/* Set default page address. */
		value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			     (u32)(value >> 12));
		WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			     (u32)(value >> 44));

		/* Program "protection fault". */
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			     (u32)(adev->dummy_page_addr >> 12));
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			     (u32)((u64)adev->dummy_page_addr >> 44));

		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
				    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
		WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
	}
}

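/* Configure MC_VM_MX_L1_TLB_CNTL on each MMHUB instance: enable the L1 TLB,
 * the advanced driver model and ATC, and set the system access mode and MTYPE.
 */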
static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	/* Setup TLB control */
	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL);

		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
				    1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_ACCESS_MODE, 3);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    MTYPE, MTYPE_UC);/* XXX for emulation. */
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

		WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
	}
}

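/* Set up the VM L2 cache (VM_L2_CNTL..VM_L2_CNTL4) on each MMHUB instance,
 * including fragment sizes and the physical PDE/PTE tap used when connected
 * to the CPU or on APP APUs. Skipped for SR-IOV VFs.
 */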
static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    ENABLE_L2_FRAGMENT_PROCESSING, 1);
		/* XXX for emulation, refer to closed source code. */
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION,
				    0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    CONTEXT1_IDENTITY_ACCESS_MODE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL,
				    IDENTITY_MODE_FRAGMENT_SIZE, 0);
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL, tmp);

		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL2, tmp);

		tmp = regVM_L2_CNTL3_DEFAULT;
		if (adev->gmc.translate_further) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
		}
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL3, tmp);

		tmp = regVM_L2_CNTL4_DEFAULT;
		/* For AMD APP APUs, set up WC memory */
		if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
					    VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
		}
		WREG32_SOC15(MMHUB, i, regVM_L2_CNTL4, tmp);
	}
}

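/* Enable VM context0 (the system domain) with the vmid0 page table depth and
 * block size; retry faults stay disabled for this context.
 */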
static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp, inst_mask;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
				adev->gmc.vmid0_page_table_depth);
		tmp = REG_SET_FIELD(tmp,
				    VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
				    adev->gmc.vmid0_page_table_block_size);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
		WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL, tmp);
	}
}

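/* Disable the context1 identity aperture by programming an empty range and a
 * zero physical offset. Skipped for SR-IOV VFs.
 */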
static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev)
{
	u32 inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
			     0XFFFFFFFF);
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
			     0x0000000F);

		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
			     0);
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
			     0);

		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
		WREG32_SOC15(MMHUB, i,
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
	}
}

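/* Program VM contexts 1..15 on each MMHUB instance: page table depth and
 * block size, the protection fault defaults, the address range up to
 * max_pfn, and retry faults enabled so per-process XNACK can work.
 */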
static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	unsigned int num_level, block_size;
	uint32_t tmp, inst_mask;
	int i, j;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i <= 14; i++) {
			tmp = RREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
						  i);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    ENABLE_CONTEXT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_DEPTH, num_level);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_BLOCK_SIZE,
					    block_size);
			/* On 9.4.3, XNACK can be enabled in the SQ
			 * per-process. Retry faults need to be enabled for
			 * that to work.
			 */
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
			WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL,
					    i * hub->ctx_distance, tmp);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				i * hub->ctx_addr_distance,
				lower_32_bits(adev->vm_manager.max_pfn - 1));
			WREG32_SOC15_OFFSET(MMHUB, j,
				regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				i * hub->ctx_addr_distance,
				upper_32_bits(adev->vm_manager.max_pfn - 1));
		}
	}
}

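/* Initialize the address range of the 18 VM invalidation engines on each
 * MMHUB instance to the full range (0xffffffff / 0x1f).
 */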
static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	u32 i, j, inst_mask;

	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i < 18; ++i) {
			WREG32_SOC15_OFFSET(MMHUB, j,
					regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
					i * hub->eng_addr_distance, 0xffffffff);
			WREG32_SOC15_OFFSET(MMHUB, j,
					regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
					i * hub->eng_addr_distance, 0x1f);
		}
	}
}

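/* Bring up GART translation on all MMHUB instances by programming the
 * apertures, TLB, L2 cache, VM contexts and invalidation engines in order.
 */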
static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev)
{
	/* GART Enable. */
	mmhub_v1_8_init_gart_aperture_regs(adev);
	mmhub_v1_8_init_system_aperture_regs(adev);
	mmhub_v1_8_init_tlb_regs(adev);
	mmhub_v1_8_init_cache_regs(adev);

	mmhub_v1_8_enable_system_domain(adev);
	mmhub_v1_8_disable_identity_aperture(adev);
	mmhub_v1_8_setup_vmid_config(adev);
	mmhub_v1_8_program_invalidation(adev);

	return 0;
}

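/* Tear down GART translation: disable all VM contexts, the L1 TLB and, when
 * not running as an SR-IOV VF, the L2 cache on every MMHUB instance.
 */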
static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	u32 tmp;
	u32 i, j, inst_mask;

	/* Disable all tables */
	inst_mask = adev->aid_mask;
	for_each_inst(j, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(j)];
		for (i = 0; i < 16; i++)
			WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL,
					    i * hub->ctx_distance, 0);

		/* Setup TLB control */
		tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB,
				    0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 0);
		WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp);

		if (!amdgpu_sriov_vf(adev)) {
			/* Setup L2 cache */
			tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE,
					    0);
			WREG32_SOC15(MMHUB, j, regVM_L2_CNTL, tmp);
			WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0);
		}
	}
}

/**
 * mmhub_v1_8_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp, inst_mask;
	int i;

	if (amdgpu_sriov_vf(adev))
		return;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		if (!value) {
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_NO_RETRY_FAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					    CRASH_ON_RETRY_FAULT, 1);
		}

		WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
	}
}

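/* Fill in the per-instance amdgpu_vmhub register offsets and the distances
 * between per-context and per-engine registers for each MMHUB instance.
 */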
static void mmhub_v1_8_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub;
	u32 inst_mask;
	int i;

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask) {
		hub = &adev->vmhub[AMDGPU_MMHUB0(i)];

		hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, i,
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
		hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, i,
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
		hub->vm_inv_eng0_req =
			SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_REQ);
		hub->vm_inv_eng0_ack =
			SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_ACK);
		hub->vm_context0_cntl =
			SOC15_REG_OFFSET(MMHUB, i, regVM_CONTEXT0_CNTL);
		hub->vm_l2_pro_fault_status = SOC15_REG_OFFSET(MMHUB, i,
			regVM_L2_PROTECTION_FAULT_STATUS);
		hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, i,
			regVM_L2_PROTECTION_FAULT_CNTL);

		hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
		hub->ctx_addr_distance =
			regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
			regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
		hub->eng_distance = regVM_INVALIDATE_ENG1_REQ -
			regVM_INVALIDATE_ENG0_REQ;
		hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
			regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
	}
}

static int mmhub_v1_8_set_clockgating(struct amdgpu_device *adev,
				      enum amd_clockgating_state state)
{
	return 0;
}

static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{

}

const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = {
	.get_fb_location = mmhub_v1_8_get_fb_location,
	.init = mmhub_v1_8_init,
	.gart_enable = mmhub_v1_8_gart_enable,
	.set_fault_enable_default = mmhub_v1_8_set_fault_enable_default,
	.gart_disable = mmhub_v1_8_gart_disable,
	.setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs,
	.set_clockgating = mmhub_v1_8_set_clockgating,
	.get_clockgating = mmhub_v1_8_get_clockgating,
};

static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = {
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_CE_ERR_STATUS_LO, regMMEA0_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_CE_ERR_STATUS_LO, regMMEA1_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_CE_ERR_STATUS_LO, regMMEA2_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_CE_ERR_STATUS_LO, regMMEA3_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_CE_ERR_STATUS_LO, regMMEA4_CE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_CE_ERR_STATUS_LO, regMM_CANE_CE_ERR_STATUS_HI),
	1, 0, "MM_CANE"},
};

static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ue_reg_list[] = {
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_UE_ERR_STATUS_LO, regMMEA0_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_UE_ERR_STATUS_LO, regMMEA1_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_UE_ERR_STATUS_LO, regMMEA2_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_UE_ERR_STATUS_LO, regMMEA3_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_UE_ERR_STATUS_LO, regMMEA4_UE_ERR_STATUS_HI),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"},
	{AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_UE_ERR_STATUS_LO, regMM_CANE_UE_ERR_STATUS_HI),
	1, 0, "MM_CANE"},
};

static const struct amdgpu_ras_memory_id_entry mmhub_v1_8_ras_memory_list[] = {
	{AMDGPU_MMHUB_WGMI_PAGEMEM, "MMEA_WGMI_PAGEMEM"},
	{AMDGPU_MMHUB_RGMI_PAGEMEM, "MMEA_RGMI_PAGEMEM"},
	{AMDGPU_MMHUB_WDRAM_PAGEMEM, "MMEA_WDRAM_PAGEMEM"},
	{AMDGPU_MMHUB_RDRAM_PAGEMEM, "MMEA_RDRAM_PAGEMEM"},
	{AMDGPU_MMHUB_WIO_CMDMEM, "MMEA_WIO_CMDMEM"},
	{AMDGPU_MMHUB_RIO_CMDMEM, "MMEA_RIO_CMDMEM"},
	{AMDGPU_MMHUB_WGMI_CMDMEM, "MMEA_WGMI_CMDMEM"},
	{AMDGPU_MMHUB_RGMI_CMDMEM, "MMEA_RGMI_CMDMEM"},
	{AMDGPU_MMHUB_WDRAM_CMDMEM, "MMEA_WDRAM_CMDMEM"},
	{AMDGPU_MMHUB_RDRAM_CMDMEM, "MMEA_RDRAM_CMDMEM"},
	{AMDGPU_MMHUB_MAM_DMEM0, "MMEA_MAM_DMEM0"},
	{AMDGPU_MMHUB_MAM_DMEM1, "MMEA_MAM_DMEM1"},
	{AMDGPU_MMHUB_MAM_DMEM2, "MMEA_MAM_DMEM2"},
	{AMDGPU_MMHUB_MAM_DMEM3, "MMEA_MAM_DMEM3"},
	{AMDGPU_MMHUB_WRET_TAGMEM, "MMEA_WRET_TAGMEM"},
	{AMDGPU_MMHUB_RRET_TAGMEM, "MMEA_RRET_TAGMEM"},
	{AMDGPU_MMHUB_WIO_DATAMEM, "MMEA_WIO_DATAMEM"},
	{AMDGPU_MMHUB_WGMI_DATAMEM, "MMEA_WGMI_DATAMEM"},
	{AMDGPU_MMHUB_WDRAM_DATAMEM, "MMEA_WDRAM_DATAMEM"},
};

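/* Collect correctable and uncorrectable error counts from the CE/UE register
 * lists for one MMHUB instance and accumulate them into the ras_err_data.
 */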
static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev,
						  uint32_t mmhub_inst,
						  void *ras_err_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;

	amdgpu_ras_inst_query_ras_error_count(adev,
					mmhub_v1_8_ce_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ce_reg_list),
					mmhub_v1_8_ras_memory_list,
					ARRAY_SIZE(mmhub_v1_8_ras_memory_list),
					mmhub_inst,
					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
					&err_data->ce_count);
	amdgpu_ras_inst_query_ras_error_count(adev,
					mmhub_v1_8_ue_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ue_reg_list),
					mmhub_v1_8_ras_memory_list,
					ARRAY_SIZE(mmhub_v1_8_ras_memory_list),
					mmhub_inst,
					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					&err_data->ue_count);
}

static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev,
					     void *ras_err_status)
{
	uint32_t inst_mask;
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
		dev_warn(adev->dev, "MMHUB RAS is not supported\n");
		return;
	}

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask)
		mmhub_v1_8_inst_query_ras_error_count(adev, i, ras_err_status);
}

static void mmhub_v1_8_inst_reset_ras_error_count(struct amdgpu_device *adev,
						  uint32_t mmhub_inst)
{
	amdgpu_ras_inst_reset_ras_error_count(adev,
					mmhub_v1_8_ce_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ce_reg_list),
					mmhub_inst);
	amdgpu_ras_inst_reset_ras_error_count(adev,
					mmhub_v1_8_ue_reg_list,
					ARRAY_SIZE(mmhub_v1_8_ue_reg_list),
					mmhub_inst);
}

static void mmhub_v1_8_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t inst_mask;
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
		dev_warn(adev->dev, "MMHUB RAS is not supported\n");
		return;
	}

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask)
		mmhub_v1_8_inst_reset_ras_error_count(adev, i);
}

static const u32 mmhub_v1_8_mmea_err_status_reg[] __maybe_unused = {
	regMMEA0_ERR_STATUS,
	regMMEA1_ERR_STATUS,
	regMMEA2_ERR_STATUS,
	regMMEA3_ERR_STATUS,
	regMMEA4_ERR_STATUS,
};

static void mmhub_v1_8_inst_query_ras_err_status(struct amdgpu_device *adev,
						 uint32_t mmhub_inst)
{
	uint32_t reg_value;
	uint32_t mmea_err_status_addr_dist;
	uint32_t i;

	/* query mmea ras err status */
	mmea_err_status_addr_dist = regMMEA1_ERR_STATUS - regMMEA0_ERR_STATUS;
	for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i++) {
		reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
						regMMEA0_ERR_STATUS,
						i * mmea_err_status_addr_dist);
		if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) ||
		    REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) ||
		    REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
			dev_warn(adev->dev,
				 "Detected MMEA%d err in MMHUB%d, status: 0x%x\n",
				 i, mmhub_inst, reg_value);
		}
	}

	/* query mm_cane ras err status */
	reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS);
	if (REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_RDRSP_STATUS) ||
	    REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_WRRSP_STATUS) ||
	    REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_RDRSP_DATAPARITY_ERROR)) {
		dev_warn(adev->dev,
			 "Detected MM CANE err in MMHUB%d, status: 0x%x\n",
			 mmhub_inst, reg_value);
	}
}

static void mmhub_v1_8_query_ras_error_status(struct amdgpu_device *adev)
{
	uint32_t inst_mask;
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
		dev_warn(adev->dev, "MMHUB RAS is not supported\n");
		return;
	}

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask)
		mmhub_v1_8_inst_query_ras_err_status(adev, i);
}

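/* Clear the MMEA and MM_CANE error status for one MMHUB instance. The
 * response-path clock branch is forced on around each CLEAR_ERROR_STATUS
 * write and restored afterwards.
 */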
static void mmhub_v1_8_inst_reset_ras_err_status(struct amdgpu_device *adev,
						 uint32_t mmhub_inst)
{
	uint32_t mmea_cgtt_clk_cntl_addr_dist;
	uint32_t mmea_err_status_addr_dist;
	uint32_t reg_value;
	uint32_t i;

	/* reset mmea ras err status */
	mmea_cgtt_clk_cntl_addr_dist = regMMEA1_CGTT_CLK_CTRL - regMMEA0_CGTT_CLK_CTRL;
	mmea_err_status_addr_dist = regMMEA1_ERR_STATUS - regMMEA0_ERR_STATUS;
	for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i++) {
		/* force clk branch on for response path
		 * set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 1
		 */
		reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
						regMMEA0_CGTT_CLK_CTRL,
						i * mmea_cgtt_clk_cntl_addr_dist);
		reg_value = REG_SET_FIELD(reg_value, MMEA0_CGTT_CLK_CTRL,
					  SOFT_OVERRIDE_RETURN, 1);
		WREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
				    regMMEA0_CGTT_CLK_CTRL,
				    i * mmea_cgtt_clk_cntl_addr_dist,
				    reg_value);

		/* set MMEA0_ERR_STATUS.CLEAR_ERROR_STATUS = 1 */
		reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
						regMMEA0_ERR_STATUS,
						i * mmea_err_status_addr_dist);
		reg_value = REG_SET_FIELD(reg_value, MMEA0_ERR_STATUS,
					  CLEAR_ERROR_STATUS, 1);
		WREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
				    regMMEA0_ERR_STATUS,
				    i * mmea_err_status_addr_dist,
				    reg_value);

		/* set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 0 */
		reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
						regMMEA0_CGTT_CLK_CTRL,
						i * mmea_cgtt_clk_cntl_addr_dist);
		reg_value = REG_SET_FIELD(reg_value, MMEA0_CGTT_CLK_CTRL,
					  SOFT_OVERRIDE_RETURN, 0);
		WREG32_SOC15_OFFSET(MMHUB, mmhub_inst,
				    regMMEA0_CGTT_CLK_CTRL,
				    i * mmea_cgtt_clk_cntl_addr_dist,
				    reg_value);
	}

	/* reset mm_cane ras err status
	 * force clk branch on for response path
	 * set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 1
	 */
	reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL);
	reg_value = REG_SET_FIELD(reg_value, MM_CANE_ICG_CTRL,
				  SOFT_OVERRIDE_ATRET, 1);
	WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL, reg_value);

	/* set MM_CANE_ERR_STATUS.CLEAR_ERROR_STATUS = 1 */
	reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS);
	reg_value = REG_SET_FIELD(reg_value, MM_CANE_ERR_STATUS,
				  CLEAR_ERROR_STATUS, 1);
	WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS, reg_value);

	/* set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 0 */
	reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL);
	reg_value = REG_SET_FIELD(reg_value, MM_CANE_ICG_CTRL,
				  SOFT_OVERRIDE_ATRET, 0);
	WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL, reg_value);
}

static void mmhub_v1_8_reset_ras_error_status(struct amdgpu_device *adev)
{
	uint32_t inst_mask;
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
		dev_warn(adev->dev, "MMHUB RAS is not supported\n");
		return;
	}

	inst_mask = adev->aid_mask;
	for_each_inst(i, inst_mask)
		mmhub_v1_8_inst_reset_ras_err_status(adev, i);
}

static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = {
	.query_ras_error_count = mmhub_v1_8_query_ras_error_count,
	.reset_ras_error_count = mmhub_v1_8_reset_ras_error_count,
	.query_ras_error_status = mmhub_v1_8_query_ras_error_status,
	.reset_ras_error_status = mmhub_v1_8_reset_ras_error_status,
};

struct amdgpu_mmhub_ras mmhub_v1_8_ras = {
	.ras_block = {
		.hw_ops = &mmhub_v1_8_ras_hw_ops,
	},
};