1 /* 2 * Copyright 2022 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 #include "amdgpu.h" 24 #include "mmhub_v1_8.h" 25 26 #include "mmhub/mmhub_1_8_0_offset.h" 27 #include "mmhub/mmhub_1_8_0_sh_mask.h" 28 #include "vega10_enum.h" 29 30 #include "soc15_common.h" 31 #include "soc15.h" 32 #include "amdgpu_ras.h" 33 34 #define regVM_L2_CNTL3_DEFAULT 0x80100007 35 #define regVM_L2_CNTL4_DEFAULT 0x000000c1 36 37 static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev) 38 { 39 u64 base = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE); 40 u64 top = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP); 41 42 base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK; 43 base <<= 24; 44 45 top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK; 46 top <<= 24; 47 48 adev->gmc.fb_start = base; 49 adev->gmc.fb_end = top; 50 51 return base; 52 } 53 54 static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, 55 uint64_t page_table_base) 56 { 57 struct amdgpu_vmhub *hub; 58 u32 inst_mask; 59 int i; 60 61 inst_mask = adev->aid_mask; 62 for_each_inst(i, inst_mask) { 63 hub = &adev->vmhub[AMDGPU_MMHUB0(i)]; 64 WREG32_SOC15_OFFSET(MMHUB, i, 65 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, 66 hub->ctx_addr_distance * vmid, 67 lower_32_bits(page_table_base)); 68 69 WREG32_SOC15_OFFSET(MMHUB, i, 70 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, 71 hub->ctx_addr_distance * vmid, 72 upper_32_bits(page_table_base)); 73 } 74 } 75 76 static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) 77 { 78 uint64_t pt_base; 79 u32 inst_mask; 80 int i; 81 82 if (adev->gmc.pdb0_bo) 83 pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo); 84 else 85 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); 86 87 mmhub_v1_8_setup_vm_pt_regs(adev, 0, pt_base); 88 89 /* If use GART for FB translation, vmid0 page table covers both 90 * vram and system memory (gart) 91 */ 92 inst_mask = adev->aid_mask; 93 for_each_inst(i, inst_mask) { 94 if (adev->gmc.pdb0_bo) { 95 WREG32_SOC15(MMHUB, i, 96 regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, 97 (u32)(adev->gmc.fb_start >> 12)); 98 WREG32_SOC15(MMHUB, i, 99 regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, 100 (u32)(adev->gmc.fb_start >> 44)); 101 102 WREG32_SOC15(MMHUB, i, 103 regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, 104 (u32)(adev->gmc.gart_end >> 12)); 105 WREG32_SOC15(MMHUB, i, 106 regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, 107 (u32)(adev->gmc.gart_end >> 44)); 108 109 } else { 110 WREG32_SOC15(MMHUB, i, 111 regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, 112 (u32)(adev->gmc.gart_start >> 12)); 113 WREG32_SOC15(MMHUB, i, 114 regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, 115 (u32)(adev->gmc.gart_start >> 44)); 116 117 WREG32_SOC15(MMHUB, i, 118 regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, 119 (u32)(adev->gmc.gart_end >> 12)); 120 WREG32_SOC15(MMHUB, i, 121 regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, 122 (u32)(adev->gmc.gart_end >> 44)); 123 } 124 } 125 } 126 127 static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev) 128 { 129 uint32_t tmp, inst_mask; 130 uint64_t value; 131 int i; 132 133 inst_mask = adev->aid_mask; 134 for_each_inst(i, inst_mask) { 135 /* Program the AGP BAR */ 136 WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BASE, 0); 137 WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT, 138 adev->gmc.agp_start >> 24); 139 WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP, 140 adev->gmc.agp_end >> 24); 141 142 if (amdgpu_sriov_vf(adev)) 143 return; 144 145 /* Program the system aperture low logical page number. */ 146 WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 147 min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); 148 149 WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 150 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 151 152 /* In the case squeezing vram into GART aperture, we don't use 153 * FB aperture and AGP aperture. Disable them. 154 */ 155 if (adev->gmc.pdb0_bo) { 156 WREG32_SOC15(MMHUB, i, regMC_VM_AGP_BOT, 0xFFFFFF); 157 WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP, 0); 158 WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_TOP, 0); 159 WREG32_SOC15(MMHUB, i, regMC_VM_FB_LOCATION_BASE, 160 0x00FFFFFF); 161 WREG32_SOC15(MMHUB, i, 162 regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 163 0x3FFFFFFF); 164 WREG32_SOC15(MMHUB, i, 165 regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0); 166 } 167 168 /* Set default page address. */ 169 value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); 170 WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 171 (u32)(value >> 12)); 172 WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, 173 (u32)(value >> 44)); 174 175 /* Program "protection fault". */ 176 WREG32_SOC15(MMHUB, i, 177 regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, 178 (u32)(adev->dummy_page_addr >> 12)); 179 WREG32_SOC15(MMHUB, i, 180 regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, 181 (u32)((u64)adev->dummy_page_addr >> 44)); 182 183 tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2); 184 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2, 185 ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); 186 WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL2, tmp); 187 } 188 } 189 190 static void mmhub_v1_8_init_tlb_regs(struct amdgpu_device *adev) 191 { 192 uint32_t tmp, inst_mask; 193 int i; 194 195 /* Setup TLB control */ 196 inst_mask = adev->aid_mask; 197 for_each_inst(i, inst_mask) { 198 tmp = RREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL); 199 200 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 201 1); 202 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, 203 SYSTEM_ACCESS_MODE, 3); 204 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, 205 ENABLE_ADVANCED_DRIVER_MODEL, 1); 206 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, 207 SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); 208 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, 209 MTYPE, MTYPE_UC);/* XXX for emulation. */ 210 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); 211 212 WREG32_SOC15(MMHUB, i, regMC_VM_MX_L1_TLB_CNTL, tmp); 213 } 214 } 215 216 static void mmhub_v1_8_init_cache_regs(struct amdgpu_device *adev) 217 { 218 uint32_t tmp, inst_mask; 219 int i; 220 221 if (amdgpu_sriov_vf(adev)) 222 return; 223 224 /* Setup L2 cache */ 225 inst_mask = adev->aid_mask; 226 for_each_inst(i, inst_mask) { 227 tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL); 228 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); 229 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, 230 ENABLE_L2_FRAGMENT_PROCESSING, 1); 231 /* XXX for emulation, Refer to closed source code.*/ 232 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, 233 L2_PDE0_CACHE_TAG_GENERATION_MODE, 0); 234 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 235 0); 236 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, 237 CONTEXT1_IDENTITY_ACCESS_MODE, 1); 238 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, 239 IDENTITY_MODE_FRAGMENT_SIZE, 0); 240 WREG32_SOC15(MMHUB, i, regVM_L2_CNTL, tmp); 241 242 tmp = RREG32_SOC15(MMHUB, i, regVM_L2_CNTL2); 243 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 244 1); 245 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); 246 WREG32_SOC15(MMHUB, i, regVM_L2_CNTL2, tmp); 247 248 tmp = regVM_L2_CNTL3_DEFAULT; 249 if (adev->gmc.translate_further) { 250 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12); 251 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, 252 L2_CACHE_BIGK_FRAGMENT_SIZE, 9); 253 } else { 254 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9); 255 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, 256 L2_CACHE_BIGK_FRAGMENT_SIZE, 6); 257 } 258 WREG32_SOC15(MMHUB, i, regVM_L2_CNTL3, tmp); 259 260 tmp = regVM_L2_CNTL4_DEFAULT; 261 /* For AMD APP APUs setup WC memory */ 262 if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) { 263 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, 264 VMC_TAP_PDE_REQUEST_PHYSICAL, 1); 265 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, 266 VMC_TAP_PTE_REQUEST_PHYSICAL, 1); 267 } else { 268 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, 269 VMC_TAP_PDE_REQUEST_PHYSICAL, 0); 270 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, 271 VMC_TAP_PTE_REQUEST_PHYSICAL, 0); 272 } 273 WREG32_SOC15(MMHUB, i, regVM_L2_CNTL4, tmp); 274 } 275 } 276 277 static void mmhub_v1_8_enable_system_domain(struct amdgpu_device *adev) 278 { 279 uint32_t tmp, inst_mask; 280 int i; 281 282 inst_mask = adev->aid_mask; 283 for_each_inst(i, inst_mask) { 284 tmp = RREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL); 285 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); 286 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 287 adev->gmc.vmid0_page_table_depth); 288 tmp = REG_SET_FIELD(tmp, 289 VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE, 290 adev->gmc.vmid0_page_table_block_size); 291 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, 292 RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); 293 WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_CNTL, tmp); 294 } 295 } 296 297 static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev) 298 { 299 u32 inst_mask; 300 int i; 301 302 if (amdgpu_sriov_vf(adev)) 303 return; 304 305 inst_mask = adev->aid_mask; 306 for_each_inst(i, inst_mask) { 307 WREG32_SOC15(MMHUB, i, 308 regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 309 0XFFFFFFFF); 310 WREG32_SOC15(MMHUB, i, 311 regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, 312 0x0000000F); 313 314 WREG32_SOC15(MMHUB, i, 315 regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 316 0); 317 WREG32_SOC15(MMHUB, i, 318 regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 319 0); 320 321 WREG32_SOC15(MMHUB, i, 322 regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0); 323 WREG32_SOC15(MMHUB, i, 324 regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0); 325 } 326 } 327 328 static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) 329 { 330 struct amdgpu_vmhub *hub; 331 unsigned int num_level, block_size; 332 uint32_t tmp, inst_mask; 333 int i, j; 334 335 num_level = adev->vm_manager.num_level; 336 block_size = adev->vm_manager.block_size; 337 if (adev->gmc.translate_further) 338 num_level -= 1; 339 else 340 block_size -= 9; 341 342 inst_mask = adev->aid_mask; 343 for_each_inst(j, inst_mask) { 344 hub = &adev->vmhub[AMDGPU_MMHUB0(j)]; 345 for (i = 0; i <= 14; i++) { 346 tmp = RREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL, 347 i); 348 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 349 ENABLE_CONTEXT, 1); 350 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 351 PAGE_TABLE_DEPTH, num_level); 352 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 353 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); 354 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 355 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); 356 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 357 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); 358 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 359 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); 360 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 361 READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); 362 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 363 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); 364 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 365 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); 366 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 367 PAGE_TABLE_BLOCK_SIZE, 368 block_size); 369 /* On 9.4.3, XNACK can be enabled in the SQ 370 * per-process. Retry faults need to be enabled for 371 * that to work. 372 */ 373 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 374 RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1); 375 WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT1_CNTL, 376 i * hub->ctx_distance, tmp); 377 WREG32_SOC15_OFFSET(MMHUB, j, 378 regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, 379 i * hub->ctx_addr_distance, 0); 380 WREG32_SOC15_OFFSET(MMHUB, j, 381 regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, 382 i * hub->ctx_addr_distance, 0); 383 WREG32_SOC15_OFFSET(MMHUB, j, 384 regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, 385 i * hub->ctx_addr_distance, 386 lower_32_bits(adev->vm_manager.max_pfn - 1)); 387 WREG32_SOC15_OFFSET(MMHUB, j, 388 regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, 389 i * hub->ctx_addr_distance, 390 upper_32_bits(adev->vm_manager.max_pfn - 1)); 391 } 392 } 393 } 394 395 static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev) 396 { 397 struct amdgpu_vmhub *hub; 398 u32 i, j, inst_mask; 399 400 inst_mask = adev->aid_mask; 401 for_each_inst(j, inst_mask) { 402 hub = &adev->vmhub[AMDGPU_MMHUB0(j)]; 403 for (i = 0; i < 18; ++i) { 404 WREG32_SOC15_OFFSET(MMHUB, j, 405 regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, 406 i * hub->eng_addr_distance, 0xffffffff); 407 WREG32_SOC15_OFFSET(MMHUB, j, 408 regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, 409 i * hub->eng_addr_distance, 0x1f); 410 } 411 } 412 } 413 414 static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev) 415 { 416 if (amdgpu_sriov_vf(adev)) { 417 /* 418 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are 419 * VF copy registers so vbios post doesn't program them, for 420 * SRIOV driver need to program them 421 */ 422 WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, 423 adev->gmc.vram_start >> 24); 424 WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, 425 adev->gmc.vram_end >> 24); 426 } 427 428 /* GART Enable. */ 429 mmhub_v1_8_init_gart_aperture_regs(adev); 430 mmhub_v1_8_init_system_aperture_regs(adev); 431 mmhub_v1_8_init_tlb_regs(adev); 432 mmhub_v1_8_init_cache_regs(adev); 433 434 mmhub_v1_8_enable_system_domain(adev); 435 mmhub_v1_8_disable_identity_aperture(adev); 436 mmhub_v1_8_setup_vmid_config(adev); 437 mmhub_v1_8_program_invalidation(adev); 438 439 return 0; 440 } 441 442 static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev) 443 { 444 struct amdgpu_vmhub *hub; 445 u32 tmp; 446 u32 i, j, inst_mask; 447 448 /* Disable all tables */ 449 inst_mask = adev->aid_mask; 450 for_each_inst(j, inst_mask) { 451 hub = &adev->vmhub[AMDGPU_MMHUB0(j)]; 452 for (i = 0; i < 16; i++) 453 WREG32_SOC15_OFFSET(MMHUB, j, regVM_CONTEXT0_CNTL, 454 i * hub->ctx_distance, 0); 455 456 /* Setup TLB control */ 457 tmp = RREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL); 458 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 459 0); 460 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, 461 ENABLE_ADVANCED_DRIVER_MODEL, 0); 462 WREG32_SOC15(MMHUB, j, regMC_VM_MX_L1_TLB_CNTL, tmp); 463 464 if (!amdgpu_sriov_vf(adev)) { 465 /* Setup L2 cache */ 466 tmp = RREG32_SOC15(MMHUB, j, regVM_L2_CNTL); 467 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 468 0); 469 WREG32_SOC15(MMHUB, j, regVM_L2_CNTL, tmp); 470 WREG32_SOC15(MMHUB, j, regVM_L2_CNTL3, 0); 471 } 472 } 473 } 474 475 /** 476 * mmhub_v1_8_set_fault_enable_default - update GART/VM fault handling 477 * 478 * @adev: amdgpu_device pointer 479 * @value: true redirects VM faults to the default page 480 */ 481 static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool value) 482 { 483 u32 tmp, inst_mask; 484 int i; 485 486 if (amdgpu_sriov_vf(adev)) 487 return; 488 489 inst_mask = adev->aid_mask; 490 for_each_inst(i, inst_mask) { 491 tmp = RREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL); 492 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, 493 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); 494 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, 495 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value); 496 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, 497 PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value); 498 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, 499 PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value); 500 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, 501 TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT, 502 value); 503 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, 504 NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value); 505 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, 506 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); 507 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, 508 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value); 509 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, 510 READ_PROTECTION_FAULT_ENABLE_DEFAULT, value); 511 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, 512 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); 513 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, 514 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value); 515 if (!value) { 516 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, 517 CRASH_ON_NO_RETRY_FAULT, 1); 518 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL, 519 CRASH_ON_RETRY_FAULT, 1); 520 } 521 522 WREG32_SOC15(MMHUB, i, regVM_L2_PROTECTION_FAULT_CNTL, tmp); 523 } 524 } 525 526 static void mmhub_v1_8_init(struct amdgpu_device *adev) 527 { 528 struct amdgpu_vmhub *hub; 529 u32 inst_mask; 530 int i; 531 532 inst_mask = adev->aid_mask; 533 for_each_inst(i, inst_mask) { 534 hub = &adev->vmhub[AMDGPU_MMHUB0(i)]; 535 536 hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, i, 537 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32); 538 hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, i, 539 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); 540 hub->vm_inv_eng0_req = 541 SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_REQ); 542 hub->vm_inv_eng0_ack = 543 SOC15_REG_OFFSET(MMHUB, i, regVM_INVALIDATE_ENG0_ACK); 544 hub->vm_context0_cntl = 545 SOC15_REG_OFFSET(MMHUB, i, regVM_CONTEXT0_CNTL); 546 hub->vm_l2_pro_fault_status = SOC15_REG_OFFSET(MMHUB, i, 547 regVM_L2_PROTECTION_FAULT_STATUS); 548 hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, i, 549 regVM_L2_PROTECTION_FAULT_CNTL); 550 551 hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL; 552 hub->ctx_addr_distance = 553 regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 - 554 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; 555 hub->eng_distance = regVM_INVALIDATE_ENG1_REQ - 556 regVM_INVALIDATE_ENG0_REQ; 557 hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 - 558 regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; 559 } 560 } 561 562 static int mmhub_v1_8_set_clockgating(struct amdgpu_device *adev, 563 enum amd_clockgating_state state) 564 { 565 return 0; 566 } 567 568 static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags) 569 { 570 571 } 572 573 const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = { 574 .get_fb_location = mmhub_v1_8_get_fb_location, 575 .init = mmhub_v1_8_init, 576 .gart_enable = mmhub_v1_8_gart_enable, 577 .set_fault_enable_default = mmhub_v1_8_set_fault_enable_default, 578 .gart_disable = mmhub_v1_8_gart_disable, 579 .setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs, 580 .set_clockgating = mmhub_v1_8_set_clockgating, 581 .get_clockgating = mmhub_v1_8_get_clockgating, 582 }; 583 584 static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = { 585 {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_CE_ERR_STATUS_LO, regMMEA0_CE_ERR_STATUS_HI), 586 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"}, 587 {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_CE_ERR_STATUS_LO, regMMEA1_CE_ERR_STATUS_HI), 588 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"}, 589 {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_CE_ERR_STATUS_LO, regMMEA2_CE_ERR_STATUS_HI), 590 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"}, 591 {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_CE_ERR_STATUS_LO, regMMEA3_CE_ERR_STATUS_HI), 592 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"}, 593 {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_CE_ERR_STATUS_LO, regMMEA4_CE_ERR_STATUS_HI), 594 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"}, 595 {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_CE_ERR_STATUS_LO, regMM_CANE_CE_ERR_STATUS_HI), 596 1, 0, "MM_CANE"}, 597 }; 598 599 static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ue_reg_list[] = { 600 {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA0_UE_ERR_STATUS_LO, regMMEA0_UE_ERR_STATUS_HI), 601 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA0"}, 602 {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA1_UE_ERR_STATUS_LO, regMMEA1_UE_ERR_STATUS_HI), 603 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA1"}, 604 {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA2_UE_ERR_STATUS_LO, regMMEA2_UE_ERR_STATUS_HI), 605 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA2"}, 606 {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA3_UE_ERR_STATUS_LO, regMMEA3_UE_ERR_STATUS_HI), 607 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA3"}, 608 {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMMEA4_UE_ERR_STATUS_LO, regMMEA4_UE_ERR_STATUS_HI), 609 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "MMEA4"}, 610 {AMDGPU_RAS_REG_ENTRY(MMHUB, 0, regMM_CANE_UE_ERR_STATUS_LO, regMM_CANE_UE_ERR_STATUS_HI), 611 1, 0, "MM_CANE"}, 612 }; 613 614 static const struct amdgpu_ras_memory_id_entry mmhub_v1_8_ras_memory_list[] = { 615 {AMDGPU_MMHUB_WGMI_PAGEMEM, "MMEA_WGMI_PAGEMEM"}, 616 {AMDGPU_MMHUB_RGMI_PAGEMEM, "MMEA_RGMI_PAGEMEM"}, 617 {AMDGPU_MMHUB_WDRAM_PAGEMEM, "MMEA_WDRAM_PAGEMEM"}, 618 {AMDGPU_MMHUB_RDRAM_PAGEMEM, "MMEA_RDRAM_PAGEMEM"}, 619 {AMDGPU_MMHUB_WIO_CMDMEM, "MMEA_WIO_CMDMEM"}, 620 {AMDGPU_MMHUB_RIO_CMDMEM, "MMEA_RIO_CMDMEM"}, 621 {AMDGPU_MMHUB_WGMI_CMDMEM, "MMEA_WGMI_CMDMEM"}, 622 {AMDGPU_MMHUB_RGMI_CMDMEM, "MMEA_RGMI_CMDMEM"}, 623 {AMDGPU_MMHUB_WDRAM_CMDMEM, "MMEA_WDRAM_CMDMEM"}, 624 {AMDGPU_MMHUB_RDRAM_CMDMEM, "MMEA_RDRAM_CMDMEM"}, 625 {AMDGPU_MMHUB_MAM_DMEM0, "MMEA_MAM_DMEM0"}, 626 {AMDGPU_MMHUB_MAM_DMEM1, "MMEA_MAM_DMEM1"}, 627 {AMDGPU_MMHUB_MAM_DMEM2, "MMEA_MAM_DMEM2"}, 628 {AMDGPU_MMHUB_MAM_DMEM3, "MMEA_MAM_DMEM3"}, 629 {AMDGPU_MMHUB_WRET_TAGMEM, "MMEA_WRET_TAGMEM"}, 630 {AMDGPU_MMHUB_RRET_TAGMEM, "MMEA_RRET_TAGMEM"}, 631 {AMDGPU_MMHUB_WIO_DATAMEM, "MMEA_WIO_DATAMEM"}, 632 {AMDGPU_MMHUB_WGMI_DATAMEM, "MMEA_WGMI_DATAMEM"}, 633 {AMDGPU_MMHUB_WDRAM_DATAMEM, "MMEA_WDRAM_DATAMEM"}, 634 }; 635 636 static void mmhub_v1_8_inst_query_ras_error_count(struct amdgpu_device *adev, 637 uint32_t mmhub_inst, 638 void *ras_err_status) 639 { 640 struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status; 641 642 amdgpu_ras_inst_query_ras_error_count(adev, 643 mmhub_v1_8_ce_reg_list, 644 ARRAY_SIZE(mmhub_v1_8_ce_reg_list), 645 mmhub_v1_8_ras_memory_list, 646 ARRAY_SIZE(mmhub_v1_8_ras_memory_list), 647 mmhub_inst, 648 AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE, 649 &err_data->ce_count); 650 amdgpu_ras_inst_query_ras_error_count(adev, 651 mmhub_v1_8_ue_reg_list, 652 ARRAY_SIZE(mmhub_v1_8_ue_reg_list), 653 mmhub_v1_8_ras_memory_list, 654 ARRAY_SIZE(mmhub_v1_8_ras_memory_list), 655 mmhub_inst, 656 AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, 657 &err_data->ue_count); 658 } 659 660 static void mmhub_v1_8_query_ras_error_count(struct amdgpu_device *adev, 661 void *ras_err_status) 662 { 663 uint32_t inst_mask; 664 uint32_t i; 665 666 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { 667 dev_warn(adev->dev, "MMHUB RAS is not supported\n"); 668 return; 669 } 670 671 inst_mask = adev->aid_mask; 672 for_each_inst(i, inst_mask) 673 mmhub_v1_8_inst_query_ras_error_count(adev, i, ras_err_status); 674 } 675 676 static void mmhub_v1_8_inst_reset_ras_error_count(struct amdgpu_device *adev, 677 uint32_t mmhub_inst) 678 { 679 amdgpu_ras_inst_reset_ras_error_count(adev, 680 mmhub_v1_8_ce_reg_list, 681 ARRAY_SIZE(mmhub_v1_8_ce_reg_list), 682 mmhub_inst); 683 amdgpu_ras_inst_reset_ras_error_count(adev, 684 mmhub_v1_8_ue_reg_list, 685 ARRAY_SIZE(mmhub_v1_8_ue_reg_list), 686 mmhub_inst); 687 } 688 689 static void mmhub_v1_8_reset_ras_error_count(struct amdgpu_device *adev) 690 { 691 uint32_t inst_mask; 692 uint32_t i; 693 694 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { 695 dev_warn(adev->dev, "MMHUB RAS is not supported\n"); 696 return; 697 } 698 699 inst_mask = adev->aid_mask; 700 for_each_inst(i, inst_mask) 701 mmhub_v1_8_inst_reset_ras_error_count(adev, i); 702 } 703 704 static const u32 mmhub_v1_8_mmea_err_status_reg[] __maybe_unused = { 705 regMMEA0_ERR_STATUS, 706 regMMEA1_ERR_STATUS, 707 regMMEA2_ERR_STATUS, 708 regMMEA3_ERR_STATUS, 709 regMMEA4_ERR_STATUS, 710 }; 711 712 static void mmhub_v1_8_inst_query_ras_err_status(struct amdgpu_device *adev, 713 uint32_t mmhub_inst) 714 { 715 uint32_t reg_value; 716 uint32_t mmea_err_status_addr_dist; 717 uint32_t i; 718 719 /* query mmea ras err status */ 720 mmea_err_status_addr_dist = regMMEA1_ERR_STATUS - regMMEA0_ERR_STATUS; 721 for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i++) { 722 reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst, 723 regMMEA0_ERR_STATUS, 724 i * mmea_err_status_addr_dist); 725 if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) || 726 REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) || 727 REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) { 728 dev_warn(adev->dev, 729 "Detected MMEA%d err in MMHUB%d, status: 0x%x\n", 730 i, mmhub_inst, reg_value); 731 } 732 } 733 734 /* query mm_cane ras err status */ 735 reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS); 736 if (REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_RDRSP_STATUS) || 737 REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_WRRSP_STATUS) || 738 REG_GET_FIELD(reg_value, MM_CANE_ERR_STATUS, SDPM_RDRSP_DATAPARITY_ERROR)) { 739 dev_warn(adev->dev, 740 "Detected MM CANE err in MMHUB%d, status: 0x%x\n", 741 mmhub_inst, reg_value); 742 } 743 } 744 745 static void mmhub_v1_8_query_ras_error_status(struct amdgpu_device *adev) 746 { 747 uint32_t inst_mask; 748 uint32_t i; 749 750 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { 751 dev_warn(adev->dev, "MMHUB RAS is not supported\n"); 752 return; 753 } 754 755 inst_mask = adev->aid_mask; 756 for_each_inst(i, inst_mask) 757 mmhub_v1_8_inst_query_ras_err_status(adev, i); 758 } 759 760 static void mmhub_v1_8_inst_reset_ras_err_status(struct amdgpu_device *adev, 761 uint32_t mmhub_inst) 762 { 763 uint32_t mmea_cgtt_clk_cntl_addr_dist; 764 uint32_t mmea_err_status_addr_dist; 765 uint32_t reg_value; 766 uint32_t i; 767 768 /* reset mmea ras err status */ 769 mmea_cgtt_clk_cntl_addr_dist = regMMEA1_CGTT_CLK_CTRL - regMMEA0_CGTT_CLK_CTRL; 770 mmea_err_status_addr_dist = regMMEA1_ERR_STATUS - regMMEA0_ERR_STATUS; 771 for (i = 0; i < ARRAY_SIZE(mmhub_v1_8_mmea_err_status_reg); i++) { 772 /* force clk branch on for response path 773 * set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 1 774 */ 775 reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst, 776 regMMEA0_CGTT_CLK_CTRL, 777 i * mmea_cgtt_clk_cntl_addr_dist); 778 reg_value = REG_SET_FIELD(reg_value, MMEA0_CGTT_CLK_CTRL, 779 SOFT_OVERRIDE_RETURN, 1); 780 WREG32_SOC15_OFFSET(MMHUB, mmhub_inst, 781 regMMEA0_CGTT_CLK_CTRL, 782 i * mmea_cgtt_clk_cntl_addr_dist, 783 reg_value); 784 785 /* set MMEA0_ERR_STATUS.CLEAR_ERROR_STATUS = 1 */ 786 reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst, 787 regMMEA0_ERR_STATUS, 788 i * mmea_err_status_addr_dist); 789 reg_value = REG_SET_FIELD(reg_value, MMEA0_ERR_STATUS, 790 CLEAR_ERROR_STATUS, 1); 791 WREG32_SOC15_OFFSET(MMHUB, mmhub_inst, 792 regMMEA0_ERR_STATUS, 793 i * mmea_err_status_addr_dist, 794 reg_value); 795 796 /* set MMEA0_CGTT_CLK_CTRL.SOFT_OVERRIDE_RETURN = 0 */ 797 reg_value = RREG32_SOC15_OFFSET(MMHUB, mmhub_inst, 798 regMMEA0_CGTT_CLK_CTRL, 799 i * mmea_cgtt_clk_cntl_addr_dist); 800 reg_value = REG_SET_FIELD(reg_value, MMEA0_CGTT_CLK_CTRL, 801 SOFT_OVERRIDE_RETURN, 0); 802 WREG32_SOC15_OFFSET(MMHUB, mmhub_inst, 803 regMMEA0_CGTT_CLK_CTRL, 804 i * mmea_cgtt_clk_cntl_addr_dist, 805 reg_value); 806 } 807 808 /* reset mm_cane ras err status 809 * force clk branch on for response path 810 * set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 1 811 */ 812 reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL); 813 reg_value = REG_SET_FIELD(reg_value, MM_CANE_ICG_CTRL, 814 SOFT_OVERRIDE_ATRET, 1); 815 WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL, reg_value); 816 817 /* set MM_CANE_ERR_STATUS.CLEAR_ERROR_STATUS = 1 */ 818 reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS); 819 reg_value = REG_SET_FIELD(reg_value, MM_CANE_ERR_STATUS, 820 CLEAR_ERROR_STATUS, 1); 821 WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ERR_STATUS, reg_value); 822 823 /* set MM_CANE_ICG_CTRL.SOFT_OVERRIDE_ATRET = 0 */ 824 reg_value = RREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL); 825 reg_value = REG_SET_FIELD(reg_value, MM_CANE_ICG_CTRL, 826 SOFT_OVERRIDE_ATRET, 0); 827 WREG32_SOC15(MMHUB, mmhub_inst, regMM_CANE_ICG_CTRL, reg_value); 828 } 829 830 static void mmhub_v1_8_reset_ras_error_status(struct amdgpu_device *adev) 831 { 832 uint32_t inst_mask; 833 uint32_t i; 834 835 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { 836 dev_warn(adev->dev, "MMHUB RAS is not supported\n"); 837 return; 838 } 839 840 inst_mask = adev->aid_mask; 841 for_each_inst(i, inst_mask) 842 mmhub_v1_8_inst_reset_ras_err_status(adev, i); 843 } 844 845 static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = { 846 .query_ras_error_count = mmhub_v1_8_query_ras_error_count, 847 .reset_ras_error_count = mmhub_v1_8_reset_ras_error_count, 848 .query_ras_error_status = mmhub_v1_8_query_ras_error_status, 849 .reset_ras_error_status = mmhub_v1_8_reset_ras_error_status, 850 }; 851 852 struct amdgpu_mmhub_ras mmhub_v1_8_ras = { 853 .ras_block = { 854 .hw_ops = &mmhub_v1_8_ras_hw_ops, 855 }, 856 }; 857