/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * gfxhub_v1_0.c - GFX hub (graphics memory hub) setup for SOC15/Vega10.
 *
 * Programs the GC (graphics/compute) copy of the VM/MC registers:
 * system aperture, GART (VM context 0) page table, L1 TLB and L2 cache
 * controls, and VM contexts 1-15 used for per-process page tables.
 */
#include "amdgpu.h"
#include "gfxhub_v1_0.h"

#include "vega10/soc15ip.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/GC/gc_9_0_default.h"
#include "vega10/vega10_enum.h"

#include "soc15_common.h"

/**
 * gfxhub_v1_0_gart_enable - program the GFX hub MC and enable the GART
 *
 * @adev: amdgpu_device pointer
 *
 * Sets up the system aperture over VRAM, points VM context 0 at the GART
 * page table, configures the L1 TLB and VM L2 cache, and programs VM
 * contexts 1-15 for per-process page tables.
 *
 * Returns 0 (no failure paths; pure register programming).
 */
int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
	u32 tmp;
	u64 value;
	u32 i;

	/* Program MC. */
	/* Update configuration */
	/* System aperture covers all of VRAM; addresses programmed >> 18. */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR),
		adev->mc.vram_start >> 18);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR),
		adev->mc.vram_end >> 18);

	/*
	 * Default address for accesses that miss the aperture: the VRAM
	 * scratch page, translated from CPU-visible to GPU address space.
	 */
	value = adev->vram_scratch.gpu_addr - adev->mc.vram_start
		+ adev->vm_manager.vram_base_offset;
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB),
		(u32)(value >> 12));
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),
		(u32)(value >> 44));

	if (amdgpu_sriov_vf(adev)) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they
		 * are VF copy registers so the vbios post doesn't program
		 * them; under SRIOV the driver needs to program them itself.
		 */
		WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_FB_LOCATION_BASE),
			     adev->mc.vram_start >> 24);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_FB_LOCATION_TOP),
			     adev->mc.vram_end >> 24);
	}

	/* Disable AGP. */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_AGP_BASE), 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_AGP_TOP), 0);
	/* BOT > TOP makes the AGP range empty. */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_AGP_BOT), 0xFFFFFFFF);

	/* GART Enable. */

	/* Setup TLB control */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL));
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				SYSTEM_ACCESS_MODE,
				3);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ENABLE_ADVANCED_DRIVER_MODEL,
				1);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				SYSTEM_APERTURE_UNMAPPED_ACCESS,
				0);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ECO_BITS,
				0);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				MTYPE,
				MTYPE_UC);/* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ATC_EN,
				1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);

	/* Setup L2 cache */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				ENABLE_L2_FRAGMENT_PROCESSING,
				0);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				L2_PDE0_CACHE_TAG_GENERATION_MODE,
				0);/* XXX for emulation, Refer to closed source code.*/
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				CONTEXT1_IDENTITY_ACCESS_MODE,
				1);
	tmp = REG_SET_FIELD(tmp,
				VM_L2_CNTL,
				IDENTITY_MODE_FRAGMENT_SIZE,
				0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL), tmp);

	/* Flush everything cached before the new tables take effect. */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL2), tmp);

	/* Hardware default is good enough for VM_L2_CNTL3. */
	tmp = mmVM_L2_CNTL3_DEFAULT;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL3), tmp);

	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL4));
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_CNTL4,
			    VMC_TAP_PDE_REQUEST_PHYSICAL,
			    0);
	tmp = REG_SET_FIELD(tmp,
			    VM_L2_CNTL4,
			    VMC_TAP_PTE_REQUEST_PHYSICAL,
			    0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL4), tmp);

	/* setup context0: maps the GTT range through the GART table. */
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32),
		(u32)(adev->mc.gtt_start >> 12));
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32),
		(u32)(adev->mc.gtt_start >> 44));

	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32),
		(u32)(adev->mc.gtt_end >> 12));
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32),
		(u32)(adev->mc.gtt_end >> 44));

	/* GART table must be 4K-aligned and within the 48-bit address space. */
	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
	value = adev->gart.table_addr - adev->mc.vram_start
		+ adev->vm_manager.vram_base_offset;
	value &= 0x0000FFFFFFFFF000ULL;
	value |= 0x1; /*valid bit*/

	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),
		(u32)value);
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),
		(u32)(value >> 32));

	/* Faulting accesses are redirected to the dummy page. */
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32),
		(u32)(adev->dummy_page.addr >> 12));
	WREG32(SOC15_REG_OFFSET(GC, 0,
				mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32),
		(u32)(adev->dummy_page.addr >> 44));

	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL2));
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
				ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY,
				1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL2), tmp);

	/* Context 0 is the GART: depth 0 means a flat (1-level) table. */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL), tmp);

	/* Disable identity aperture.*/
	/* LOW > HIGH makes the identity aperture range empty. */
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32), 0XFFFFFFFF);
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32), 0x0000000F);

	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32), 0);
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32), 0);

	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32), 0);
	WREG32(SOC15_REG_OFFSET(GC, 0,
		mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32), 0);

	/*
	 * Program VM contexts 1-15 (i = 0..14 offsets from CONTEXT1) for
	 * per-process page tables: multi-level tables spanning the full
	 * VM range, with all protection faults enabled. The START/END
	 * registers are LO32/HI32 pairs, hence the i*2 stride.
	 */
	for (i = 0; i <= 14; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		/* Block size is log2 of pages per block, relative to 2^9. */
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				PAGE_TABLE_BLOCK_SIZE,
				amdgpu_vm_block_size - 9);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32) + i*2,
				adev->vm_manager.max_pfn - 1);
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32) + i*2, 0);
	}


	return 0;
}

/**
 * gfxhub_v1_0_gart_disable - disable VM address translation on the GFX hub
 *
 * @adev: amdgpu_device pointer
 *
 * Disables all VM contexts, the L1 TLB and the VM L2 cache.
 */
void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;
	u32 i;

	/* Disable all tables (contexts 0-15; CNTL registers are contiguous). */
	for (i = 0; i < 16; i++)
		WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL) + i, 0);

	/* Setup TLB control */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL));
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp,
				MC_VM_MX_L1_TLB_CNTL,
				ENABLE_ADVANCED_DRIVER_MODEL,
				0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmMC_VM_MX_L1_TLB_CNTL), tmp);

	/* Setup L2 cache */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL), tmp);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_CNTL3), 0);
}

/**
 * gfxhub_v1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
					  bool value)
{
	u32 tmp;
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL));
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp,
			VM_L2_PROTECTION_FAULT_CNTL,
			TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
}

/*
 * Build the VM_INVALIDATE_ENG0_REQ value that flushes all TLB/L2 VM
 * caches for the given vm_id, using legacy (type 0) invalidation.
 */
static uint32_t gfxhub_v1_0_get_invalidate_req(unsigned int vm_id)
{
	u32 req = 0;

	/* invalidate using legacy mode on vm_id*/
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vm_id);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);

	return req;
}

/* Interrupt-enable mask for all VM context 1 protection fault types. */
static uint32_t gfxhub_v1_0_get_vm_protection_bits(void)
{
	return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
}

static int gfxhub_v1_0_early_init(void *handle)
{
	return 0;
}

static int gfxhub_v1_0_late_init(void *handle)
{
	return 0;
}

/*
 * Fill in the AMDGPU_GFXHUB vmhub structure: register offsets and
 * callbacks used by the common VM code to drive this hub.
 */
static int gfxhub_v1_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(GC, 0,
				 mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);

	hub->get_invalidate_req = gfxhub_v1_0_get_invalidate_req;
	hub->get_vm_protection_bits = gfxhub_v1_0_get_vm_protection_bits;

	return 0;
}

static int gfxhub_v1_0_sw_fini(void *handle)
{
	return 0;
}

/*
 * Open up the full address range on all 18 invalidation engines
 * (LO32/HI32 register pairs, hence the 2 * i stride).
 */
static int gfxhub_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned i;

	for (i = 0 ; i < 18; ++i) {
		WREG32(SOC15_REG_OFFSET(GC, 0,
					mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32) +
		       2 * i, 0xffffffff);
		WREG32(SOC15_REG_OFFSET(GC, 0,
					mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32) +
		       2 * i, 0x1f);
	}

	return 0;
}

static int gfxhub_v1_0_hw_fini(void *handle)
{
	return 0;
}

static int gfxhub_v1_0_suspend(void *handle)
{
	return 0;
}

static int gfxhub_v1_0_resume(void *handle)
{
	return 0;
}

static bool gfxhub_v1_0_is_idle(void *handle)
{
	/* Pure register-programming block; nothing to wait on. */
	return true;
}

static int gfxhub_v1_0_wait_for_idle(void *handle)
{
	return 0;
}

static int gfxhub_v1_0_soft_reset(void *handle)
{
	return 0;
}

static int gfxhub_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int gfxhub_v1_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gfxhub_v1_0_ip_funcs = {
	.name = "gfxhub_v1_0",
	.early_init = gfxhub_v1_0_early_init,
	.late_init = gfxhub_v1_0_late_init,
	.sw_init = gfxhub_v1_0_sw_init,
	.sw_fini = gfxhub_v1_0_sw_fini,
	.hw_init = gfxhub_v1_0_hw_init,
	.hw_fini = gfxhub_v1_0_hw_fini,
	.suspend = gfxhub_v1_0_suspend,
	.resume = gfxhub_v1_0_resume,
	.is_idle = gfxhub_v1_0_is_idle,
	.wait_for_idle = gfxhub_v1_0_wait_for_idle,
	.soft_reset = gfxhub_v1_0_soft_reset,
	.set_clockgating_state = gfxhub_v1_0_set_clockgating_state,
	.set_powergating_state = gfxhub_v1_0_set_powergating_state,
};

const struct amdgpu_ip_block_version gfxhub_v1_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFXHUB,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &gfxhub_v1_0_ip_funcs,
};