/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_gem.h"

#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
#include "vega10_enum.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"
#include "oss/osssys_4_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"

#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "umc_v6_1.h"

#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"

#include "amdgpu_ras.h"

/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION				0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX			2
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT	0x0
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT	0x10
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK	0x00003FFFL
#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK	0x3FFF0000L

/* XXX Move this macro to VEGA10 header file, which is like vid.h for VI. */
#define AMDGPU_NUM_OF_VMIDS	8

static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};

static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};

static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};

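/*
 * Raw per-channel UMC ECC register offsets. The layout is inferred from
 * the numbers themselves rather than taken from the UMC headers: eight
 * UMC instances spaced 0x00040000 apart, four channels per instance
 * spaced 0x800 apart, with the control-mask register at +0x20 and the
 * status register at +0x2 from each channel's control register.
 */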
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};

static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	(0x000143c2 + 0x00000000),
	(0x000143c2 + 0x00000800),
	(0x000143c2 + 0x00001000),
	(0x000143c2 + 0x00001800),
	(0x000543c2 + 0x00000000),
	(0x000543c2 + 0x00000800),
	(0x000543c2 + 0x00001000),
	(0x000543c2 + 0x00001800),
	(0x000943c2 + 0x00000000),
	(0x000943c2 + 0x00000800),
	(0x000943c2 + 0x00001000),
	(0x000943c2 + 0x00001800),
	(0x000d43c2 + 0x00000000),
	(0x000d43c2 + 0x00000800),
	(0x000d43c2 + 0x00001000),
	(0x000d43c2 + 0x00001800),
	(0x001143c2 + 0x00000000),
	(0x001143c2 + 0x00000800),
	(0x001143c2 + 0x00001000),
	(0x001143c2 + 0x00001800),
	(0x001543c2 + 0x00000000),
	(0x001543c2 + 0x00000800),
	(0x001543c2 + 0x00001000),
	(0x001543c2 + 0x00001800),
	(0x001943c2 + 0x00000000),
	(0x001943c2 + 0x00000800),
	(0x001943c2 + 0x00001000),
	(0x001943c2 + 0x00001800),
	(0x001d43c2 + 0x00000000),
	(0x001d43c2 + 0x00000800),
	(0x001d43c2 + 0x00001000),
	(0x001d43c2 + 0x00001800),
};

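/*
 * Enable or disable ECC error reporting across all UMC channels. The
 * 0x7f value is assumed to select the per-error-type interrupt enable
 * bits in the low seven bits of the control and control-mask registers
 * listed above; the UMC headers are not used here, hence the raw mask.
 */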
static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *src,
		unsigned type,
		enum amdgpu_interrupt_state state)
{
	u32 bits, i, tmp, reg;

	bits = 0x7f;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp &= ~bits;
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
			tmp = RREG32(reg);
			tmp |= bits;
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
		struct ras_err_data *err_data,
		struct amdgpu_iv_entry *entry)
{
	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	if (adev->umc.funcs->query_ras_error_count)
		adev->umc.funcs->query_ras_error_count(adev, err_data);
	/* umc query_ras_error_address is also responsible for clearing
	 * error status
	 */
	if (adev->umc.funcs->query_ras_error_address)
		adev->umc.funcs->query_ras_error_address(adev, err_data);

	/* only uncorrectable error needs gpu reset */
	if (err_data->ue_count)
		amdgpu_ras_reset_gpu(adev, 0);

	return AMDGPU_RAS_SUCCESS;
}

static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *src,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits, i, j;

	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp &= ~bits;
				WREG32(reg, tmp);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for (j = 0; j < adev->num_vmhubs; j++) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;
				tmp = RREG32(reg);
				tmp |= bits;
				WREG32(reg, tmp);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}

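/*
 * VM fault handler. The faulting address arrives split across the IV
 * entry: src_data[0] carries bits 43:12 of the virtual address and the
 * low nibble of src_data[1] carries bits 47:44, while bit 7 of
 * src_data[1] marks the fault as retryable.
 */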
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0;
	u64 addr;
	char hub_name[10];

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (!amdgpu_sriov_vf(adev)) {
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = gmc_v9_0_process_ecc_irq,
};

static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;

	adev->gmc.ecc_irq.num_types = 1;
	adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
}

static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					    uint32_t flush_type)
{
	u32 req = 0;

	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

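/*
 * The request word built by gmc_v9_0_get_invalidate_req() asks the
 * invalidation engine to drop every cached translation (L1 PTEs plus L2
 * PTEs and all three PDE levels) for the VMIDs selected in
 * PER_VMID_INVALIDATE_REQ; for example, gmc_v9_0_get_invalidate_req(0, 0)
 * requests a flush-type-0 invalidation of VMID 0 only.
 */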
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using the given flush type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
				   uint32_t vmid, uint32_t flush_type)
{
	const unsigned eng = 17;
	unsigned i, j;

	for (i = 0; i < adev->num_vmhubs; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);

		/* This is necessary for a HW workaround under SRIOV as well
		 * as GFXOFF under bare metal
		 */
		if (adev->gfx.kiq.ring.sched.ready &&
		    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
		    !adev->in_gpu_reset) {
			uint32_t req = hub->vm_inv_eng0_req + eng;
			uint32_t ack = hub->vm_inv_eng0_ack + eng;

			amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
							   1 << vmid);
			continue;
		}

		spin_lock(&adev->gmc.invalidate_lock);
		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}
		spin_unlock(&adev->gmc.invalidate_lock);
		if (j < adev->usec_timeout)
			continue;

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}
}

static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}

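/*
 * Note the two TLB-flush paths above: gmc_v9_0_flush_gpu_tlb runs from
 * the CPU (routing through the KIQ when direct MMIO access is unsafe,
 * e.g. under SR-IOV or with GFXOFF enabled), while
 * gmc_v9_0_emit_flush_gpu_tlb emits the same register writes as ring
 * packets so the flush stays ordered with the submitted command stream.
 */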
static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* Do nothing because there's no lut register for mmhub1. */
	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on VEGA 10:
 * 63:59 reserved
 * 58:57 mtype
 * 56 F
 * 55 L
 * 54 P
 * 53 SW
 * 52 T
 * 50:48 reserved
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VEGA 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;

	switch (flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
		break;
	default:
		pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
		break;
	}

	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}

static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};

static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}

static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.funcs = &umc_v6_1_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->mmhub_funcs = &mmhub_v1_0_funcs;
		break;
	default:
		break;
	}
}

static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
	/*
	 * TODO:
	 * Currently there is a bug where some memory client outside
	 * of the driver writes to the first 8M of VRAM on S3 resume.
	 * This overrides GART, which by default gets placed in the first
	 * 8M, and causes VM_FAULTs once GTT is accessed.
	 * Keep the stolen memory reservation until this is resolved.
	 * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
		return true;
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	default:
		return false;
	}
}

static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
		{GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP,
		GFXHUB_FREE_VM_INV_ENGS_BITMAP};
	unsigned i;
	unsigned vmhub, inv_eng;

	for (i = 0; i < adev->num_rings; ++i) {
		ring = adev->rings[i];
		vmhub = ring->funcs->vmhub;

		inv_eng = ffs(vm_inv_engs[vmhub]);
		if (!inv_eng) {
			dev_err(adev->dev, "no VM inv eng for ring %s\n",
				ring->name);
			return -EINVAL;
		}

		ring->vm_inv_eng = inv_eng - 1;
		vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);

		dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
			 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
	}

	return 0;
}

static int gmc_v9_0_ecc_ras_block_late_init(void *handle,
			struct ras_fs_if *fs_info, struct ras_common_if *ras_block)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ras_common_if **ras_if = NULL;
	struct ras_ih_if ih_info = {
		.cb = gmc_v9_0_process_ras_data_cb,
	};
	int r;

	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
		ras_if = &adev->gmc.umc_ras_if;
	else if (ras_block->block == AMDGPU_RAS_BLOCK__MMHUB)
		ras_if = &adev->gmc.mmhub_ras_if;
	else
		BUG();

	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
		return 0;
	}

	/* handle resume path. */
	if (*ras_if) {
		/* resend ras TA enable cmd during resume.
		 * prepare to handle failure.
		 */
		ih_info.head = **ras_if;
		r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
		if (r) {
			if (r == -EAGAIN) {
				/* request a gpu reset. will run again. */
				amdgpu_ras_request_reset_on_boot(adev,
						ras_block->block);
				return 0;
			}
			/* failed to enable ras, clean up everything. */
			goto irq;
		}
		/* enabled successfully. continue. */
		goto resume;
	}

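	/* First-time initialization: allocate the RAS interface object and
	 * enable the feature.  The labels at the bottom unwind whatever has
	 * been set up so far if any later step fails.
	 */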
	*ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
	if (!*ras_if)
		return -ENOMEM;

	**ras_if = *ras_block;

	r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
	if (r) {
		if (r == -EAGAIN) {
			amdgpu_ras_request_reset_on_boot(adev,
					ras_block->block);
			r = 0;
		}
		goto feature;
	}

	ih_info.head = **ras_if;
	fs_info->head = **ras_if;

	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
		r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
		if (r)
			goto interrupt;
	}

	amdgpu_ras_debugfs_create(adev, fs_info);

	r = amdgpu_ras_sysfs_create(adev, fs_info);
	if (r)
		goto sysfs;
resume:
	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
		r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
		if (r)
			goto irq;
	}

	return 0;
irq:
	amdgpu_ras_sysfs_remove(adev, *ras_if);
sysfs:
	amdgpu_ras_debugfs_remove(adev, *ras_if);
	if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
interrupt:
	amdgpu_ras_feature_enable(adev, *ras_if, 0);
feature:
	kfree(*ras_if);
	*ras_if = NULL;
	return r;
}

static int gmc_v9_0_ecc_late_init(void *handle)
{
	int r;

	struct ras_fs_if umc_fs_info = {
		.sysfs_name = "umc_err_count",
		.debugfs_name = "umc_err_inject",
	};
	struct ras_common_if umc_ras_block = {
		.block = AMDGPU_RAS_BLOCK__UMC,
		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
		.sub_block_index = 0,
		.name = "umc",
	};
	struct ras_fs_if mmhub_fs_info = {
		.sysfs_name = "mmhub_err_count",
		.debugfs_name = "mmhub_err_inject",
	};
	struct ras_common_if mmhub_ras_block = {
		.block = AMDGPU_RAS_BLOCK__MMHUB,
		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
		.sub_block_index = 0,
		.name = "mmhub",
	};

	r = gmc_v9_0_ecc_ras_block_late_init(handle,
			&umc_fs_info, &umc_ras_block);
	if (r)
		return r;

	r = gmc_v9_0_ecc_ras_block_late_init(handle,
			&mmhub_fs_info, &mmhub_ras_block);
	return r;
}

static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (!gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_late_init(adev);

	r = gmc_v9_0_allocate_vm_inv_eng(adev);
	if (r)
		return r;
	/* Check if ecc is available */
	if (!amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:
		case CHIP_VEGA20:
			r = amdgpu_atomfirmware_mem_ecc_supported(adev);
			if (!r) {
				DRM_INFO("ECC is not present.\n");
				if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
					adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
			} else {
				DRM_INFO("ECC is active.\n");
			}

			r = amdgpu_atomfirmware_sram_ecc_supported(adev);
			if (!r) {
				DRM_INFO("SRAM ECC is not present.\n");
			} else {
				DRM_INFO("SRAM ECC is active.\n");
			}
			break;
		default:
			break;
		}
	}

	r = gmc_v9_0_ecc_late_init(handle);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

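/*
 * On XGMI setups each physical node owns one segment of the shared
 * framebuffer space, so both the MC base address and
 * vm_manager.vram_base_offset below are shifted by
 * physical_node_id * node_segment_size to address the local node's slice.
 */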
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

	if (!amdgpu_sriov_vf(adev)) {
		if (adev->asic_type == CHIP_ARCTURUS)
			base = mmhub_v9_4_get_fb_location(adev);
		else
			base = mmhub_v1_0_get_fb_location(adev);
	}
	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
	if (!amdgpu_sriov_vf(adev))
		amdgpu_gmc_agp_location(adev, mc);
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int chansize, numchan;
	int r;

	if (amdgpu_sriov_vf(adev)) {
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as
		 * on RAVEN, and the DF related registers are not readable;
		 * hardcoding seems to be the only way to set the correct
		 * vram_width.
		 */
		adev->gmc.vram_width = 2048;
	} else if (amdgpu_emu_mode != 1) {
		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
	}

	if (!adev->gmc.vram_width) {
		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df_funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	/* get_memsize() reports the size in MB; convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

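/*
 * GART table sizing below: one 8-byte PTE per GPU page, so the default
 * 512 MB GART with 4 KB pages needs 131072 * 8 bytes = 1 MB of table in
 * VRAM.
 */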
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control;
	unsigned size;

	/*
	 * TODO Remove once GART corruption is resolved
	 * Check related code in gmc_v9_0_sw_fini
	 */
	if (gmc_v9_0_keep_stolen_memory(adev))
		return 9 * 1024 * 1024;

	d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;

		switch (adev->asic_type) {
		case CHIP_RAVEN:
		case CHIP_RENOIR:
			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
			size = (REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport,
					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
				4);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_VEGA20:
		default:
			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
				4);
			break;
		}
	}
	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
		return 0;

	return size;
}

static int gmc_v9_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v1_0_init(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_init(adev);
	else
		mmhub_v1_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
		adev->num_vmhubs = 2;

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* SR-IOV restricts max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

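	/* Fault interrupts come from three clients: VMC (mmhub0), VMC1
	 * (the second mmhub on Arcturus) and UTCL2 (gfxhub).  All of them
	 * feed the same vm_fault source handled by
	 * gmc_v9_0_process_interrupt().
	 */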
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* interrupt sent to DF. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
			&adev->gmc.ecc_irq);
	if (r)
		return r;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = gfxhub_v1_1_get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
			adev->gmc.umc_ras_if) {
		struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
		};

		/* remove fs first */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		/* remove the IH */
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
			adev->gmc.mmhub_ras_if) {
		struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if;

		/* remove fs and disable ras feature */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);

	if (gmc_v9_0_keep_stolen_memory(adev))
		amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);

	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}

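/*
 * Golden register programming.  Note that Vega10 skips the MMHUB/ATHUB
 * sequences under SR-IOV; presumably the host driver applies them on
 * behalf of the guest.
 */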
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		/* fall through */
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}

/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		/* TODO for renoir */
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->asic_type == CHIP_ARCTURUS)
		r = mmhub_v9_4_gart_enable(adev);
	else
		r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP. */
	adev->nbio_funcs->hdp_flush(adev, NULL);

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_fault_enable_default(adev, value);
	else
		mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_flush_gpu_tlb(adev, 0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}

static int gmc_v9_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	r = gmc_v9_0_gart_enable(adev);

	return r;
}

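/*
 * Teardown mirrors bring-up: gmc_v9_0_hw_fini below releases the fault
 * and ECC interrupts first, then gmc_v9_0_gart_disable shuts off both
 * hubs' GART and unpins the page-table BO.
 */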
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v1_0_gart_disable(adev);
	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_gart_disable(adev);
	else
		mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}

static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}

static int gmc_v9_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9. */
	return true;
}

static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9. */
	return 0;
}

static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation. */
	return 0;
}

static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_set_clockgating(adev, state);
	else
		mmhub_v1_0_set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}

static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_ARCTURUS)
		mmhub_v9_4_get_clockgating(adev, flags);
	else
		mmhub_v1_0_get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}

static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};