/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "df_v3_6.h"

#include "df/df_3_6_default.h"
#include "df/df_3_6_offset.h"
#include "df/df_3_6_sh_mask.h"

static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
                                       16, 32, 0, 0, 0, 2, 4, 8};

/* init df format attrs */
AMDGPU_PMU_ATTR(event, "config:0-7");
AMDGPU_PMU_ATTR(instance, "config:8-15");
AMDGPU_PMU_ATTR(umask, "config:16-23");

/* df format attributes */
static struct attribute *df_v3_6_format_attrs[] = {
        &pmu_attr_event.attr,
        &pmu_attr_instance.attr,
        &pmu_attr_umask.attr,
        NULL
};

/* df format attribute group */
static struct attribute_group df_v3_6_format_attr_group = {
        .name = "format",
        .attrs = df_v3_6_format_attrs,
};

/* df event attrs */
AMDGPU_PMU_ATTR(cake0_pcsout_txdata,
                "event=0x7,instance=0x46,umask=0x2");
AMDGPU_PMU_ATTR(cake1_pcsout_txdata,
                "event=0x7,instance=0x47,umask=0x2");
AMDGPU_PMU_ATTR(cake0_pcsout_txmeta,
                "event=0x7,instance=0x46,umask=0x4");
AMDGPU_PMU_ATTR(cake1_pcsout_txmeta,
                "event=0x7,instance=0x47,umask=0x4");
AMDGPU_PMU_ATTR(cake0_ftiinstat_reqalloc,
                "event=0xb,instance=0x46,umask=0x4");
AMDGPU_PMU_ATTR(cake1_ftiinstat_reqalloc,
                "event=0xb,instance=0x47,umask=0x4");
AMDGPU_PMU_ATTR(cake0_ftiinstat_rspalloc,
                "event=0xb,instance=0x46,umask=0x8");
AMDGPU_PMU_ATTR(cake1_ftiinstat_rspalloc,
                "event=0xb,instance=0x47,umask=0x8");

/* df event attributes */
static struct attribute *df_v3_6_event_attrs[] = {
        &pmu_attr_cake0_pcsout_txdata.attr,
        &pmu_attr_cake1_pcsout_txdata.attr,
        &pmu_attr_cake0_pcsout_txmeta.attr,
        &pmu_attr_cake1_pcsout_txmeta.attr,
        &pmu_attr_cake0_ftiinstat_reqalloc.attr,
        &pmu_attr_cake1_ftiinstat_reqalloc.attr,
        &pmu_attr_cake0_ftiinstat_rspalloc.attr,
        &pmu_attr_cake1_ftiinstat_rspalloc.attr,
        NULL
};

/* df event attribute group */
static struct attribute_group df_v3_6_event_attr_group = {
        .name = "events",
        .attrs = df_v3_6_event_attrs
};

/* df event attr groups */
const struct attribute_group *df_v3_6_attr_groups[] = {
        &df_v3_6_format_attr_group,
        &df_v3_6_event_attr_group,
        NULL
};

static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
                                 uint32_t ficaa_val)
{
        unsigned long flags, address, data;
        uint32_t ficadl_val, ficadh_val;

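        /*
         * FICA registers are reached indirectly: the SMN address of the
         * target register is written through the PCIe index register and
         * the payload is then moved through the PCIe data register, all
         * under pcie_idx_lock.
         */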
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
        WREG32(data, ficaa_val);

        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
        ficadl_val = RREG32(data);

        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
        ficadh_val = RREG32(data);

        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return (((ficadh_val & 0xFFFFFFFFFFFFFFFF) << 32) | ficadl_val);
}

static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
                             uint32_t ficadl_val, uint32_t ficadh_val)
{
        unsigned long flags, address, data;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
        WREG32(data, ficaa_val);

        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3);
        WREG32(data, ficadl_val);

        WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3);
        WREG32(data, ficadh_val);

        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/*
 * df_v3_6_perfmon_rreg - read perfmon lo and hi
 *
 * Must be atomic: no MMIO method is provided, so the lo and hi reads are
 * issued back to back under the lock to preserve the DF finite state
 * machine.
 */
static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
                                 uint32_t lo_addr, uint32_t *lo_val,
                                 uint32_t hi_addr, uint32_t *hi_val)
{
        unsigned long flags, address, data;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, lo_addr);
        *lo_val = RREG32(data);
        WREG32(address, hi_addr);
        *hi_val = RREG32(data);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/*
 * df_v3_6_perfmon_wreg - write to perfmon lo and hi
 *
 * Must be atomic: no MMIO method is provided, and no read may be issued
 * between the lo and hi data writes, in order to preserve the data
 * fabric's finite state machine.
 */
static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
                                 uint32_t lo_val, uint32_t hi_addr,
                                 uint32_t hi_val)
{
        unsigned long flags, address, data;

        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(address, lo_addr);
        WREG32(data, lo_val);
        WREG32(address, hi_addr);
        WREG32(data, hi_val);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/* get the number of df counters available */
static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct amdgpu_device *adev;
        struct drm_device *ddev;
        int i, count;

        ddev = dev_get_drvdata(dev);
        adev = ddev->dev_private;
        count = 0;

        for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
                if (adev->df_perfmon_config_assign_mask[i] == 0)
                        count++;
        }

        return snprintf(buf, PAGE_SIZE, "%i\n", count);
}

/* device attr for available perfmon counters */
static DEVICE_ATTR(df_cntr_avail, S_IRUGO, df_v3_6_get_df_cntr_avail, NULL);

/* init perfmons */
static void df_v3_6_sw_init(struct amdgpu_device *adev)
{
        int i, ret;

        ret = device_create_file(adev->dev, &dev_attr_df_cntr_avail);
        if (ret)
                DRM_ERROR("failed to create file for available df counters\n");

        for (i = 0; i < AMDGPU_MAX_DF_PERFMONS; i++)
                adev->df_perfmon_config_assign_mask[i] = 0;
}

static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
                                          bool enable)
{
        u32 tmp;

        if (enable) {
                tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
                tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
                WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
        } else
                WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
                             mmFabricConfigAccessControl_DEFAULT);
}

static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev)
{
        u32 tmp;

        tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0);
        tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
        tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;

        return tmp;
}

static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
{
        int fb_channel_number;

        fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
        if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
                fb_channel_number = 0;

        return df_v3_6_channel_number[fb_channel_number];
}

static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                     bool enable)
{
        u32 tmp;

        /* Put DF on broadcast mode */
        adev->df_funcs->enable_broadcast_mode(adev, true);

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
                tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
                tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
                tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
                WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
        } else {
                tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
                tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
                tmp |= DF_V3_6_MGCG_DISABLE;
                WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
        }

        /* Exit broadcast mode */
        adev->df_funcs->enable_broadcast_mode(adev, false);
}

static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
                                          u32 *flags)
{
        u32 tmp;

        /* AMD_CG_SUPPORT_DF_MGCG */
        tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
        if (tmp & DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY)
                *flags |= AMD_CG_SUPPORT_DF_MGCG;
}

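/*
 * A perf_event config value encodes the DF event select in bits 0-7, the
 * instance in bits 8-15 and the unit mask in bits 16-23 (see the "format"
 * attribute group above). Only the low 24 bits are recorded in
 * df_perfmon_config_assign_mask[] when a counter is claimed.
 */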
/* get assigned df perfmon ctr as int */
static int df_v3_6_pmc_config_2_cntr(struct amdgpu_device *adev,
                                     uint64_t config)
{
        int i;

        for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
                if ((config & 0x0FFFFFFUL) ==
                                adev->df_perfmon_config_assign_mask[i])
                        return i;
        }

        return -EINVAL;
}

/* get address based on counter assignment */
static void df_v3_6_pmc_get_addr(struct amdgpu_device *adev,
                                 uint64_t config,
                                 int is_ctrl,
                                 uint32_t *lo_base_addr,
                                 uint32_t *hi_base_addr)
{
        int target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

        if (target_cntr < 0)
                return;

        switch (target_cntr) {
        case 0:
                *lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
                *hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
                break;
        case 1:
                *lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
                *hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
                break;
        case 2:
                *lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
                *hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
                break;
        case 3:
                *lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
                *hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
                break;
        }
}

/* get read counter address */
static void df_v3_6_pmc_get_read_settings(struct amdgpu_device *adev,
                                          uint64_t config,
                                          uint32_t *lo_base_addr,
                                          uint32_t *hi_base_addr)
{
        df_v3_6_pmc_get_addr(adev, config, 0, lo_base_addr, hi_base_addr);
}

/* get control counter settings i.e. address and values to set */
static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev,
                                         uint64_t config,
                                         uint32_t *lo_base_addr,
                                         uint32_t *hi_base_addr,
                                         uint32_t *lo_val,
                                         uint32_t *hi_val)
{
        uint32_t eventsel, instance, unitmask;
        uint32_t instance_10, instance_5432, instance_76;

        df_v3_6_pmc_get_addr(adev, config, 1, lo_base_addr, hi_base_addr);

        if ((*lo_base_addr == 0) || (*hi_base_addr == 0)) {
                DRM_ERROR("[DF PMC] addressing not retrieved! Lo: %x, Hi: %x",
                          *lo_base_addr, *hi_base_addr);
                return -ENXIO;
        }

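        /*
         * Pack the control words: the low word carries the event select in
         * bits 5:0, instance bits 1:0 in bits 7:6, the unit mask in bits
         * 11:8 and the counter enable in bit 22; the high word carries
         * instance bits 5:2 in bits 3:0 and instance bits 7:6 in bits 30:29.
         */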
        eventsel = DF_V3_6_GET_EVENT(config) & 0x3f;
        unitmask = DF_V3_6_GET_UNITMASK(config) & 0xf;
        instance = DF_V3_6_GET_INSTANCE(config);

        instance_10 = instance & 0x3;
        instance_5432 = (instance >> 2) & 0xf;
        instance_76 = (instance >> 6) & 0x3;

        *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel | (1 << 22);
        *hi_val = (instance_76 << 29) | instance_5432;

        DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
                         config, *lo_base_addr, *hi_base_addr,
                         *lo_val, *hi_val);

        return 0;
}

/* add df performance counters for read */
static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
                                uint64_t config)
{
        int i, target_cntr;

        target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

        if (target_cntr >= 0)
                return 0;

        for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) {
                if (adev->df_perfmon_config_assign_mask[i] == 0U) {
                        adev->df_perfmon_config_assign_mask[i] =
                                                        config & 0x0FFFFFFUL;
                        return 0;
                }
        }

        return -ENOSPC;
}

/* release performance counter */
static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev,
                                     uint64_t config)
{
        int target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);

        if (target_cntr >= 0)
                adev->df_perfmon_config_assign_mask[target_cntr] = 0ULL;
}

static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev,
                                       uint64_t config)
{
        uint32_t lo_base_addr, hi_base_addr;

        df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
                                      &hi_base_addr);

        if ((lo_base_addr == 0) || (hi_base_addr == 0))
                return;

        df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);
}

/*
 * is_enable != 0 only reserves a counter for this config; a call with
 * is_enable == 0 programs the control registers (enable bit included)
 * and starts counting.
 */
static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config,
                             int is_enable)
{
        uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
        int ret = 0;

        switch (adev->asic_type) {
        case CHIP_VEGA20:
                df_v3_6_reset_perfmon_cntr(adev, config);

                if (is_enable) {
                        ret = df_v3_6_pmc_add_cntr(adev, config);
                } else {
                        ret = df_v3_6_pmc_get_ctrl_settings(adev,
                                                            config,
                                                            &lo_base_addr,
                                                            &hi_base_addr,
                                                            &lo_val,
                                                            &hi_val);

                        if (ret)
                                return ret;

                        df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
                                             hi_base_addr, hi_val);
                }

                break;
        default:
                break;
        }

        return ret;
}

/*
 * Clearing the control registers stops counting; is_disable != 0 also
 * releases the counter assignment.
 */
static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
                            int is_disable)
{
        uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;
        int ret = 0;

        switch (adev->asic_type) {
        case CHIP_VEGA20:
                ret = df_v3_6_pmc_get_ctrl_settings(adev,
                                                    config,
                                                    &lo_base_addr,
                                                    &hi_base_addr,
                                                    &lo_val,
                                                    &hi_val);

                if (ret)
                        return ret;

                df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0);

                if (is_disable)
                        df_v3_6_pmc_release_cntr(adev, config);

                break;
        default:
                break;
        }

        return ret;
}

static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
                                  uint64_t config,
                                  uint64_t *count)
{
        uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val;

        *count = 0;

        switch (adev->asic_type) {
        case CHIP_VEGA20:
                df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr,
                                              &hi_base_addr);

                if ((lo_base_addr == 0) || (hi_base_addr == 0))
                        return;

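                /*
                 * Read lo and hi in one locked sequence, combine them into
                 * a 64-bit value, and report a counter at or beyond the
                 * overflow threshold as zero.
                 */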
                df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val,
                                     hi_base_addr, &hi_val);

                *count = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL);

                if (*count >= DF_V3_6_PERFMON_OVERFLOW)
                        *count = 0;

                DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x",
                                 config, lo_base_addr, hi_base_addr,
                                 lo_val, hi_val);

                break;
        default:
                break;
        }
}

const struct amdgpu_df_funcs df_v3_6_funcs = {
        .sw_init = df_v3_6_sw_init,
        .enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
        .get_fb_channel_number = df_v3_6_get_fb_channel_number,
        .get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
        .update_medium_grain_clock_gating =
                        df_v3_6_update_medium_grain_clock_gating,
        .get_clockgating_state = df_v3_6_get_clockgating_state,
        .pmc_start = df_v3_6_pmc_start,
        .pmc_stop = df_v3_6_pmc_stop,
        .pmc_get_count = df_v3_6_pmc_get_count,
        .get_fica = df_v3_6_get_fica,
        .set_fica = df_v3_6_set_fica
};
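
/*
 * Illustrative sketch, not part of the original file: based on the
 * semantics above, a PMU-style caller would be expected to drive the
 * perfmon hooks roughly as follows (names as exported in df_v3_6_funcs):
 *
 *      adev->df_funcs->pmc_start(adev, config, 1);   reserve a counter
 *      adev->df_funcs->pmc_start(adev, config, 0);   program and start it
 *      adev->df_funcs->pmc_get_count(adev, config, &count);
 *      adev->df_funcs->pmc_stop(adev, config, 1);    stop and release it
 */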