/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/seq_file.h>

#include <drm/drm_pci.h>

#include "cikd.h"
#include "kv_dpm.h"
#include "r600_dpm.h"
#include "radeon.h"
#include "radeon_asic.h"

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000

static int kv_enable_nb_dpm(struct radeon_device *rdev,
			    bool enable);
static void kv_init_graphics_levels(struct radeon_device *rdev);
static int kv_calculate_ds_divider(struct radeon_device *rdev);
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
static int kv_calculate_dpm_settings(struct radeon_device *rdev);
static void kv_enable_new_levels(struct radeon_device *rdev);
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps);
static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
static int kv_set_enabled_levels(struct radeon_device *rdev);
static int kv_force_dpm_highest(struct radeon_device *rdev);
static int kv_force_dpm_lowest(struct radeon_device *rdev);
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps);
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct radeon_device *rdev);

void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);

extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);

static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 2 },
	{ 4, 1, 1 },
	{ 5, 5, 2 },
	{ 6, 6, 1 },
	{ 7, 9, 2 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};
static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 1 },
	{ 4, 1, 1 },
	{ 5, 5, 1 },
	{ 6, 6, 1 },
	{ 7, 9, 1 },
	{ 8, 4, 1 },
	{ 9, 2, 1 },
	{ 10, 3, 1 },
	{ 11, 6, 1 },
	{ 12, 8, 2 },
	{ 13, 1, 1 },
	{ 14, 2, 1 },
	{ 15, 3, 1 },
	{ 16, 1, 1 },
	{ 17, 4, 1 },
	{ 18, 3, 1 },
	{ 19, 1, 1 },
	{ 20, 8, 1 },
	{ 21, 5, 1 },
	{ 22, 1, 1 },
	{ 23, 1, 1 },
	{ 24, 4, 1 },
	{ 27, 6, 1 },
	{ 28, 1, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

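/*
 * DIDT (di/dt throttling) register programming table: each row is
 * { offset, mask, shift, value, type }, applied by
 * kv_program_pt_config_registers() below.  Rows tagged
 * KV_CONFIGREG_DIDT_IND go through the DIDT indirect register space;
 * the table is terminated by an offset of 0xFFFFFFFF.
 */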
static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
{
	struct kv_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

#if 0
static void kv_program_local_cac_table(struct radeon_device *rdev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
#endif

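/*
 * Apply a table of { offset, mask, shift, value, type } entries.
 * KV_CONFIGREG_CACHE rows only accumulate their shifted value into
 * 'cache'; the accumulated bits are OR'd into the next non-cache
 * register write, after which the cache is cleared.
 */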
static int kv_program_pt_config_registers(struct radeon_device *rdev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}

static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

static int kv_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = kv_program_pt_config_registers(rdev, didt_config_kv);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		kv_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

#if 0
static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->caps_cac) {
		WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
#endif

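/*
 * CAC (dynamic power estimation) is owned by the SMU firmware and is
 * toggled purely via messages; pi->cac_enabled mirrors the firmware
 * state so a disable is only sent when CAC was actually enabled.
 */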
static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int kv_process_firmware_header(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);

	if (ret == 0)
		pi->dpm_table_start = tmp;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);

	if (ret == 0)
		pi->soft_regs_start = tmp;

	return ret;
}

static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
				   &pi->graphics_voltage_change_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_interval(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
				   &pi->graphics_interval,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
				   &pi->graphics_boot_level,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static void kv_program_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0x3FFFC100);
}

static void kv_clear_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0);
}

static int kv_set_divider_value(struct radeon_device *rdev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}

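/*
 * Voltage indices come in two encodings: 2-bit VIDs used by the sclk
 * voltage mapping table and 7/8-bit VIDs used by the dependency tables.
 * When a vddc_dependency_on_sclk table is present it takes precedence
 * over the BIOS VID mapping table in both directions.
 */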
static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		if (vid_2bit < vddc_sclk_table->count)
			return vddc_sclk_table->entries[vid_2bit].v;
		else
			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
				return vid_mapping_table->entries[i].vid_7bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
	}
}

static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
				return vid_mapping_table->entries[i].vid_2bit;
		}

		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
	}
}

static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
					       &pi->sys_info.vid_mapping_table,
					       vid_2bit);

	return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
}

static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));

	return 0;
}

static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return 0;
}

static void kv_dpm_power_level_enable(struct radeon_device *rdev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}

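/*
 * GLOBAL_PWRMGT_EN hands engine clock control to the SMC;
 * kv_smc_dpm_enable() then starts or stops the firmware DPM state
 * machine.
 */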
static void kv_start_dpm(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	kv_smc_dpm_enable(rdev, true);
}

static void kv_stop_dpm(struct radeon_device *rdev)
{
	kv_smc_dpm_enable(rdev, false);
}

static void kv_start_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
{
	return kv_notify_message_to_smu(rdev, freeze ?
					PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}

static int kv_force_lowest_valid(struct radeon_device *rdev)
{
	return kv_force_dpm_lowest(rdev);
}

static int kv_unforce_levels(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
	else
		return kv_set_enabled_levels(rdev);
}

static int kv_update_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 low_sclk_interrupt_t = 0;
	int ret = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}
	return ret;
}

static int kv_program_bootup_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	}
	return 0;
}

static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_therm_throttle_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
				   &pi->graphics_therm_throttle_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}

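/*
 * Push the full graphics level array, followed by the level count,
 * into the SMU's copy of the DPM table in SMC SRAM.
 */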
static int kv_upload_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
				   (u8 *)&pi->graphics_level,
				   sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
				   pi->sram_end);

	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
				   &pi->graphics_dpm_level_count,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a >= b) ? a - b : b - a;
}

static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 value;

	if (pi->caps_enable_dfs_bypass) {
		if (kv_get_clock_difference(clk, 40000) < 200)
			value = 3;
		else if (kv_get_clock_difference(clk, 30000) < 200)
			value = 2;
		else if (kv_get_clock_difference(clk, 20000) < 200)
			value = 7;
		else if (kv_get_clock_difference(clk, 15000) < 200)
			value = 6;
		else if (kv_get_clock_difference(clk, 10000) < 200)
			value = 8;
		else
			value = 0;
	} else {
		value = 0;
	}

	return value;
}

static int kv_populate_uvd_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
				   (u8 *)&pi->uvd_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UVDInterval),
				   &pi->uvd_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevel),
				   (u8 *)&pi->uvd_level,
				   sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
				   pi->sram_end);

	return ret;

}

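/*
 * The VCE, SAMU and ACP tables below follow the same pattern as the
 * UVD table above: build the level array from the clock/voltage
 * dependency table (stopping at high_voltage_t where applicable),
 * look up the PLL post divider for each level, then copy the count,
 * interval and levels into SMC SRAM.
 */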
static int kv_populate_vce_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;
	u32 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
				   (u8 *)&pi->vce_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VCEInterval),
				   (u8 *)&pi->vce_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevel),
				   (u8 *)&pi->vce_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
				   pi->sram_end);

	return ret;
}

static int kv_populate_samu_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->samu_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->samu_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].clk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->samu_level[i].Divider = (u8)dividers.post_div;

		pi->samu_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
				   (u8 *)&pi->samu_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->samu_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
				   (u8 *)&pi->samu_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevel),
				   (u8 *)&pi->samu_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}

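/*
 * Note: unlike UVD/VCE/SAMU, the ACP levels are not clamped at
 * high_voltage_t; every entry of the dependency table is programmed.
 */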
static int kv_populate_acp_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->acp_level_count = 0;
	for (i = 0; i < table->count; i++) {
		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->acp_level[i].Divider = (u8)dividers.post_div;

		pi->acp_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
				   (u8 *)&pi->acp_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->acp_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, ACPInterval),
				   (u8 *)&pi->acp_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevel),
				   (u8 *)&pi->acp_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}

static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	}
}

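/* ULV (ultra-low voltage) is handled entirely by the SMU firmware. */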
static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

static void kv_reset_acp_boot_level(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->acp_boot_level = 0xff;
}

static void kv_update_current_ps(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

static void kv_update_requested_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}

void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->bapm_enable) {
		ret = kv_smc_bapm_enable(rdev, enable);
		if (ret)
			DRM_ERROR("kv_smc_bapm_enable failed\n");
	}
}

static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
{
	u32 thermal_int;

	thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
	if (enable)
		thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
	else
		thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
	WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);

}

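/*
 * DPM bring-up: locate the SMU's DPM table, build and upload the
 * graphics levels and the UVD/VCE/SAMU/ACP tables, then start the
 * firmware state machine and enable DIDT and CAC.  Any failure aborts
 * the sequence and is reported to the caller.
 */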
int kv_dpm_enable(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(rdev);
	kv_init_graphics_levels(rdev);
	ret = kv_program_bootup_state(rdev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(rdev);
	ret = kv_upload_dpm_settings(rdev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(rdev);
#if 0
	kv_initialize_hardware_cac_manager(rdev);
#endif
	kv_start_am(rdev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(rdev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(rdev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(rdev);
	ret = kv_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(rdev);

	ret = kv_smc_bapm_enable(rdev, false);
	if (ret) {
		DRM_ERROR("kv_smc_bapm_enable failed\n");
		return ret;
	}

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);

	return ret;
}

int kv_dpm_late_enable(struct radeon_device *rdev)
{
	int ret = 0;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		kv_enable_thermal_int(rdev, true);
	}

	/* powerdown unused blocks for now */
	kv_dpm_powergate_acp(rdev, true);
	kv_dpm_powergate_samu(rdev, true);
	kv_dpm_powergate_vce(rdev, true);
	kv_dpm_powergate_uvd(rdev, true);

	return ret;
}

void kv_dpm_disable(struct radeon_device *rdev)
{
	kv_smc_bapm_enable(rdev, false);

	if (rdev->family == CHIP_MULLINS)
		kv_enable_nb_dpm(rdev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(rdev, false);
	kv_dpm_powergate_samu(rdev, false);
	kv_dpm_powergate_vce(rdev, false);
	kv_dpm_powergate_uvd(rdev, false);

	kv_enable_smc_cac(rdev, false);
	kv_enable_didt(rdev, false);
	kv_clear_vc(rdev);
	kv_stop_dpm(rdev);
	kv_enable_ulv(rdev, false);
	kv_reset_am(rdev);
	kv_enable_thermal_int(rdev, false);

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}

#if 0
static int kv_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
				    (u8 *)&value, sizeof(u16), pi->sram_end);
}

static int kv_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static void kv_init_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

static int kv_init_fps_limits(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		pi->fps_high_t = cpu_to_be16(tmp);
		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsHighT),
					   (u8 *)&pi->fps_high_t,
					   sizeof(u16), pi->sram_end);

		tmp = 30;
		pi->fps_low_t = cpu_to_be16(tmp);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsLowT),
					   (u8 *)&pi->fps_low_t,
					   sizeof(u16), pi->sram_end);

	}
	return ret;
}

static void kv_init_powergate_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;

}

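/*
 * The per-block DPM enables below are thin wrappers around SMU
 * messages; the firmware owns the actual UVD/VCE/SAMU/ACP clocking.
 */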
static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}

static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}

static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			mask = 1 << pi->uvd_boot_level;
		} else {
			mask = 0x1f;
		}

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
					   (uint8_t *)&pi->uvd_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		kv_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_UVDDPM_SetEnabledMask,
						  mask);
	}

	return kv_enable_uvd_dpm(rdev, !gate);
}

static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
{
	u8 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= evclk)
			break;
	}

	return i;
}

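/*
 * VCE DPM follows the encode state: ungate and enable when a state
 * with a non-zero evclk comes in, gate and disable when encoding stops.
 */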
static int kv_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
		kv_dpm_powergate_vce(rdev, false);
		/* turn the clocks on when encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
					   (u8 *)&pi->vce_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_VCEDPM_SetEnabledMask,
							  (1 << pi->vce_boot_level));

		kv_enable_vce_dpm(rdev, true);
	} else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
		kv_enable_vce_dpm(rdev, false);
		/* turn the clocks off when not encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
		kv_dpm_powergate_vce(rdev, true);
	}

	return 0;
}

static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
					   (u8 *)&pi->samu_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_SAMUDPM_SetEnabledMask,
							  (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(rdev, !gate);
}

static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
{
	u8 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].clk >= 0) /* XXX */
			break;
	}

	if (i >= table->count)
		i = table->count - 1;

	return i;
}

static void kv_update_acp_boot_level(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u8 acp_boot_level;

	if (!pi->caps_stable_p_state) {
		acp_boot_level = kv_get_acp_boot_level(rdev);
		if (acp_boot_level != pi->acp_boot_level) {
			pi->acp_boot_level = acp_boot_level;
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
		}
	}
}

static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(rdev);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
					   (u8 *)&pi->acp_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(rdev, !gate);
}

void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	if (gate) {
		if (pi->caps_uvd_pg) {
			uvd_v1_0_stop(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
		}
		kv_update_uvd_dpm(rdev, gate);
		if (pi->caps_uvd_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
			uvd_v4_2_resume(rdev);
			uvd_v1_0_start(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
		}
		kv_update_uvd_dpm(rdev, gate);
	}
}

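/*
 * Gating VCE just sends VCEPowerOFF to the SMU; ungating sends
 * VCEPowerON and then resumes and restarts the VCE block.
 */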
static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->vce_power_gated == gate)
		return;

	pi->vce_power_gated = gate;

	if (gate) {
		if (pi->caps_vce_pg) {
			/* XXX do we need a vce_v1_0_stop() ? */
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
		}
	} else {
		if (pi->caps_vce_pg) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
			vce_v2_0_resume(rdev);
			vce_v1_0_start(rdev);
		}
	}
}

static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(rdev, true);
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(rdev, false);
	}
}

static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->acp_power_gated == gate)
		return;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(rdev, true);
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(rdev, false);
	}
}

static void kv_set_valid_clock_range(struct radeon_device *rdev,
				     struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}

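/*
 * When DFS bypass is supported, write the boot level's ClkBypassCntl
 * byte directly into the SMU copy of the graphics level table.
 */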
static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
					 struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		ret = kv_copy_bytes_to_smc(rdev,
					   (pi->dpm_table_start +
					    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
					    (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
					    offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
					   &clk_bypass_cntl,
					   sizeof(u8), pi->sram_end);
	}

	return ret;
}

static int kv_enable_nb_dpm(struct radeon_device *rdev,
			    bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (enable) {
		if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
			if (ret == 0)
				pi->nb_dpm_enabled = true;
		}
	} else {
		if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
			if (ret == 0)
				pi->nb_dpm_enabled = false;
		}
	}

	return ret;
}

int kv_dpm_force_performance_level(struct radeon_device *rdev,
				   enum radeon_dpm_forced_level level)
{
	int ret;

	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(rdev);
		if (ret)
			return ret;
	}

	rdev->pm.dpm.forced_level = level;

	return 0;
}

int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	kv_update_requested_ps(rdev, new_ps);

	kv_apply_state_adjust_rules(rdev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}

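/*
 * The state transition sequence differs by chip: Kabini/Mullins force
 * the lowest level around the table update and then unforce, while
 * Kaveri freezes sclk DPM, rewrites the tables and unfreezes.
 */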
int kv_dpm_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	if (pi->bapm_enable) {
		ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
		if (ret) {
			DRM_ERROR("kv_smc_bapm_enable failed\n");
			return ret;
		}
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_force_lowest_valid(rdev);
			kv_enable_new_levels(rdev);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_unforce_levels(rdev);
			kv_set_enabled_levels(rdev);
			kv_force_lowest_valid(rdev);
			kv_unforce_levels(rdev);

			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_sclk_t(rdev);
			if (rdev->family == CHIP_MULLINS)
				kv_enable_nb_dpm(rdev, true);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_freeze_sclk_dpm(rdev, true);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_freeze_sclk_dpm(rdev, false);
			kv_set_enabled_levels(rdev);
			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_acp_boot_level(rdev);
			kv_update_sclk_t(rdev);
			kv_enable_nb_dpm(rdev, true);
		}
	}

	return 0;
}

void kv_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(rdev, new_ps);
}

void kv_dpm_setup_asic(struct radeon_device *rdev)
{
	sumo_take_smu_control(rdev, true);
	kv_init_powergate_state(rdev);
	kv_init_sclk_t(rdev);
}

#if 0
void kv_dpm_reset_asic(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		kv_force_lowest_valid(rdev);
		kv_init_graphics_levels(rdev);
		kv_program_bootup_state(rdev);
		kv_upload_dpm_settings(rdev);
		kv_force_lowest_valid(rdev);
		kv_unforce_levels(rdev);
	} else {
		kv_init_graphics_levels(rdev);
		kv_program_bootup_state(rdev);
		kv_freeze_sclk_dpm(rdev, true);
		kv_upload_dpm_settings(rdev);
		kv_freeze_sclk_dpm(rdev, false);
		kv_set_enabled_level(rdev, pi->graphics_boot_level);
	}
}
#endif

//XXX use sumo_dpm_display_configuration_changed

static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
						struct radeon_clock_and_voltage_limits *table)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
		table->sclk =
			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
		table->vddc =
			kv_convert_2bit_index_to_voltage(rdev,
							 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];
}

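/*
 * The BIOS dependency tables store 8-bit voltage indices; rewrite them
 * in place as real voltages via kv_convert_8bit_index_to_voltage().
 */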
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	if (uvd_table->count) {
		for (i = 0; i < uvd_table->count; i++)
			uvd_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 uvd_table->entries[i].v);
	}

	if (vce_table->count) {
		for (i = 0; i < vce_table->count; i++)
			vce_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 vce_table->entries[i].v);
	}

	if (samu_table->count) {
		for (i = 0; i < samu_table->count; i++)
			samu_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 samu_table->entries[i].v);
	}

	if (acp_table->count) {
		for (i = 0; i < acp_table->count; i++)
			acp_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 acp_table->entries[i].v);
	}
}

static void kv_construct_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}

static int kv_force_dpm_highest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}

static int kv_force_dpm_lowest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}

static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	u32 temp;
	u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
		min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;

	if (sclk < min)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		temp = sclk / sumo_get_sleep_divider_from_id(i);
		if (temp >= min)
			break;
	}

	return (u8)i;
}

static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	int i;

	if (table && table->count) {
		for (i = table->count - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	}

	*limit = 0;
	return 0;
}

static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps)
{
	struct kv_ps *ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 min_sclk = 10000; /* ??? */
	u32 sclk, mclk = 0;
	int i, limit;
	bool force_high;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 stable_p_state_sclk = 0;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	if (new_rps->vce_active) {
		new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		new_rps->evclk = 0;
		new_rps->ecclk = 0;
	}

	mclk = max_limits->mclk;
	sclk = min_sclk;

	if (pi->caps_stable_p_state) {
		stable_p_state_sclk = (max_limits->sclk * 75) / 100;

		for (i = table->count - 1; i >= 0; i--) {
			if (stable_p_state_sclk >= table->entries[i].clk) {
				stable_p_state_sclk = table->entries[i].clk;
				break;
			}
		}

		if (i > 0)
			stable_p_state_sclk = table->entries[0].clk;

		sclk = stable_p_state_sclk;
	}

	if (new_rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
	}

	ps->need_dfs_bypass = true;

	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].sclk < sclk)
			ps->levels[i].sclk = sclk;
	}

	if (table && table->count) {
		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].clk;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
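				/*
				 * This level would need a voltage above
				 * high_voltage_t; clamp its sclk to the
				 * fastest entry that stays within the limit.
				 */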
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].sclk_frequency;
			}
		}
	}

	if (pi->caps_stable_p_state) {
		for (i = 0; i < ps->num_levels; i++) {
			ps->levels[i].sclk = stable_p_state_sclk;
		}
	}

	pi->video_start = new_rps->dclk || new_rps->vclk ||
		new_rps->evclk || new_rps->ecclk;

	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x1;
		ps->dpmx_nb_ps_hi = 0x0;
	} else {
		ps->dpm0_pg_nb_ps_lo = 0x3;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x3;
		ps->dpmx_nb_ps_hi = 0x0;

		if (pi->sys_info.nb_dpm_enable) {
			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
				pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
				pi->disable_nb_ps3_in_battery;
			ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpm0_pg_nb_ps_hi = 0x2;
			ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpmx_nb_ps_hi = 0x2;
		}
	}
}

static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
						    u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}

static int kv_calculate_ds_divider(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 sclk_in_sr = 10000; /* ??? */
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
		pi->graphics_level[i].DeepSleepDivId =
			kv_get_sleep_divider_id_from_clock(rdev,
							   be32_to_cpu(pi->graphics_level[i].SclkFrequency),
							   sclk_in_sr);
	}
	return 0;
}

static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	bool force_high;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	u32 mclk = max_limits->mclk;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (!pi->sys_info.nb_dpm_enable)
			return 0;

		force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
			      (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

		if (force_high) {
			for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
				pi->graphics_level[i].GnbSlow = 0;
		} else {
			if (pi->battery_state)
				pi->graphics_level[0].ForceNbPs1 = 1;

			pi->graphics_level[1].GnbSlow = 0;
			pi->graphics_level[2].GnbSlow = 0;
			pi->graphics_level[3].GnbSlow = 0;
			pi->graphics_level[4].GnbSlow = 0;
		}
	} else {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
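			/*
			 * Battery + NB DPM: make the lowest level stickier
			 * (UpH = 0x28, presumably the SMU's up-hysteresis
			 * threshold), disable "GNB slow" for it, and force
			 * NB PS1 from it when more than one level is valid.
			 */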
			pi->graphics_level[pi->lowest_valid].UpH = 0x28;
			pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
			if (pi->lowest_valid != pi->highest_valid)
				pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
		}
	}
	return 0;
}

static int kv_calculate_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;

	return 0;
}

static void kv_init_graphics_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		u32 vid_2bit;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->count; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].clk);
			vid_2bit = kv_convert_vid7_to_vid2(rdev,
							   &pi->sys_info.vid_mapping_table,
							   table->entries[i].v);
			kv_set_vid(rdev, i, vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->num_max_dpm_entries; i++) {
			if (pi->high_voltage_t &&
			    pi->high_voltage_t <
			    kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
			kv_set_vid(rdev, i, table->entries[i].vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	}

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
		kv_dpm_power_level_enable(rdev, i, false);
}

static void kv_enable_new_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (i >= pi->lowest_valid && i <= pi->highest_valid)
			kv_dpm_power_level_enable(rdev, i, true);
	}
}

static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
{
	u32 new_mask = (1 << level);

	return kv_send_msg_to_smc_with_parameter(rdev,
						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
						 new_mask);
}

static int kv_set_enabled_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i, new_mask = 0;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		new_mask |= (1 << i);

	return kv_send_msg_to_smc_with_parameter(rdev,
						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
						 new_mask);
}

static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 nbdpmconfig1;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return;
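
	/*
	 * Only Kaveri reaches this point; Kabini and Mullins appear to have
	 * no NB p-state index fields to program here, so they bail out above.
	 */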
	if (pi->sys_info.nb_dpm_enable) {
		nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
		nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
				  DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
		nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
				 Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
				 DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
				 DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
		WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
	}
}

static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
	tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
	tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
		DIG_THERM_INTL(49 + (low_temp / 1000)));
	WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};

static int kv_parse_sys_info_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 8) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
		if (igp_info->info_8.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
		if (igp_info->info_8.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
		}

		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
		}
		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
		    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;
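		/* ("ENABEL" above is not a typo here: the flag appears to be
		 * misspelled in atombios.h itself, so this must match that
		 * definition.)
		 */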

		sumo_construct_sclk_voltage_mapping_table(rdev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_8.sAvail_SCLK);

		sumo_construct_vid_mapping_table(rdev,
						 &pi->sys_info.vid_mapping_table,
						 igp_info->info_8.sAvail_SCLK);

		kv_construct_max_power_limits_table(rdev,
						    &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}
	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void kv_patch_boot_state(struct radeon_device *rdev,
				struct kv_ps *ps)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	ps->num_levels = 1;
	ps->levels[0] = pi->boot_pl;
}

static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		rdev->pm.dpm.boot_ps = rps;
		kv_patch_boot_state(rdev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}

static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *ps = kv_get_ps(rps);
	struct kv_pl *pl = &ps->levels[index];
	u32 sclk;

	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;

	ps->num_levels = index + 1;

	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}

static int kv_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct kv_ps *ps;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct radeon_ps),
				  GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info)
			return -EINVAL;
		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
		if (ps == NULL) {
			kfree(rdev->pm.dpm.ps);
			return -ENOMEM;
		}
		rdev->pm.dpm.ps[i].ps_priv = ps;
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			kv_parse_pplib_clock_info(rdev,
						  &rdev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	rdev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
		u32 sclk;
		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
		sclk |= clock_info->sumo.ucEngineClockHigh << 16;
		rdev->pm.dpm.vce_states[i].sclk = sclk;
		rdev->pm.dpm.vce_states[i].mclk = 0;
	}

	return 0;
}

int kv_dpm_init(struct radeon_device *rdev)
{
	struct kv_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	ret = r600_get_platform_caps(rdev);
	if (ret)
		return ret;

	ret = r600_parse_extended_power_table(rdev);
	if (ret)
		return ret;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	pi->sram_end = SMC_RAM_END;

	/* Enabling nb dpm on an asrock system prevents dpm from working */
	if (rdev->pdev->subsystem_vendor == 0x1849)
		pi->enable_nb_dpm = false;
	else
		pi->enable_nb_dpm = true;

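	/*
	 * Driver policy defaults below, not values read back from hardware.
	 * DIDT (di/dt throttling) is left disabled, so the four ramping caps
	 * guarded by it are never set on this path.
	 */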
	pi->caps_power_containment = true;
	pi->caps_cac = true;
	pi->enable_didt = false;
	if (pi->enable_didt) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}

	pi->caps_sclk_ds = true;
	pi->enable_auto_thermal_throttling = true;
	pi->disable_nb_ps3_in_battery = false;
	if (radeon_bapm == -1) {
		/* only enable bapm on KB, ML by default */
		if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
			pi->bapm_enable = true;
		else
			pi->bapm_enable = false;
	} else if (radeon_bapm == 0) {
		pi->bapm_enable = false;
	} else {
		pi->bapm_enable = true;
	}
	pi->voltage_drop_t = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->caps_fps = false; /* true? */
	pi->caps_uvd_pg = true;
	pi->caps_uvd_dpm = true;
	pi->caps_vce_pg = false; /* XXX true */
	pi->caps_samu_pg = false;
	pi->caps_acp_pg = false;
	pi->caps_stable_p_state = false;

	ret = kv_parse_sys_info_table(rdev);
	if (ret)
		return ret;

	kv_patch_voltage_values(rdev);
	kv_construct_boot_state(rdev);

	ret = kv_parse_power_table(rdev);
	if (ret)
		return ret;

	pi->enable_dpm = true;

	return 0;
}

void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 current_index =
		(RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
		CURR_SCLK_INDEX_SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
		vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
		seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
		seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
"dis" : "en"); 2821 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 2822 current_index, sclk, vddc); 2823 } 2824 } 2825 2826 u32 kv_dpm_get_current_sclk(struct radeon_device *rdev) 2827 { 2828 struct kv_power_info *pi = kv_get_pi(rdev); 2829 u32 current_index = 2830 (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >> 2831 CURR_SCLK_INDEX_SHIFT; 2832 u32 sclk; 2833 2834 if (current_index >= SMU__NUM_SCLK_DPM_STATE) { 2835 return 0; 2836 } else { 2837 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); 2838 return sclk; 2839 } 2840 } 2841 2842 u32 kv_dpm_get_current_mclk(struct radeon_device *rdev) 2843 { 2844 struct kv_power_info *pi = kv_get_pi(rdev); 2845 2846 return pi->sys_info.bootup_uma_clk; 2847 } 2848 2849 void kv_dpm_print_power_state(struct radeon_device *rdev, 2850 struct radeon_ps *rps) 2851 { 2852 int i; 2853 struct kv_ps *ps = kv_get_ps(rps); 2854 2855 r600_dpm_print_class_info(rps->class, rps->class2); 2856 r600_dpm_print_cap_info(rps->caps); 2857 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 2858 for (i = 0; i < ps->num_levels; i++) { 2859 struct kv_pl *pl = &ps->levels[i]; 2860 printk("\t\tpower level %d sclk: %u vddc: %u\n", 2861 i, pl->sclk, 2862 kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index)); 2863 } 2864 r600_dpm_print_ps_status(rdev, rps); 2865 } 2866 2867 void kv_dpm_fini(struct radeon_device *rdev) 2868 { 2869 int i; 2870 2871 for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 2872 kfree(rdev->pm.dpm.ps[i].ps_priv); 2873 } 2874 kfree(rdev->pm.dpm.ps); 2875 kfree(rdev->pm.dpm.priv); 2876 r600_free_extended_power_table(rdev); 2877 } 2878 2879 void kv_dpm_display_configuration_changed(struct radeon_device *rdev) 2880 { 2881 2882 } 2883 2884 u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low) 2885 { 2886 struct kv_power_info *pi = kv_get_pi(rdev); 2887 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); 2888 2889 if (low) 2890 return requested_state->levels[0].sclk; 2891 else 2892 return requested_state->levels[requested_state->num_levels - 1].sclk; 2893 } 2894 2895 u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low) 2896 { 2897 struct kv_power_info *pi = kv_get_pi(rdev); 2898 2899 return pi->sys_info.bootup_uma_clk; 2900 } 2901 2902