/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>
#include <linux/seq_file.h>

#include "cikd.h"
#include "kv_dpm.h"
#include "r600_dpm.h"
#include "radeon.h"
#include "radeon_asic.h"

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000

static int kv_enable_nb_dpm(struct radeon_device *rdev,
			    bool enable);
static void kv_init_graphics_levels(struct radeon_device *rdev);
static int kv_calculate_ds_divider(struct radeon_device *rdev);
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
static int kv_calculate_dpm_settings(struct radeon_device *rdev);
static void kv_enable_new_levels(struct radeon_device *rdev);
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps);
static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
static int kv_set_enabled_levels(struct radeon_device *rdev);
static int kv_force_dpm_highest(struct radeon_device *rdev);
static int kv_force_dpm_lowest(struct radeon_device *rdev);
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps);
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct radeon_device *rdev);

void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);

extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);

static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 2 },
	{ 4, 1, 1 },
	{ 5, 5, 2 },
	{ 6, 6, 1 },
	{ 7, 9, 2 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};
static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 1 },
	{ 4, 1, 1 },
	{ 5, 5, 1 },
	{ 6, 6, 1 },
	{ 7, 9, 1 },
	{ 8, 4, 1 },
	{ 9, 2, 1 },
	{ 10, 3, 1 },
	{ 11, 6, 1 },
	{ 12, 8, 2 },
	{ 13, 1, 1 },
	{ 14, 2, 1 },
	{ 15, 3, 1 },
	{ 16, 1, 1 },
	{ 17, 4, 1 },
	{ 18, 3, 1 },
	{ 19, 1, 1 },
	{ 20, 8, 1 },
	{ 21, 5, 1 },
	{ 22, 1, 1 },
	{ 23, 1, 1 },
	{ 24, 4, 1 },
	{ 27, 6, 1 },
	{ 28, 1, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
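/*
 * DIDT register programming table.  Each entry is
 * { offset, mask, shift, value, type } and is applied by
 * kv_program_pt_config_registers() below; the 0xFFFFFFFF offset
 * terminates the table.
 */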
static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
{
	struct kv_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

#if 0
static void kv_program_local_cac_table(struct radeon_device *rdev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
#endif
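/*
 * Apply a { offset, mask, shift, value, type } table.
 * KV_CONFIGREG_CACHE entries only accumulate bits in 'cache';
 * the accumulated bits are ORed into the next register write of
 * any other type and then cleared.
 */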
static int kv_program_pt_config_registers(struct radeon_device *rdev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}
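/*
 * Toggle the DIDT (di/dt, i.e. current-slew throttling) enable bit
 * for each block (SQ/DB/TD/TCP) that the caps flags allow.
 */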
static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

static int kv_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = kv_program_pt_config_registers(rdev, didt_config_kv);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		kv_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

#if 0
static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->caps_cac) {
		WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
#endif

static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int kv_process_firmware_header(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);

	if (ret == 0)
		pi->dpm_table_start = tmp;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);

	if (ret == 0)
		pi->soft_regs_start = tmp;

	return ret;
}

static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
				   &pi->graphics_voltage_change_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_interval(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
				   &pi->graphics_interval,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
				   &pi->graphics_boot_level,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static void kv_program_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0x3FFFC100);
}

static void kv_clear_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0);
}

static int kv_set_divider_value(struct radeon_device *rdev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}
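/*
 * Convert a 2-bit VID index to a 7-bit VID code, preferring the
 * vbios vddc-sclk dependency table and falling back to the sysinfo
 * vid mapping table.
 */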
static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		if (vid_2bit < vddc_sclk_table->count)
			return vddc_sclk_table->entries[vid_2bit].v;
		else
			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
				return vid_mapping_table->entries[i].vid_7bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
	}
}

static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
				return vid_mapping_table->entries[i].vid_2bit;
		}

		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
	}
}
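/*
 * Convert a VID code to a voltage.  The encoding appears to be
 * SVI-style, 1.55 V minus 6.25 mV per step, expressed here in
 * 0.25 mV units: 6200 - 25 * vid.
 */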
static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
					       &pi->sys_info.vid_mapping_table,
					       vid_2bit);

	return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
}

static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));

	return 0;
}

static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return 0;
}

static void kv_dpm_power_level_enable(struct radeon_device *rdev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}

static void kv_start_dpm(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	kv_smc_dpm_enable(rdev, true);
}

static void kv_stop_dpm(struct radeon_device *rdev)
{
	kv_smc_dpm_enable(rdev, false);
}

static void kv_start_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
{
	return kv_notify_message_to_smu(rdev, freeze ?
					PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}

static int kv_force_lowest_valid(struct radeon_device *rdev)
{
	return kv_force_dpm_lowest(rdev);
}

static int kv_unforce_levels(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
	else
		return kv_set_enabled_levels(rdev);
}

static int kv_update_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 low_sclk_interrupt_t = 0;
	int ret = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}
	return ret;
}

static int kv_program_bootup_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	}
	return 0;
}

static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_therm_throttle_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
				   &pi->graphics_therm_throttle_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_upload_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
				   (u8 *)&pi->graphics_level,
				   sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
				   pi->sram_end);

	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
				   &pi->graphics_dpm_level_count,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a >= b) ? a - b : b - a;
}
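/*
 * Pick a DFS bypass divider id for clocks close to a set of known
 * frequencies.  Clocks are in 10 kHz units, so 40000 is ~400 MHz,
 * matched within a 2 MHz window.
 */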
static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 value;

	if (pi->caps_enable_dfs_bypass) {
		if (kv_get_clock_difference(clk, 40000) < 200)
			value = 3;
		else if (kv_get_clock_difference(clk, 30000) < 200)
			value = 2;
		else if (kv_get_clock_difference(clk, 20000) < 200)
			value = 7;
		else if (kv_get_clock_difference(clk, 15000) < 200)
			value = 6;
		else if (kv_get_clock_difference(clk, 10000) < 200)
			value = 8;
		else
			value = 0;
	} else {
		value = 0;
	}

	return value;
}

static int kv_populate_uvd_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
				   (u8 *)&pi->uvd_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UVDInterval),
				   &pi->uvd_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevel),
				   (u8 *)&pi->uvd_level,
				   sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
				   pi->sram_end);

	return ret;
}

static int kv_populate_vce_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;
	u32 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
				   (u8 *)&pi->vce_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VCEInterval),
				   (u8 *)&pi->vce_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevel),
				   (u8 *)&pi->vce_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
				   pi->sram_end);

	return ret;
}

static int kv_populate_samu_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->samu_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->samu_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].clk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->samu_level[i].Divider = (u8)dividers.post_div;

		pi->samu_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
				   (u8 *)&pi->samu_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->samu_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
				   (u8 *)&pi->samu_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevel),
				   (u8 *)&pi->samu_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}
static int kv_populate_acp_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->acp_level_count = 0;
	for (i = 0; i < table->count; i++) {
		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->acp_level[i].Divider = (u8)dividers.post_div;

		pi->acp_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
				   (u8 *)&pi->acp_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->acp_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, ACPInterval),
				   (u8 *)&pi->acp_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevel),
				   (u8 *)&pi->acp_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}
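/*
 * Same frequency-matching scheme as kv_get_clk_bypass() above, but
 * applied per graphics DPM level, using 266 and 200 MHz match
 * points instead of 200 and 150 MHz.
 */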
static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	}
}

static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

static void kv_reset_acp_boot_level(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->acp_boot_level = 0xff;
}

static void kv_update_current_ps(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

static void kv_update_requested_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}

void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->bapm_enable) {
		ret = kv_smc_bapm_enable(rdev, enable);
		if (ret)
			DRM_ERROR("kv_smc_bapm_enable failed\n");
	}
}

static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
{
	u32 thermal_int;

	thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
	if (enable)
		thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
	else
		thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
	WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
}
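/*
 * Full DPM bring-up: read the SMU firmware header, build and upload
 * the graphics level tables plus the UVD/VCE/SAMU/ACP tables, then
 * turn on voltage scaling, ULV, DPM itself, DIDT and CAC in order.
 */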
int kv_dpm_enable(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(rdev);
	kv_init_graphics_levels(rdev);
	ret = kv_program_bootup_state(rdev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(rdev);
	ret = kv_upload_dpm_settings(rdev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(rdev);
#if 0
	kv_initialize_hardware_cac_manager(rdev);
#endif
	kv_start_am(rdev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(rdev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(rdev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(rdev);
	ret = kv_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(rdev);

	ret = kv_smc_bapm_enable(rdev, false);
	if (ret) {
		DRM_ERROR("kv_smc_bapm_enable failed\n");
		return ret;
	}

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);

	return ret;
}
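/*
 * Late enable: once the IRQ handler is installed, program the
 * thermal trip range and interrupt, then power gate the IP blocks
 * that are not in use yet.
 */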
int kv_dpm_late_enable(struct radeon_device *rdev)
{
	int ret = 0;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		kv_enable_thermal_int(rdev, true);
	}

	/* powerdown unused blocks for now */
	kv_dpm_powergate_acp(rdev, true);
	kv_dpm_powergate_samu(rdev, true);
	kv_dpm_powergate_vce(rdev, true);
	kv_dpm_powergate_uvd(rdev, true);

	return ret;
}

void kv_dpm_disable(struct radeon_device *rdev)
{
	kv_smc_bapm_enable(rdev, false);

	if (rdev->family == CHIP_MULLINS)
		kv_enable_nb_dpm(rdev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(rdev, false);
	kv_dpm_powergate_samu(rdev, false);
	kv_dpm_powergate_vce(rdev, false);
	kv_dpm_powergate_uvd(rdev, false);

	kv_enable_smc_cac(rdev, false);
	kv_enable_didt(rdev, false);
	kv_clear_vc(rdev);
	kv_stop_dpm(rdev);
	kv_enable_ulv(rdev, false);
	kv_reset_am(rdev);
	kv_enable_thermal_int(rdev, false);

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}

#if 0
static int kv_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
				    (u8 *)&value, sizeof(u16), pi->sram_end);
}

static int kv_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static void kv_init_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

static int kv_init_fps_limits(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		pi->fps_high_t = cpu_to_be16(tmp);
		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsHighT),
					   (u8 *)&pi->fps_high_t,
					   sizeof(u16), pi->sram_end);

		tmp = 30;
		pi->fps_low_t = cpu_to_be16(tmp);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsLowT),
					   (u8 *)&pi->fps_low_t,
					   sizeof(u16), pi->sram_end);
	}
	return ret;
}

static void kv_init_powergate_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;
}

static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}

static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}
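/*
 * On un-gate, pick the UVD boot level and enabled-level mask:
 * stable-p-state (or no UVD DPM) pins a single level, otherwise
 * the whole set of levels (0x1f) stays enabled.
 */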
static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			mask = 1 << pi->uvd_boot_level;
		} else {
			mask = 0x1f;
		}

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
					   (uint8_t *)&pi->uvd_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		kv_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_UVDDPM_SetEnabledMask,
						  mask);
	}

	return kv_enable_uvd_dpm(rdev, !gate);
}

static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
{
	u8 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= evclk)
			break;
	}

	return i;
}

static int kv_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
		kv_dpm_powergate_vce(rdev, false);
		/* turn the clocks on when encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
					   (u8 *)&pi->vce_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_VCEDPM_SetEnabledMask,
							  (1 << pi->vce_boot_level));

		kv_enable_vce_dpm(rdev, true);
	} else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
		kv_enable_vce_dpm(rdev, false);
		/* turn the clocks off when not encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
		kv_dpm_powergate_vce(rdev, true);
	}

	return 0;
}

static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
					   (u8 *)&pi->samu_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_SAMUDPM_SetEnabledMask,
							  (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(rdev, !gate);
}

static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
{
	u8 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].clk >= 0) /* XXX */
			break;
	}

	if (i >= table->count)
		i = table->count - 1;

	return i;
}

static void kv_update_acp_boot_level(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u8 acp_boot_level;

	if (!pi->caps_stable_p_state) {
		acp_boot_level = kv_get_acp_boot_level(rdev);
		if (acp_boot_level != pi->acp_boot_level) {
			pi->acp_boot_level = acp_boot_level;
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
		}
	}
}

static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(rdev);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
					   (u8 *)&pi->acp_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(rdev, !gate);
}
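/*
 * Power gating entry points.  When gating, DPM for the block is
 * generally updated before the SMU powers it off; un-gating powers
 * the block back on first and reverses the order.
 */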
void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	if (gate) {
		if (pi->caps_uvd_pg) {
			uvd_v1_0_stop(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
		}
		kv_update_uvd_dpm(rdev, gate);
		if (pi->caps_uvd_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
			uvd_v4_2_resume(rdev);
			uvd_v1_0_start(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
		}
		kv_update_uvd_dpm(rdev, gate);
	}
}

static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->vce_power_gated == gate)
		return;

	pi->vce_power_gated = gate;

	if (gate) {
		if (pi->caps_vce_pg) {
			/* XXX do we need a vce_v1_0_stop() ? */
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
		}
	} else {
		if (pi->caps_vce_pg) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
			vce_v2_0_resume(rdev);
			vce_v1_0_start(rdev);
		}
	}
}

static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(rdev, true);
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(rdev, false);
	}
}

static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->acp_power_gated == gate)
		return;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(rdev, true);
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(rdev, false);
	}
}
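/*
 * Compute pi->lowest_valid/highest_valid, the span of DPM levels
 * covering the new state's sclk range; if the window comes out
 * empty, collapse it onto the nearer endpoint.
 */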
static void kv_set_valid_clock_range(struct radeon_device *rdev,
				     struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}

static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
					 struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		ret = kv_copy_bytes_to_smc(rdev,
					   (pi->dpm_table_start +
					    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
					    (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
					    offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
					   &clk_bypass_cntl,
					   sizeof(u8), pi->sram_end);
	}

	return ret;
}

static int kv_enable_nb_dpm(struct radeon_device *rdev,
			    bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (enable) {
		if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
			if (ret == 0)
				pi->nb_dpm_enabled = true;
		}
	} else {
		if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
			if (ret == 0)
				pi->nb_dpm_enabled = false;
		}
	}

	return ret;
}

int kv_dpm_force_performance_level(struct radeon_device *rdev,
				   enum radeon_dpm_forced_level level)
{
	int ret;

	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(rdev);
		if (ret)
			return ret;
	}

	rdev->pm.dpm.forced_level = level;

	return 0;
}

int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	kv_update_requested_ps(rdev, new_ps);

	kv_apply_state_adjust_rules(rdev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}
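/*
 * Apply the requested power state.  Kabini/Mullins reprogram levels
 * via the force-lowest/unforce sequence, while other parts (Kaveri)
 * use the SCLK DPM freeze/unfreeze sequence instead.
 */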
int kv_dpm_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	if (pi->bapm_enable) {
		ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
		if (ret) {
			DRM_ERROR("kv_smc_bapm_enable failed\n");
			return ret;
		}
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_force_lowest_valid(rdev);
			kv_enable_new_levels(rdev);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_unforce_levels(rdev);
			kv_set_enabled_levels(rdev);
			kv_force_lowest_valid(rdev);
			kv_unforce_levels(rdev);

			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_sclk_t(rdev);
			if (rdev->family == CHIP_MULLINS)
				kv_enable_nb_dpm(rdev, true);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_freeze_sclk_dpm(rdev, true);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_freeze_sclk_dpm(rdev, false);
			kv_set_enabled_levels(rdev);
			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_acp_boot_level(rdev);
			kv_update_sclk_t(rdev);
			kv_enable_nb_dpm(rdev, true);
		}
	}

	return 0;
}

void kv_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(rdev, new_ps);
}

void kv_dpm_setup_asic(struct radeon_device *rdev)
{
	sumo_take_smu_control(rdev, true);
	kv_init_powergate_state(rdev);
	kv_init_sclk_t(rdev);
}

#if 0
void kv_dpm_reset_asic(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		kv_force_lowest_valid(rdev);
		kv_init_graphics_levels(rdev);
		kv_program_bootup_state(rdev);
		kv_upload_dpm_settings(rdev);
		kv_force_lowest_valid(rdev);
		kv_unforce_levels(rdev);
	} else {
		kv_init_graphics_levels(rdev);
		kv_program_bootup_state(rdev);
		kv_freeze_sclk_dpm(rdev, true);
		kv_upload_dpm_settings(rdev);
		kv_freeze_sclk_dpm(rdev, false);
		kv_set_enabled_level(rdev, pi->graphics_boot_level);
	}
}
#endif

//XXX use sumo_dpm_display_configuration_changed

static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
						struct radeon_clock_and_voltage_limits *table)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
		table->sclk =
			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
		table->vddc =
			kv_convert_2bit_index_to_voltage(rdev,
							 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];
}
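/*
 * The vbios dependency tables store 8-bit voltage indices; convert
 * them in place to real voltages once at init.
 */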
static void kv_patch_voltage_values(struct radeon_device *rdev)
{
	int i;
	struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct radeon_vce_clock_voltage_dependency_table *vce_table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct radeon_clock_voltage_dependency_table *samu_table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct radeon_clock_voltage_dependency_table *acp_table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	if (uvd_table->count) {
		for (i = 0; i < uvd_table->count; i++)
			uvd_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 uvd_table->entries[i].v);
	}

	if (vce_table->count) {
		for (i = 0; i < vce_table->count; i++)
			vce_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 vce_table->entries[i].v);
	}

	if (samu_table->count) {
		for (i = 0; i < samu_table->count; i++)
			samu_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 samu_table->entries[i].v);
	}

	if (acp_table->count) {
		for (i = 0; i < acp_table->count; i++)
			acp_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 acp_table->entries[i].v);
	}
}

static void kv_construct_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}

static int kv_force_dpm_highest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}

static int kv_force_dpm_lowest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}
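/*
 * Pick the largest deep-sleep divider that keeps the divided sclk
 * at or above the minimum engine clock.
 */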
static void kv_patch_voltage_values(struct radeon_device *rdev)
{
	int i;
	struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct radeon_vce_clock_voltage_dependency_table *vce_table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct radeon_clock_voltage_dependency_table *samu_table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct radeon_clock_voltage_dependency_table *acp_table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	if (uvd_table->count) {
		for (i = 0; i < uvd_table->count; i++)
			uvd_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 uvd_table->entries[i].v);
	}

	if (vce_table->count) {
		for (i = 0; i < vce_table->count; i++)
			vce_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 vce_table->entries[i].v);
	}

	if (samu_table->count) {
		for (i = 0; i < samu_table->count; i++)
			samu_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 samu_table->entries[i].v);
	}

	if (acp_table->count) {
		for (i = 0; i < acp_table->count; i++)
			acp_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 acp_table->entries[i].v);
	}
}

static void kv_construct_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}

static int kv_force_dpm_highest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}

static int kv_force_dpm_lowest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}

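/*
 * Find the deepest sleep divider that keeps the divided sclk at or
 * above the minimum engine clock allowed in self-refresh
 * (min_sclk_in_sr).
 */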
static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	u32 temp;
	u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
		min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;

	if (sclk < min)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		temp = sclk / sumo_get_sleep_divider_from_id(i);
		if (temp >= min)
			break;
	}

	return (u8)i;
}

static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	int i;

	if (table && table->count) {
		for (i = table->count - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	}

	*limit = 0;
	return 0;
}

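/*
 * Patch the requested state before programming it: raise sclks for
 * VCE and stable-p-state requirements, clamp levels that exceed the
 * high voltage limit, and pick the NB p-state indices per family.
 */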
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps)
{
	struct kv_ps *ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 min_sclk = 10000; /* ??? */
	u32 sclk, mclk = 0;
	int i, limit;
	bool force_high;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 stable_p_state_sclk = 0;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	if (new_rps->vce_active) {
		new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		new_rps->evclk = 0;
		new_rps->ecclk = 0;
	}

	mclk = max_limits->mclk;
	sclk = min_sclk;

	if (pi->caps_stable_p_state) {
		stable_p_state_sclk = (max_limits->sclk * 75) / 100;

		for (i = table->count - 1; i >= 0; i--) {
			if (stable_p_state_sclk >= table->entries[i].clk) {
				stable_p_state_sclk = table->entries[i].clk;
				break;
			}
		}

		if (i > 0)
			stable_p_state_sclk = table->entries[0].clk;

		sclk = stable_p_state_sclk;
	}

	if (new_rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
	}

	ps->need_dfs_bypass = true;

	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].sclk < sclk)
			ps->levels[i].sclk = sclk;
	}

	if (table && table->count) {
		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].clk;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].sclk_frequency;
			}
		}
	}

	if (pi->caps_stable_p_state) {
		for (i = 0; i < ps->num_levels; i++) {
			ps->levels[i].sclk = stable_p_state_sclk;
		}
	}

	pi->video_start = new_rps->dclk || new_rps->vclk ||
		new_rps->evclk || new_rps->ecclk;

	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x1;
		ps->dpmx_nb_ps_hi = 0x0;
	} else {
		ps->dpm0_pg_nb_ps_lo = 0x3;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x3;
		ps->dpmx_nb_ps_hi = 0x0;

		if (pi->sys_info.nb_dpm_enable) {
			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
				pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
				pi->disable_nb_ps3_in_battery;
			ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpm0_pg_nb_ps_hi = 0x2;
			ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpmx_nb_ps_hi = 0x2;
		}
	}
}

static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
						    u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}

static int kv_calculate_ds_divider(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 sclk_in_sr = 10000; /* ??? */
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
		pi->graphics_level[i].DeepSleepDivId =
			kv_get_sleep_divider_id_from_clock(rdev,
							   be32_to_cpu(pi->graphics_level[i].SclkFrequency),
							   sclk_in_sr);
	}
	return 0;
}

static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	bool force_high;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	u32 mclk = max_limits->mclk;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (!pi->sys_info.nb_dpm_enable)
			return 0;

		force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
			      (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

		if (force_high) {
			for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
				pi->graphics_level[i].GnbSlow = 0;
		} else {
			if (pi->battery_state)
				pi->graphics_level[0].ForceNbPs1 = 1;

			pi->graphics_level[1].GnbSlow = 0;
			pi->graphics_level[2].GnbSlow = 0;
			pi->graphics_level[3].GnbSlow = 0;
			pi->graphics_level[4].GnbSlow = 0;
		}
	} else {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
			pi->graphics_level[pi->lowest_valid].UpH = 0x28;
			pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
			if (pi->lowest_valid != pi->highest_valid)
				pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
		}
	}
	return 0;
}

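/* Only the highest valid level carries the display watermark. */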
static int kv_calculate_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;

	return 0;
}

static void kv_init_graphics_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		u32 vid_2bit;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->count; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].clk);
			vid_2bit = kv_convert_vid7_to_vid2(rdev,
							   &pi->sys_info.vid_mapping_table,
							   table->entries[i].v);
			kv_set_vid(rdev, i, vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->num_max_dpm_entries; i++) {
			if (pi->high_voltage_t &&
			    pi->high_voltage_t <
			    kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
			kv_set_vid(rdev, i, table->entries[i].vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	}

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
		kv_dpm_power_level_enable(rdev, i, false);
}

static void kv_enable_new_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (i >= pi->lowest_valid && i <= pi->highest_valid)
			kv_dpm_power_level_enable(rdev, i, true);
	}
}

static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
{
	u32 new_mask = (1 << level);

	return kv_send_msg_to_smc_with_parameter(rdev,
						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
						 new_mask);
}

static int kv_set_enabled_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i, new_mask = 0;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		new_mask |= (1 << i);

	return kv_send_msg_to_smc_with_parameter(rdev,
						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
						 new_mask);
}

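/*
 * Write the NB p-state selections for the new state into
 * NB_DPM_CONFIG_1; Kabini/Mullins do not use this register path and
 * return early.
 */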
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 nbdpmconfig1;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return;

	if (pi->sys_info.nb_dpm_enable) {
		nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
		nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
				  DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
		nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
				 Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
				 DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
				 DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
		WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
	}
}

static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
	tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
	tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
		DIG_THERM_INTL(49 + (low_temp / 1000)));
	WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};

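/*
 * Pull bootup clocks, thermal limits, NB p-state clocks and the
 * sclk/voltage mapping out of the IntegratedSystemInfo table; only
 * table revision 8 is supported here.
 */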
static int kv_parse_sys_info_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 8) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
		if (igp_info->info_8.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
		if (igp_info->info_8.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
		}

		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
		}
		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
		    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		sumo_construct_sclk_voltage_mapping_table(rdev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_8.sAvail_SCLK);

		sumo_construct_vid_mapping_table(rdev,
						 &pi->sys_info.vid_mapping_table,
						 igp_info->info_8.sAvail_SCLK);

		kv_construct_max_power_limits_table(rdev,
						    &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}
	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void kv_patch_boot_state(struct radeon_device *rdev,
				struct kv_ps *ps)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	ps->num_levels = 1;
	ps->levels[0] = pi->boot_pl;
}

static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		rdev->pm.dpm.boot_ps = rps;
		kv_patch_boot_state(rdev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}

static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *ps = kv_get_ps(rps);
	struct kv_pl *pl = &ps->levels[index];
	u32 sclk;

	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;

	ps->num_levels = index + 1;

	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}

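/*
 * Walk the PPLib state array from the vbios and build the radeon_ps
 * array, then record the sclk for each VCE state.
 */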
static int kv_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct kv_ps *ps;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct radeon_ps),
				  GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info)
			return -EINVAL;
		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
		if (ps == NULL) {
			kfree(rdev->pm.dpm.ps);
			return -ENOMEM;
		}
		rdev->pm.dpm.ps[i].ps_priv = ps;
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			kv_parse_pplib_clock_info(rdev,
						  &rdev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	rdev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
		u32 sclk;
		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
		sclk |= clock_info->sumo.ucEngineClockHigh << 16;
		rdev->pm.dpm.vce_states[i].sclk = sclk;
		rdev->pm.dpm.vce_states[i].mclk = 0;
	}

	return 0;
}

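/*
 * Allocate the kv_power_info, read the platform caps and extended
 * power table, set the default feature caps, then parse the sys info
 * and power tables from the vbios.
 */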
int kv_dpm_init(struct radeon_device *rdev)
{
	struct kv_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	ret = r600_get_platform_caps(rdev);
	if (ret)
		return ret;

	ret = r600_parse_extended_power_table(rdev);
	if (ret)
		return ret;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	pi->sram_end = SMC_RAM_END;

	/* Enabling nb dpm on an asrock system prevents dpm from working */
	if (rdev->pdev->subsystem_vendor == 0x1849)
		pi->enable_nb_dpm = false;
	else
		pi->enable_nb_dpm = true;

	pi->caps_power_containment = true;
	pi->caps_cac = true;
	pi->enable_didt = false;
	if (pi->enable_didt) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}

	pi->caps_sclk_ds = true;
	pi->enable_auto_thermal_throttling = true;
	pi->disable_nb_ps3_in_battery = false;
	if (radeon_bapm == -1) {
		/* only enable bapm on KB, ML by default */
		if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
			pi->bapm_enable = true;
		else
			pi->bapm_enable = false;
	} else if (radeon_bapm == 0) {
		pi->bapm_enable = false;
	} else {
		pi->bapm_enable = true;
	}
	pi->voltage_drop_t = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->caps_fps = false; /* true? */
	pi->caps_uvd_pg = true;
	pi->caps_uvd_dpm = true;
	pi->caps_vce_pg = false; /* XXX true */
	pi->caps_samu_pg = false;
	pi->caps_acp_pg = false;
	pi->caps_stable_p_state = false;

	ret = kv_parse_sys_info_table(rdev);
	if (ret)
		return ret;

	kv_patch_voltage_values(rdev);
	kv_construct_boot_state(rdev);

	ret = kv_parse_power_table(rdev);
	if (ret)
		return ret;

	pi->enable_dpm = true;

	return 0;
}

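/*
 * Report the current sclk index, the corresponding sclk/vddc, and the
 * UVD/VCE power gating status via debugfs.
 */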
"dis" : "en"); 2820 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 2821 current_index, sclk, vddc); 2822 } 2823 } 2824 2825 u32 kv_dpm_get_current_sclk(struct radeon_device *rdev) 2826 { 2827 struct kv_power_info *pi = kv_get_pi(rdev); 2828 u32 current_index = 2829 (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >> 2830 CURR_SCLK_INDEX_SHIFT; 2831 u32 sclk; 2832 2833 if (current_index >= SMU__NUM_SCLK_DPM_STATE) { 2834 return 0; 2835 } else { 2836 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); 2837 return sclk; 2838 } 2839 } 2840 2841 u32 kv_dpm_get_current_mclk(struct radeon_device *rdev) 2842 { 2843 struct kv_power_info *pi = kv_get_pi(rdev); 2844 2845 return pi->sys_info.bootup_uma_clk; 2846 } 2847 2848 void kv_dpm_print_power_state(struct radeon_device *rdev, 2849 struct radeon_ps *rps) 2850 { 2851 int i; 2852 struct kv_ps *ps = kv_get_ps(rps); 2853 2854 r600_dpm_print_class_info(rps->class, rps->class2); 2855 r600_dpm_print_cap_info(rps->caps); 2856 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 2857 for (i = 0; i < ps->num_levels; i++) { 2858 struct kv_pl *pl = &ps->levels[i]; 2859 printk("\t\tpower level %d sclk: %u vddc: %u\n", 2860 i, pl->sclk, 2861 kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index)); 2862 } 2863 r600_dpm_print_ps_status(rdev, rps); 2864 } 2865 2866 void kv_dpm_fini(struct radeon_device *rdev) 2867 { 2868 int i; 2869 2870 for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 2871 kfree(rdev->pm.dpm.ps[i].ps_priv); 2872 } 2873 kfree(rdev->pm.dpm.ps); 2874 kfree(rdev->pm.dpm.priv); 2875 r600_free_extended_power_table(rdev); 2876 } 2877 2878 void kv_dpm_display_configuration_changed(struct radeon_device *rdev) 2879 { 2880 2881 } 2882 2883 u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low) 2884 { 2885 struct kv_power_info *pi = kv_get_pi(rdev); 2886 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); 2887 2888 if (low) 2889 return requested_state->levels[0].sclk; 2890 else 2891 return requested_state->levels[requested_state->num_levels - 1].sclk; 2892 } 2893 2894 u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low) 2895 { 2896 struct kv_power_info *pi = kv_get_pi(rdev); 2897 2898 return pi->sys_info.bootup_uma_clk; 2899 } 2900 2901