/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "radeon.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "kv_dpm.h"
#include "radeon_asic.h"
#include <linux/seq_file.h>

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000

static void kv_init_graphics_levels(struct radeon_device *rdev);
static int kv_calculate_ds_divider(struct radeon_device *rdev);
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
static int kv_calculate_dpm_settings(struct radeon_device *rdev);
static void kv_enable_new_levels(struct radeon_device *rdev);
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps);
static int kv_set_enabled_level(struct radeon_device *rdev, u32 level);
static int kv_set_enabled_levels(struct radeon_device *rdev);
static int kv_force_dpm_highest(struct radeon_device *rdev);
static int kv_force_dpm_lowest(struct radeon_device *rdev);
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps);
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct radeon_device *rdev);

void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);

extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);
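/*
 * Local CAC (capacitance) config tables.  Each entry is
 * { block_id, signal count, t }; kv_program_local_cac_table() issues one
 * register write per signal for every block until it reaches the
 * 0xffffffff terminator.
 */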
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 2 },
	{ 4, 1, 1 },
	{ 5, 5, 2 },
	{ 6, 6, 1 },
	{ 7, 9, 2 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 1 },
	{ 4, 1, 1 },
	{ 5, 5, 1 },
	{ 6, 6, 1 },
	{ 7, 9, 1 },
	{ 8, 4, 1 },
	{ 9, 2, 1 },
	{ 10, 3, 1 },
	{ 11, 6, 1 },
	{ 12, 8, 2 },
	{ 13, 1, 1 },
	{ 14, 2, 1 },
	{ 15, 3, 1 },
	{ 16, 1, 1 },
	{ 17, 4, 1 },
	{ 18, 3, 1 },
	{ 19, 1, 1 },
	{ 20, 8, 1 },
	{ 21, 5, 1 },
	{ 22, 1, 1 },
	{ 23, 1, 1 },
	{ 24, 4, 1 },
	{ 27, 6, 1 },
	{ 28, 1, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
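/*
 * DIDT (di/dt current-transient throttling) settings.  Each entry is
 * { offset, mask, shift, value, type } and the list is terminated by
 * 0xFFFFFFFF.  It is made up of four identical register groups at 0x20
 * strides, apparently one per ramping block (SQ/DB/TD/TCP, matching the
 * blocks toggled in kv_do_enable_didt()).
 */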
static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
{
	struct kv_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

#if 0
static void kv_program_local_cac_table(struct radeon_device *rdev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
#endif
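/*
 * Program a 0xFFFFFFFF-terminated list of power-throttle config registers.
 * KV_CONFIGREG_CACHE entries only accumulate bits in a local cache word;
 * the cached bits are OR'ed into the next entry that is actually written
 * (SMC-indirect, DIDT-indirect or plain MMIO).
 */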
static int kv_program_pt_config_registers(struct radeon_device *rdev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}

static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}
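/*
 * DIDT programming and enable/disable are bracketed with
 * cik_enter/exit_rlc_safe_mode() so they do not race with the RLC.
 */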
static int kv_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = kv_program_pt_config_registers(rdev, didt_config_kv);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		kv_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

#if 0
static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->caps_cac) {
		WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
#endif

static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int kv_process_firmware_header(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);

	if (ret == 0)
		pi->dpm_table_start = tmp;

	ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);

	if (ret == 0)
		pi->soft_regs_start = tmp;

	return ret;
}

static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
				   &pi->graphics_voltage_change_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_interval(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
				   &pi->graphics_interval,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
				   &pi->graphics_boot_level,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static void kv_program_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0x3FFFC100);
}

static void kv_clear_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0);
}

static int kv_set_divider_value(struct radeon_device *rdev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					     sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}
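/*
 * Voltage ID conversion helpers.  Voltages show up either as 2-bit
 * indices into the mapping/dependency tables or as 7/8-bit VID codes;
 * kv_convert_8bit_index_to_voltage() maps an 8-bit VID onto a linear
 * scale where a higher VID means a lower voltage.
 */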
static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		if (vid_2bit < vddc_sclk_table->count)
			return vddc_sclk_table->entries[vid_2bit].v;
		else
			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
				return vid_mapping_table->entries[i].vid_7bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
	}
}

static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
				return vid_mapping_table->entries[i].vid_2bit;
		}

		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
	}
}

static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
					       &pi->sys_info.vid_mapping_table,
					       vid_2bit);

	return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
}


static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));

	return 0;
}

static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return 0;
}

static void kv_dpm_power_level_enable(struct radeon_device *rdev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}

static void kv_start_dpm(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	kv_smc_dpm_enable(rdev, true);
}

static void kv_stop_dpm(struct radeon_device *rdev)
{
	kv_smc_dpm_enable(rdev, false);
}

static void kv_start_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}
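/* Freeze/unfreeze SCLK DPM while the level tables are rewritten. */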
static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
{
	return kv_notify_message_to_smu(rdev, freeze ?
					PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}

static int kv_force_lowest_valid(struct radeon_device *rdev)
{
	return kv_force_dpm_lowest(rdev);
}

static int kv_unforce_levels(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
	else
		return kv_set_enabled_levels(rdev);
}

static int kv_update_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 low_sclk_interrupt_t = 0;
	int ret = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}
	return ret;
}

static int kv_program_bootup_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(rdev, i, true);
	}
	return 0;
}

static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	pi->graphics_therm_throttle_enable = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
				   &pi->graphics_therm_throttle_enable,
				   sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_upload_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
				   (u8 *)&pi->graphics_level,
				   sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
				   pi->sram_end);

	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
				   &pi->graphics_dpm_level_count,
				   sizeof(u8), pi->sram_end);

	return ret;
}
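/*
 * Clocks in these tables are in 10 kHz units (40000 == 400 MHz), so the
 * DFS bypass checks below select a bypass divider whenever a clock falls
 * within 2 MHz of one of the special-cased frequencies.
 */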
static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a >= b) ? a - b : b - a;
}

static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 value;

	if (pi->caps_enable_dfs_bypass) {
		if (kv_get_clock_difference(clk, 40000) < 200)
			value = 3;
		else if (kv_get_clock_difference(clk, 30000) < 200)
			value = 2;
		else if (kv_get_clock_difference(clk, 20000) < 200)
			value = 7;
		else if (kv_get_clock_difference(clk, 15000) < 200)
			value = 6;
		else if (kv_get_clock_difference(clk, 10000) < 200)
			value = 8;
		else
			value = 0;
	} else {
		value = 0;
	}

	return value;
}
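/*
 * The UVD/VCE/SAMU/ACP tables below follow the same pattern: build the
 * level array from the corresponding clock/voltage dependency table
 * (stopping early once an entry needs more than high_voltage_t, where
 * applicable), then upload the level count, sampling interval and level
 * array to the SMU.
 */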
static int kv_populate_uvd_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
				   (u8 *)&pi->uvd_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UVDInterval),
				   &pi->uvd_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevel),
				   (u8 *)&pi->uvd_level,
				   sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
				   pi->sram_end);

	return ret;
}

static int kv_populate_vce_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;
	u32 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
				   (u8 *)&pi->vce_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VCEInterval),
				   (u8 *)&pi->vce_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevel),
				   (u8 *)&pi->vce_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
				   pi->sram_end);

	return ret;
}

static int kv_populate_samu_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->samu_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->samu_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].clk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->samu_level[i].Divider = (u8)dividers.post_div;

		pi->samu_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
				   (u8 *)&pi->samu_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->samu_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
				   (u8 *)&pi->samu_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, SamuLevel),
				   (u8 *)&pi->samu_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}
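/* Unlike UVD/VCE/SAMU above, ACP levels are not capped by high_voltage_t. */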
static int kv_populate_acp_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->acp_level_count = 0;
	for (i = 0; i < table->count; i++) {
		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->acp_level[i].Divider = (u8)dividers.post_div;

		pi->acp_level_count++;
	}

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
				   (u8 *)&pi->acp_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->acp_interval = 1;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, ACPInterval),
				   (u8 *)&pi->acp_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, AcpLevel),
				   (u8 *)&pi->acp_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
				   pi->sram_end);
	if (ret)
		return ret;

	return ret;
}

static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	}
}

static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

static void kv_reset_acp_boot_level(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->acp_boot_level = 0xff;
}
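/*
 * Cache the current/requested power state in pi, re-pointing ps_priv at
 * the embedded kv_ps copy so each cached state stays self-contained.
 */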
static void kv_update_current_ps(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

static void kv_update_requested_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}

void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	if (pi->bapm_enable) {
		ret = kv_smc_bapm_enable(rdev, enable);
		if (ret)
			DRM_ERROR("kv_smc_bapm_enable failed\n");
	}
}
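/*
 * Main DPM enable sequence: parse the SMU firmware header, build and
 * upload the graphics and UVD/VCE/SAMU/ACP level tables, then bring up
 * thermal throttling, voltage scaling, ULV, DPM itself, and finally
 * DIDT and CAC.
 */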
int kv_dpm_enable(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	ret = kv_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(rdev);
	kv_init_graphics_levels(rdev);
	ret = kv_program_bootup_state(rdev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(rdev);
	ret = kv_upload_dpm_settings(rdev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(rdev);
#if 0
	kv_initialize_hardware_cac_manager(rdev);
#endif
	kv_start_am(rdev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(rdev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(rdev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(rdev);
	ret = kv_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(rdev);

	ret = kv_smc_bapm_enable(rdev, false);
	if (ret) {
		DRM_ERROR("kv_smc_bapm_enable failed\n");
		return ret;
	}

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);

	return ret;
}

int kv_dpm_late_enable(struct radeon_device *rdev)
{
	int ret = 0;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	/* powerdown unused blocks for now */
	kv_dpm_powergate_acp(rdev, true);
	kv_dpm_powergate_samu(rdev, true);
	kv_dpm_powergate_vce(rdev, true);
	kv_dpm_powergate_uvd(rdev, true);

	return ret;
}

void kv_dpm_disable(struct radeon_device *rdev)
{
	kv_smc_bapm_enable(rdev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(rdev, false);
	kv_dpm_powergate_samu(rdev, false);
	kv_dpm_powergate_vce(rdev, false);
	kv_dpm_powergate_uvd(rdev, false);

	kv_enable_smc_cac(rdev, false);
	kv_enable_didt(rdev, false);
	kv_clear_vc(rdev);
	kv_stop_dpm(rdev);
	kv_enable_ulv(rdev, false);
	kv_reset_am(rdev);

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}

#if 0
static int kv_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
				    (u8 *)&value, sizeof(u16), pi->sram_end);
}

static int kv_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static void kv_init_sclk_t(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

static int kv_init_fps_limits(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		pi->fps_high_t = cpu_to_be16(tmp);
		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsHighT),
					   (u8 *)&pi->fps_high_t,
					   sizeof(u16), pi->sram_end);

		tmp = 30;
		pi->fps_low_t = cpu_to_be16(tmp);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, FpsLowT),
					   (u8 *)&pi->fps_low_t,
					   sizeof(u16), pi->sram_end);

	}
	return ret;
}

static void kv_init_powergate_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;

}

static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}

static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}
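/*
 * Pick the UVD boot level and enabled-level mask: with UVD DPM disabled
 * or stable-p-state, only the boot level is enabled; otherwise the first
 * five UVD levels (mask 0x1f) are handed to the SMU.
 */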
static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			mask = 1 << pi->uvd_boot_level;
		} else {
			mask = 0x1f;
		}

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
					   (uint8_t *)&pi->uvd_boot_level,
					   sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		kv_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_UVDDPM_SetEnabledMask,
						  mask);
	}

	return kv_enable_uvd_dpm(rdev, !gate);
}

static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
{
	u8 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= 0) /* XXX */
			break;
	}

	return i;
}

static int kv_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
		kv_dpm_powergate_vce(rdev, false);
		/* turn the clocks on when encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(rdev);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
					   (u8 *)&pi->vce_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_VCEDPM_SetEnabledMask,
							  (1 << pi->vce_boot_level));

		kv_enable_vce_dpm(rdev, true);
	} else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
		kv_enable_vce_dpm(rdev, false);
		/* turn the clocks off when not encoding */
		cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
		kv_dpm_powergate_vce(rdev, true);
	}

	return 0;
}
static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
					   (u8 *)&pi->samu_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_SAMUDPM_SetEnabledMask,
							  (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(rdev, !gate);
}

static u8 kv_get_acp_boot_level(struct radeon_device *rdev)
{
	u8 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].clk >= 0) /* XXX */
			break;
	}

	if (i >= table->count)
		i = table->count - 1;

	return i;
}

static void kv_update_acp_boot_level(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u8 acp_boot_level;

	if (!pi->caps_stable_p_state) {
		acp_boot_level = kv_get_acp_boot_level(rdev);
		if (acp_boot_level != pi->acp_boot_level) {
			pi->acp_boot_level = acp_boot_level;
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
		}
	}
}

static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(rdev);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
					   (u8 *)&pi->acp_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(rdev, !gate);
}
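/*
 * Block powergating: on gate, quiesce the block and update its DPM state
 * before sending PowerOFF to the SMU; on ungate, send PowerON and bring
 * the block back up before re-enabling its DPM.
 */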
void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	if (gate) {
		if (pi->caps_uvd_pg) {
			uvd_v1_0_stop(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
		}
		kv_update_uvd_dpm(rdev, gate);
		if (pi->caps_uvd_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
			uvd_v4_2_resume(rdev);
			uvd_v1_0_start(rdev);
			cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
		}
		kv_update_uvd_dpm(rdev, gate);
	}
}

static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->vce_power_gated == gate)
		return;

	pi->vce_power_gated = gate;

	if (gate) {
		if (pi->caps_vce_pg) {
			/* XXX do we need a vce_v1_0_stop() ? */
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
		}
	} else {
		if (pi->caps_vce_pg) {
			kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
			vce_v2_0_resume(rdev);
			vce_v1_0_start(rdev);
		}
	}
}

static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(rdev, true);
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(rdev, false);
	}
}

static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->acp_power_gated == gate)
		return;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(rdev, true);
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(rdev, false);
	}
}

static void kv_set_valid_clock_range(struct radeon_device *rdev,
				     struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}
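/*
 * Write the DFS bypass control byte for the boot level directly into
 * the graphics level table held in SMU RAM.
 */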
static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
					 struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		ret = kv_copy_bytes_to_smc(rdev,
					   (pi->dpm_table_start +
					    offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
					    (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
					    offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
					   &clk_bypass_cntl,
					   sizeof(u8), pi->sram_end);
	}

	return ret;
}

static int kv_enable_nb_dpm(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret = 0;

	if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
		ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
		if (ret == 0)
			pi->nb_dpm_enabled = true;
	}

	return ret;
}

int kv_dpm_force_performance_level(struct radeon_device *rdev,
				   enum radeon_dpm_forced_level level)
{
	int ret;

	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(rdev);
		if (ret)
			return ret;
	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(rdev);
		if (ret)
			return ret;
	}

	rdev->pm.dpm.forced_level = level;

	return 0;
}

int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	kv_update_requested_ps(rdev, new_ps);

	kv_apply_state_adjust_rules(rdev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}
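/*
 * Apply the requested power state.  Kabini/Mullins swap in new levels by
 * forcing the lowest valid level around the table upload, while other KV
 * parts freeze/unfreeze SCLK DPM instead.
 */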
int kv_dpm_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	if (pi->bapm_enable) {
		ret = kv_smc_bapm_enable(rdev, rdev->pm.dpm.ac_power);
		if (ret) {
			DRM_ERROR("kv_smc_bapm_enable failed\n");
			return ret;
		}
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_force_lowest_valid(rdev);
			kv_enable_new_levels(rdev);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_unforce_levels(rdev);
			kv_set_enabled_levels(rdev);
			kv_force_lowest_valid(rdev);
			kv_unforce_levels(rdev);

			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_sclk_t(rdev);
			if (rdev->family == CHIP_MULLINS)
				kv_enable_nb_dpm(rdev);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			kv_freeze_sclk_dpm(rdev, true);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_freeze_sclk_dpm(rdev, false);
			kv_set_enabled_levels(rdev);
			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_acp_boot_level(rdev);
			kv_update_sclk_t(rdev);
			kv_enable_nb_dpm(rdev);
		}
	}

	return 0;
}

void kv_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(rdev, new_ps);
}

void kv_dpm_setup_asic(struct radeon_device *rdev)
{
	sumo_take_smu_control(rdev, true);
	kv_init_powergate_state(rdev);
	kv_init_sclk_t(rdev);
}

void kv_dpm_reset_asic(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		kv_force_lowest_valid(rdev);
		kv_init_graphics_levels(rdev);
		kv_program_bootup_state(rdev);
		kv_upload_dpm_settings(rdev);
		kv_force_lowest_valid(rdev);
		kv_unforce_levels(rdev);
	} else {
		kv_init_graphics_levels(rdev);
		kv_program_bootup_state(rdev);
		kv_freeze_sclk_dpm(rdev, true);
		kv_upload_dpm_settings(rdev);
		kv_freeze_sclk_dpm(rdev, false);
		kv_set_enabled_level(rdev, pi->graphics_boot_level);
	}
}

//XXX use sumo_dpm_display_configuration_changed

static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
						struct radeon_clock_and_voltage_limits *table)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
		table->sclk =
			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
		table->vddc =
			kv_convert_2bit_index_to_voltage(rdev,
							 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];
}
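/*
 * The BIOS dependency tables store voltages as 8-bit VID codes; convert
 * them in place with kv_convert_8bit_index_to_voltage() once at init.
 */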
static void kv_patch_voltage_values(struct radeon_device *rdev)
{
	int i;
	struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct radeon_vce_clock_voltage_dependency_table *vce_table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct radeon_clock_voltage_dependency_table *samu_table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct radeon_clock_voltage_dependency_table *acp_table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	if (uvd_table->count) {
		for (i = 0; i < uvd_table->count; i++)
			uvd_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 uvd_table->entries[i].v);
	}

	if (vce_table->count) {
		for (i = 0; i < vce_table->count; i++)
			vce_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 vce_table->entries[i].v);
	}

	if (samu_table->count) {
		for (i = 0; i < samu_table->count; i++)
			samu_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 samu_table->entries[i].v);
	}

	if (acp_table->count) {
		for (i = 0; i < acp_table->count; i++)
			acp_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(rdev,
								 acp_table->entries[i].v);
	}
}

static void kv_construct_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}

static int kv_force_dpm_highest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}

static int kv_force_dpm_lowest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(rdev, i);
}
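/*
 * kv_get_sleep_divider_id_from_clock - pick the deepest sleep divider id
 * for which sclk / divider still stays at or above the minimum engine
 * clock; returns 0 if deep sleep is unsupported or sclk is already low.
 */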
static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	u32 temp;
	u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
		min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;

	if (sclk < min)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		temp = sclk / sumo_get_sleep_divider_from_id(i);
		if (temp >= min)
			break;
	}

	return (u8)i;
}

static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	int i;

	if (table && table->count) {
		for (i = table->count - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	}

	*limit = 0;
	return 0;
}
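/*
 * kv_apply_state_adjust_rules - clamp the requested state to the platform
 * limits: pull in the VCE clocks, enforce the minimum and stable-P-state
 * sclk, drop levels above the high-voltage threshold, and pick the NB
 * P-state indices based on family, display count, and battery state.
 */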
static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *new_rps,
					struct radeon_ps *old_rps)
{
	struct kv_ps *ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 min_sclk = 10000; /* ??? */
	u32 sclk, mclk = 0;
	int i, limit;
	bool force_high;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 stable_p_state_sclk = 0;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	if (new_rps->vce_active) {
		new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		new_rps->evclk = 0;
		new_rps->ecclk = 0;
	}

	mclk = max_limits->mclk;
	sclk = min_sclk;

	if (pi->caps_stable_p_state) {
		stable_p_state_sclk = (max_limits->sclk * 75) / 100;

		for (i = table->count - 1; i >= 0; i--) {
			if (stable_p_state_sclk >= table->entries[i].clk) {
				stable_p_state_sclk = table->entries[i].clk;
				break;
			}
		}

		if (i > 0)
			stable_p_state_sclk = table->entries[0].clk;

		sclk = stable_p_state_sclk;
	}

	if (new_rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
	}

	ps->need_dfs_bypass = true;

	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].sclk < sclk)
			ps->levels[i].sclk = sclk;
	}

	if (table && table->count) {
		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].clk;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(rdev, &limit);
				ps->levels[i].sclk = table->entries[limit].sclk_frequency;
			}
		}
	}

	if (pi->caps_stable_p_state) {
		for (i = 0; i < ps->num_levels; i++) {
			ps->levels[i].sclk = stable_p_state_sclk;
		}
	}

	pi->video_start = new_rps->dclk || new_rps->vclk ||
		new_rps->evclk || new_rps->ecclk;

	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x1;
		ps->dpmx_nb_ps_hi = 0x0;
	} else {
		ps->dpm0_pg_nb_ps_lo = 0x3;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x3;
		ps->dpmx_nb_ps_hi = 0x0;

		if (pi->sys_info.nb_dpm_enable) {
			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
				pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
				pi->disable_nb_ps3_in_battery;
			ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpm0_pg_nb_ps_hi = 0x2;
			ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpmx_nb_ps_hi = 0x2;
		}
	}
}

static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
						    u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}

static int kv_calculate_ds_divider(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 sclk_in_sr = 10000; /* ??? */
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
		pi->graphics_level[i].DeepSleepDivId =
			kv_get_sleep_divider_id_from_clock(rdev,
							   be32_to_cpu(pi->graphics_level[i].SclkFrequency),
							   sclk_in_sr);
	}
	return 0;
}
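/*
 * kv_calculate_nbps_level_settings - set GnbSlow/ForceNbPs1/UpH for each
 * valid graphics level based on family, NB DPM support, memory clock,
 * display count, and battery state.
 */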
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	bool force_high;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	u32 mclk = max_limits->mclk;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (!pi->sys_info.nb_dpm_enable)
			return 0;

		force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
			      (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

		if (force_high) {
			for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
				pi->graphics_level[i].GnbSlow = 0;
		} else {
			if (pi->battery_state)
				pi->graphics_level[0].ForceNbPs1 = 1;

			pi->graphics_level[1].GnbSlow = 0;
			pi->graphics_level[2].GnbSlow = 0;
			pi->graphics_level[3].GnbSlow = 0;
			pi->graphics_level[4].GnbSlow = 0;
		}
	} else {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
			pi->graphics_level[pi->lowest_valid].UpH = 0x28;
			pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
			if (pi->lowest_valid != pi->highest_valid)
				pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
		}
	}
	return 0;
}

static int kv_calculate_dpm_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;

	return 0;
}

static void kv_init_graphics_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		u32 vid_2bit;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->count; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].clk);
			vid_2bit = kv_convert_vid7_to_vid2(rdev,
							   &pi->sys_info.vid_mapping_table,
							   table->entries[i].v);
			kv_set_vid(rdev, i, vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->num_max_dpm_entries; i++) {
			if (pi->high_voltage_t &&
			    pi->high_voltage_t <
			    kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
			kv_set_vid(rdev, i, table->entries[i].vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	}

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
		kv_dpm_power_level_enable(rdev, i, false);
}

static void kv_enable_new_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (i >= pi->lowest_valid && i <= pi->highest_valid)
			kv_dpm_power_level_enable(rdev, i, true);
	}
}

static int kv_set_enabled_level(struct radeon_device *rdev, u32 level)
{
	u32 new_mask = (1 << level);

	return kv_send_msg_to_smc_with_parameter(rdev,
						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
						 new_mask);
}

static int kv_set_enabled_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i, new_mask = 0;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		new_mask |= (1 << i);

	return kv_send_msg_to_smc_with_parameter(rdev,
						 PPSMC_MSG_SCLKDPM_SetEnabledMask,
						 new_mask);
}
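/*
 * kv_program_nbps_index_settings - write the NB P-state lo/hi indices
 * from the power state into NB_DPM_CONFIG_1; not used on KB/ML.
 */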
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 nbdpmconfig1;

	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return;

	if (pi->sys_info.nb_dpm_enable) {
		nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
		nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
				  DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
		nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
				 Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
				 DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
				 DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
		WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
	}
}

static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
	tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
	tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
		DIG_THERM_INTL(49 + (low_temp / 1000)));
	WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};
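/*
 * kv_parse_sys_info_table - read bootup clocks, HTC limits, NB P-state
 * clocks, and the sclk/voltage mapping tables from the rev 8
 * IntegratedSystemInfo atom table.
 */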
static int kv_parse_sys_info_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 8) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
		if (igp_info->info_8.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
		if (igp_info->info_8.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
		}

		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
		}
		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
		    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		sumo_construct_sclk_voltage_mapping_table(rdev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_8.sAvail_SCLK);

		sumo_construct_vid_mapping_table(rdev,
						 &pi->sys_info.vid_mapping_table,
						 igp_info->info_8.sAvail_SCLK);

		kv_construct_max_power_limits_table(rdev,
						    &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}
	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void kv_patch_boot_state(struct radeon_device *rdev,
				struct kv_ps *ps)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	ps->num_levels = 1;
	ps->levels[0] = pi->boot_pl;
}

static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		rdev->pm.dpm.boot_ps = rps;
		kv_patch_boot_state(rdev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}

static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *ps = kv_get_ps(rps);
	struct kv_pl *pl = &ps->levels[index];
	u32 sclk;

	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;

	ps->num_levels = index + 1;

	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}
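/*
 * kv_parse_power_table - walk the PPLib state/clock/non-clock arrays in
 * the PowerPlayInfo atom table, build the radeon_ps array, and fill in
 * the VCE state sclks from their clock info indices.
 */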
static int kv_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct kv_ps *ps;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
				  state_array->ucNumEntries, GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info)
			return -EINVAL;
		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
		if (ps == NULL) {
			kfree(rdev->pm.dpm.ps);
			return -ENOMEM;
		}
		rdev->pm.dpm.ps[i].ps_priv = ps;
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			kv_parse_pplib_clock_info(rdev,
						  &rdev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	rdev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
		u32 sclk;
		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
		sclk |= clock_info->sumo.ucEngineClockHigh << 16;
		rdev->pm.dpm.vce_states[i].sclk = sclk;
		rdev->pm.dpm.vce_states[i].mclk = 0;
	}

	return 0;
}
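/*
 * kv_dpm_init - allocate the KV power info, set the default driver caps,
 * and parse the vbios system info and power tables.
 */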
int kv_dpm_init(struct radeon_device *rdev)
{
	struct kv_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	ret = r600_get_platform_caps(rdev);
	if (ret)
		return ret;

	ret = r600_parse_extended_power_table(rdev);
	if (ret)
		return ret;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	pi->sram_end = SMC_RAM_END;

	pi->enable_nb_dpm = true;

	pi->caps_power_containment = true;
	pi->caps_cac = true;
	pi->enable_didt = false;
	if (pi->enable_didt) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}

	pi->caps_sclk_ds = true;
	pi->enable_auto_thermal_throttling = true;
	pi->disable_nb_ps3_in_battery = false;
	pi->bapm_enable = true;
	pi->voltage_drop_t = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->caps_fps = false; /* true? */
	pi->caps_uvd_pg = true;
	pi->caps_uvd_dpm = true;
	pi->caps_vce_pg = false; /* XXX true */
	pi->caps_samu_pg = false;
	pi->caps_acp_pg = false;
	pi->caps_stable_p_state = false;

	ret = kv_parse_sys_info_table(rdev);
	if (ret)
		return ret;

	kv_patch_voltage_values(rdev);
	kv_construct_boot_state(rdev);

	ret = kv_parse_power_table(rdev);
	if (ret)
		return ret;

	pi->enable_dpm = true;

	return 0;
}

void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 current_index =
		(RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
		CURR_SCLK_INDEX_SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
		vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}

void kv_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	int i;
	struct kv_ps *ps = kv_get_ps(rps);

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct kv_pl *pl = &ps->levels[i];
		printk("\t\tpower level %d sclk: %u vddc: %u\n",
		       i, pl->sclk,
		       kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
	}
	r600_dpm_print_ps_status(rdev, rps);
}

void kv_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	}
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	r600_free_extended_power_table(rdev);
}

void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
{

}

u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->levels[0].sclk;
	else
		return requested_state->levels[requested_state->num_levels - 1].sclk;
}

u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return pi->sys_info.bootup_uma_clk;
}