1 /* 2 * Copyright 2013 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/firmware.h> 25 #include "drmP.h" 26 #include "radeon.h" 27 #include "radeon_asic.h" 28 #include "radeon_ucode.h" 29 #include "cikd.h" 30 #include "r600_dpm.h" 31 #include "ci_dpm.h" 32 #include "atom.h" 33 #include <linux/seq_file.h> 34 35 #define MC_CG_ARB_FREQ_F0 0x0a 36 #define MC_CG_ARB_FREQ_F1 0x0b 37 #define MC_CG_ARB_FREQ_F2 0x0c 38 #define MC_CG_ARB_FREQ_F3 0x0d 39 40 #define SMC_RAM_END 0x40000 41 42 #define VOLTAGE_SCALE 4 43 #define VOLTAGE_VID_OFFSET_SCALE1 625 44 #define VOLTAGE_VID_OFFSET_SCALE2 100 45 46 static const struct ci_pt_defaults defaults_hawaii_xt = 47 { 48 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, 49 { 0x84, 0x0, 0x0, 0x7F, 0x0, 0x0, 0x5A, 0x60, 0x51, 0x8E, 0x79, 0x6B, 0x5F, 0x90, 0x79 }, 50 { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC } 51 }; 52 53 static const struct ci_pt_defaults defaults_hawaii_pro = 54 { 55 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062, 56 { 0x93, 0x0, 0x0, 0x97, 0x0, 0x0, 0x6B, 0x60, 0x51, 0x95, 0x79, 0x6B, 0x5F, 0x90, 0x79 }, 57 { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC } 58 }; 59 60 static const struct ci_pt_defaults defaults_bonaire_xt = 61 { 62 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, 63 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, 64 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } 65 }; 66 67 static const struct ci_pt_defaults defaults_bonaire_pro = 68 { 69 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062, 70 { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F }, 71 { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB } 72 }; 73 74 static const struct ci_pt_defaults defaults_saturn_xt = 75 { 76 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000, 77 { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D }, 78 { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 } 79 }; 80 81 static const struct ci_pt_defaults defaults_saturn_pro = 82 { 83 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000, 84 { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A }, 85 { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 } 86 }; 87 88 static const struct ci_pt_config_reg didt_config_ci[] = 89 { 90 { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 91 { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 92 { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 93 { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 94 { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 95 { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 96 { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 97 { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 98 { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 99 { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 100 { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 101 { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 102 { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, 103 { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, 104 { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, 105 { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 106 { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 107 { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 108 { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 109 { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 110 { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 111 { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 112 { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 113 { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 114 { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 115 { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 116 { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 117 { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 118 { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 119 { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 120 { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, 121 { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, 122 { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, 123 { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 124 { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 125 { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 126 { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 127 { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 128 { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 129 { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 130 { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 131 { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 132 { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 133 { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 134 { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 135 { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 136 { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 137 { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 138 { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, 139 { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, 140 { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, 141 { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 142 { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 143 { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 144 { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 145 { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 146 { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 147 { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 148 { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 149 { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 150 { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 151 { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 152 { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 153 { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 154 { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 155 { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 156 { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, 157 { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, 158 { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, 159 { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 160 { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 161 { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 162 { 0xFFFFFFFF } 163 }; 164 165 extern u8 rv770_get_memory_module_index(struct radeon_device *rdev); 166 extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev, 167 u32 arb_freq_src, u32 arb_freq_dest); 168 extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock); 169 extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode); 170 extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev, 171 u32 max_voltage_steps, 172 struct atom_voltage_table *voltage_table); 173 extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev); 174 extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev); 175 extern int ci_mc_load_microcode(struct radeon_device *rdev); 176 extern void cik_update_cg(struct radeon_device *rdev, 177 u32 block, bool enable); 178 179 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, 180 struct atom_voltage_table_entry *voltage_table, 181 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd); 182 static int ci_set_power_limit(struct radeon_device *rdev, u32 n); 183 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev, 184 u32 target_tdp); 185 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate); 186 187 static struct ci_power_info *ci_get_pi(struct radeon_device *rdev) 188 { 189 struct ci_power_info *pi = rdev->pm.dpm.priv; 190 191 return pi; 192 } 193 194 static struct ci_ps *ci_get_ps(struct radeon_ps *rps) 195 { 196 struct ci_ps *ps = rps->ps_priv; 197 198 return ps; 199 } 200 201 static void ci_initialize_powertune_defaults(struct radeon_device *rdev) 202 { 203 struct ci_power_info *pi = ci_get_pi(rdev); 204 205 switch (rdev->pdev->device) { 206 case 0x6649: 207 case 0x6650: 208 case 0x6651: 209 case 0x6658: 210 case 0x665C: 211 case 0x665D: 212 default: 213 pi->powertune_defaults = &defaults_bonaire_xt; 214 break; 215 case 0x6640: 216 case 0x6641: 217 case 0x6646: 218 case 0x6647: 219 pi->powertune_defaults = &defaults_saturn_xt; 220 break; 221 case 0x67B8: 222 case 0x67B0: 223 pi->powertune_defaults = &defaults_hawaii_xt; 224 break; 225 case 0x67BA: 226 case 0x67B1: 227 pi->powertune_defaults = &defaults_hawaii_pro; 228 break; 229 case 0x67A0: 230 case 0x67A1: 231 case 0x67A2: 232 case 0x67A8: 233 case 0x67A9: 234 case 0x67AA: 235 case 0x67B9: 236 case 0x67BE: 237 pi->powertune_defaults = &defaults_bonaire_xt; 238 break; 239 } 240 241 pi->dte_tj_offset = 0; 242 243 pi->caps_power_containment = true; 244 pi->caps_cac = false; 245 pi->caps_sq_ramping = false; 246 pi->caps_db_ramping = false; 247 pi->caps_td_ramping = false; 248 pi->caps_tcp_ramping = false; 249 250 if (pi->caps_power_containment) { 251 pi->caps_cac = true; 252 pi->enable_bapm_feature = true; 253 pi->enable_tdc_limit_feature = true; 254 pi->enable_pkg_pwr_tracking_feature = true; 255 } 256 } 257 258 static u8 ci_convert_to_vid(u16 vddc) 259 { 260 return (6200 - (vddc * VOLTAGE_SCALE)) / 25; 261 } 262 263 static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev) 264 { 265 struct ci_power_info *pi = ci_get_pi(rdev); 266 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd; 267 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd; 268 u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2; 269 u32 i; 270 271 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL) 272 return -EINVAL; 273 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8) 274 return -EINVAL; 275 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count != 276 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count) 277 return -EINVAL; 278 279 for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) { 280 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { 281 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1); 282 hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2); 283 hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3); 284 } else { 285 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc); 286 hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage); 287 } 288 } 289 return 0; 290 } 291 292 static int ci_populate_vddc_vid(struct radeon_device *rdev) 293 { 294 struct ci_power_info *pi = ci_get_pi(rdev); 295 u8 *vid = pi->smc_powertune_table.VddCVid; 296 u32 i; 297 298 if (pi->vddc_voltage_table.count > 8) 299 return -EINVAL; 300 301 for (i = 0; i < pi->vddc_voltage_table.count; i++) 302 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value); 303 304 return 0; 305 } 306 307 static int ci_populate_svi_load_line(struct radeon_device *rdev) 308 { 309 struct ci_power_info *pi = ci_get_pi(rdev); 310 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; 311 312 pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en; 313 pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc; 314 pi->smc_powertune_table.SviLoadLineTrimVddC = 3; 315 pi->smc_powertune_table.SviLoadLineOffsetVddC = 0; 316 317 return 0; 318 } 319 320 static int ci_populate_tdc_limit(struct radeon_device *rdev) 321 { 322 struct ci_power_info *pi = ci_get_pi(rdev); 323 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; 324 u16 tdc_limit; 325 326 tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256; 327 pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit); 328 pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc = 329 pt_defaults->tdc_vddc_throttle_release_limit_perc; 330 pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt; 331 332 return 0; 333 } 334 335 static int ci_populate_dw8(struct radeon_device *rdev) 336 { 337 struct ci_power_info *pi = ci_get_pi(rdev); 338 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; 339 int ret; 340 341 ret = ci_read_smc_sram_dword(rdev, 342 SMU7_FIRMWARE_HEADER_LOCATION + 343 offsetof(SMU7_Firmware_Header, PmFuseTable) + 344 offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl), 345 (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl, 346 pi->sram_end); 347 if (ret) 348 return -EINVAL; 349 else 350 pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl; 351 352 return 0; 353 } 354 355 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev) 356 { 357 struct ci_power_info *pi = ci_get_pi(rdev); 358 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd; 359 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd; 360 int i, min, max; 361 362 min = max = hi_vid[0]; 363 for (i = 0; i < 8; i++) { 364 if (0 != hi_vid[i]) { 365 if (min > hi_vid[i]) 366 min = hi_vid[i]; 367 if (max < hi_vid[i]) 368 max = hi_vid[i]; 369 } 370 371 if (0 != lo_vid[i]) { 372 if (min > lo_vid[i]) 373 min = lo_vid[i]; 374 if (max < lo_vid[i]) 375 max = lo_vid[i]; 376 } 377 } 378 379 if ((min == 0) || (max == 0)) 380 return -EINVAL; 381 pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max; 382 pi->smc_powertune_table.GnbLPMLMinVid = (u8)min; 383 384 return 0; 385 } 386 387 static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev) 388 { 389 struct ci_power_info *pi = ci_get_pi(rdev); 390 u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd; 391 u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd; 392 struct radeon_cac_tdp_table *cac_tdp_table = 393 rdev->pm.dpm.dyn_state.cac_tdp_table; 394 395 hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256; 396 lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256; 397 398 pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd); 399 pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd); 400 401 return 0; 402 } 403 404 static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev) 405 { 406 struct ci_power_info *pi = ci_get_pi(rdev); 407 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; 408 SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table; 409 struct radeon_cac_tdp_table *cac_tdp_table = 410 rdev->pm.dpm.dyn_state.cac_tdp_table; 411 struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table; 412 int i, j, k; 413 const u16 *def1; 414 const u16 *def2; 415 416 dpm_table->DefaultTdp = cac_tdp_table->tdp * 256; 417 dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256; 418 419 dpm_table->DTETjOffset = (u8)pi->dte_tj_offset; 420 dpm_table->GpuTjMax = 421 (u8)(pi->thermal_temp_setting.temperature_high / 1000); 422 dpm_table->GpuTjHyst = 8; 423 424 dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base; 425 426 if (ppm) { 427 dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000); 428 dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256); 429 } else { 430 dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0); 431 dpm_table->PPM_TemperatureLimit = cpu_to_be16(0); 432 } 433 434 dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient); 435 def1 = pt_defaults->bapmti_r; 436 def2 = pt_defaults->bapmti_rc; 437 438 for (i = 0; i < SMU7_DTE_ITERATIONS; i++) { 439 for (j = 0; j < SMU7_DTE_SOURCES; j++) { 440 for (k = 0; k < SMU7_DTE_SINKS; k++) { 441 dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1); 442 dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2); 443 def1++; 444 def2++; 445 } 446 } 447 } 448 449 return 0; 450 } 451 452 static int ci_populate_pm_base(struct radeon_device *rdev) 453 { 454 struct ci_power_info *pi = ci_get_pi(rdev); 455 u32 pm_fuse_table_offset; 456 int ret; 457 458 if (pi->caps_power_containment) { 459 ret = ci_read_smc_sram_dword(rdev, 460 SMU7_FIRMWARE_HEADER_LOCATION + 461 offsetof(SMU7_Firmware_Header, PmFuseTable), 462 &pm_fuse_table_offset, pi->sram_end); 463 if (ret) 464 return ret; 465 ret = ci_populate_bapm_vddc_vid_sidd(rdev); 466 if (ret) 467 return ret; 468 ret = ci_populate_vddc_vid(rdev); 469 if (ret) 470 return ret; 471 ret = ci_populate_svi_load_line(rdev); 472 if (ret) 473 return ret; 474 ret = ci_populate_tdc_limit(rdev); 475 if (ret) 476 return ret; 477 ret = ci_populate_dw8(rdev); 478 if (ret) 479 return ret; 480 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev); 481 if (ret) 482 return ret; 483 ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev); 484 if (ret) 485 return ret; 486 ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset, 487 (u8 *)&pi->smc_powertune_table, 488 sizeof(SMU7_Discrete_PmFuses), pi->sram_end); 489 if (ret) 490 return ret; 491 } 492 493 return 0; 494 } 495 496 static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable) 497 { 498 struct ci_power_info *pi = ci_get_pi(rdev); 499 u32 data; 500 501 if (pi->caps_sq_ramping) { 502 data = RREG32_DIDT(DIDT_SQ_CTRL0); 503 if (enable) 504 data |= DIDT_CTRL_EN; 505 else 506 data &= ~DIDT_CTRL_EN; 507 WREG32_DIDT(DIDT_SQ_CTRL0, data); 508 } 509 510 if (pi->caps_db_ramping) { 511 data = RREG32_DIDT(DIDT_DB_CTRL0); 512 if (enable) 513 data |= DIDT_CTRL_EN; 514 else 515 data &= ~DIDT_CTRL_EN; 516 WREG32_DIDT(DIDT_DB_CTRL0, data); 517 } 518 519 if (pi->caps_td_ramping) { 520 data = RREG32_DIDT(DIDT_TD_CTRL0); 521 if (enable) 522 data |= DIDT_CTRL_EN; 523 else 524 data &= ~DIDT_CTRL_EN; 525 WREG32_DIDT(DIDT_TD_CTRL0, data); 526 } 527 528 if (pi->caps_tcp_ramping) { 529 data = RREG32_DIDT(DIDT_TCP_CTRL0); 530 if (enable) 531 data |= DIDT_CTRL_EN; 532 else 533 data &= ~DIDT_CTRL_EN; 534 WREG32_DIDT(DIDT_TCP_CTRL0, data); 535 } 536 } 537 538 static int ci_program_pt_config_registers(struct radeon_device *rdev, 539 const struct ci_pt_config_reg *cac_config_regs) 540 { 541 const struct ci_pt_config_reg *config_regs = cac_config_regs; 542 u32 data; 543 u32 cache = 0; 544 545 if (config_regs == NULL) 546 return -EINVAL; 547 548 while (config_regs->offset != 0xFFFFFFFF) { 549 if (config_regs->type == CISLANDS_CONFIGREG_CACHE) { 550 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); 551 } else { 552 switch (config_regs->type) { 553 case CISLANDS_CONFIGREG_SMC_IND: 554 data = RREG32_SMC(config_regs->offset); 555 break; 556 case CISLANDS_CONFIGREG_DIDT_IND: 557 data = RREG32_DIDT(config_regs->offset); 558 break; 559 default: 560 data = RREG32(config_regs->offset << 2); 561 break; 562 } 563 564 data &= ~config_regs->mask; 565 data |= ((config_regs->value << config_regs->shift) & config_regs->mask); 566 data |= cache; 567 568 switch (config_regs->type) { 569 case CISLANDS_CONFIGREG_SMC_IND: 570 WREG32_SMC(config_regs->offset, data); 571 break; 572 case CISLANDS_CONFIGREG_DIDT_IND: 573 WREG32_DIDT(config_regs->offset, data); 574 break; 575 default: 576 WREG32(config_regs->offset << 2, data); 577 break; 578 } 579 cache = 0; 580 } 581 config_regs++; 582 } 583 return 0; 584 } 585 586 static int ci_enable_didt(struct radeon_device *rdev, bool enable) 587 { 588 struct ci_power_info *pi = ci_get_pi(rdev); 589 int ret; 590 591 if (pi->caps_sq_ramping || pi->caps_db_ramping || 592 pi->caps_td_ramping || pi->caps_tcp_ramping) { 593 cik_enter_rlc_safe_mode(rdev); 594 595 if (enable) { 596 ret = ci_program_pt_config_registers(rdev, didt_config_ci); 597 if (ret) { 598 cik_exit_rlc_safe_mode(rdev); 599 return ret; 600 } 601 } 602 603 ci_do_enable_didt(rdev, enable); 604 605 cik_exit_rlc_safe_mode(rdev); 606 } 607 608 return 0; 609 } 610 611 static int ci_enable_power_containment(struct radeon_device *rdev, bool enable) 612 { 613 struct ci_power_info *pi = ci_get_pi(rdev); 614 PPSMC_Result smc_result; 615 int ret = 0; 616 617 if (enable) { 618 pi->power_containment_features = 0; 619 if (pi->caps_power_containment) { 620 if (pi->enable_bapm_feature) { 621 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE); 622 if (smc_result != PPSMC_Result_OK) 623 ret = -EINVAL; 624 else 625 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM; 626 } 627 628 if (pi->enable_tdc_limit_feature) { 629 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable); 630 if (smc_result != PPSMC_Result_OK) 631 ret = -EINVAL; 632 else 633 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit; 634 } 635 636 if (pi->enable_pkg_pwr_tracking_feature) { 637 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable); 638 if (smc_result != PPSMC_Result_OK) { 639 ret = -EINVAL; 640 } else { 641 struct radeon_cac_tdp_table *cac_tdp_table = 642 rdev->pm.dpm.dyn_state.cac_tdp_table; 643 u32 default_pwr_limit = 644 (u32)(cac_tdp_table->maximum_power_delivery_limit * 256); 645 646 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit; 647 648 ci_set_power_limit(rdev, default_pwr_limit); 649 } 650 } 651 } 652 } else { 653 if (pi->caps_power_containment && pi->power_containment_features) { 654 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit) 655 ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable); 656 657 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM) 658 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE); 659 660 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) 661 ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable); 662 pi->power_containment_features = 0; 663 } 664 } 665 666 return ret; 667 } 668 669 static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable) 670 { 671 struct ci_power_info *pi = ci_get_pi(rdev); 672 PPSMC_Result smc_result; 673 int ret = 0; 674 675 if (pi->caps_cac) { 676 if (enable) { 677 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac); 678 if (smc_result != PPSMC_Result_OK) { 679 ret = -EINVAL; 680 pi->cac_enabled = false; 681 } else { 682 pi->cac_enabled = true; 683 } 684 } else if (pi->cac_enabled) { 685 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac); 686 pi->cac_enabled = false; 687 } 688 } 689 690 return ret; 691 } 692 693 static int ci_power_control_set_level(struct radeon_device *rdev) 694 { 695 struct ci_power_info *pi = ci_get_pi(rdev); 696 struct radeon_cac_tdp_table *cac_tdp_table = 697 rdev->pm.dpm.dyn_state.cac_tdp_table; 698 s32 adjust_percent; 699 s32 target_tdp; 700 int ret = 0; 701 bool adjust_polarity = false; /* ??? */ 702 703 if (pi->caps_power_containment && 704 (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) { 705 adjust_percent = adjust_polarity ? 706 rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment); 707 target_tdp = ((100 + adjust_percent) * 708 (s32)cac_tdp_table->configurable_tdp) / 100; 709 target_tdp *= 256; 710 711 ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp); 712 } 713 714 return ret; 715 } 716 717 void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) 718 { 719 struct ci_power_info *pi = ci_get_pi(rdev); 720 721 if (pi->uvd_power_gated == gate) 722 return; 723 724 pi->uvd_power_gated = gate; 725 726 ci_update_uvd_dpm(rdev, gate); 727 } 728 729 bool ci_dpm_vblank_too_short(struct radeon_device *rdev) 730 { 731 struct ci_power_info *pi = ci_get_pi(rdev); 732 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 733 u32 switch_limit = pi->mem_gddr5 ? 450 : 300; 734 735 if (vblank_time < switch_limit) 736 return true; 737 else 738 return false; 739 740 } 741 742 static void ci_apply_state_adjust_rules(struct radeon_device *rdev, 743 struct radeon_ps *rps) 744 { 745 struct ci_ps *ps = ci_get_ps(rps); 746 struct ci_power_info *pi = ci_get_pi(rdev); 747 struct radeon_clock_and_voltage_limits *max_limits; 748 bool disable_mclk_switching; 749 u32 sclk, mclk; 750 int i; 751 752 if (rps->vce_active) { 753 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; 754 rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk; 755 } else { 756 rps->evclk = 0; 757 rps->ecclk = 0; 758 } 759 760 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 761 ci_dpm_vblank_too_short(rdev)) 762 disable_mclk_switching = true; 763 else 764 disable_mclk_switching = false; 765 766 if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) 767 pi->battery_state = true; 768 else 769 pi->battery_state = false; 770 771 if (rdev->pm.dpm.ac_power) 772 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 773 else 774 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 775 776 if (rdev->pm.dpm.ac_power == false) { 777 for (i = 0; i < ps->performance_level_count; i++) { 778 if (ps->performance_levels[i].mclk > max_limits->mclk) 779 ps->performance_levels[i].mclk = max_limits->mclk; 780 if (ps->performance_levels[i].sclk > max_limits->sclk) 781 ps->performance_levels[i].sclk = max_limits->sclk; 782 } 783 } 784 785 /* XXX validate the min clocks required for display */ 786 787 if (disable_mclk_switching) { 788 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; 789 sclk = ps->performance_levels[0].sclk; 790 } else { 791 mclk = ps->performance_levels[0].mclk; 792 sclk = ps->performance_levels[0].sclk; 793 } 794 795 if (rps->vce_active) { 796 if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk) 797 sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk; 798 if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk) 799 mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk; 800 } 801 802 ps->performance_levels[0].sclk = sclk; 803 ps->performance_levels[0].mclk = mclk; 804 805 if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk) 806 ps->performance_levels[1].sclk = ps->performance_levels[0].sclk; 807 808 if (disable_mclk_switching) { 809 if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk) 810 ps->performance_levels[0].mclk = ps->performance_levels[1].mclk; 811 } else { 812 if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk) 813 ps->performance_levels[1].mclk = ps->performance_levels[0].mclk; 814 } 815 } 816 817 static int ci_set_thermal_temperature_range(struct radeon_device *rdev, 818 int min_temp, int max_temp) 819 { 820 int low_temp = 0 * 1000; 821 int high_temp = 255 * 1000; 822 u32 tmp; 823 824 if (low_temp < min_temp) 825 low_temp = min_temp; 826 if (high_temp > max_temp) 827 high_temp = max_temp; 828 if (high_temp < low_temp) { 829 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); 830 return -EINVAL; 831 } 832 833 tmp = RREG32_SMC(CG_THERMAL_INT); 834 tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK); 835 tmp |= CI_DIG_THERM_INTH(high_temp / 1000) | 836 CI_DIG_THERM_INTL(low_temp / 1000); 837 WREG32_SMC(CG_THERMAL_INT, tmp); 838 839 #if 0 840 /* XXX: need to figure out how to handle this properly */ 841 tmp = RREG32_SMC(CG_THERMAL_CTRL); 842 tmp &= DIG_THERM_DPM_MASK; 843 tmp |= DIG_THERM_DPM(high_temp / 1000); 844 WREG32_SMC(CG_THERMAL_CTRL, tmp); 845 #endif 846 847 rdev->pm.dpm.thermal.min_temp = low_temp; 848 rdev->pm.dpm.thermal.max_temp = high_temp; 849 850 return 0; 851 } 852 853 #if 0 854 static int ci_read_smc_soft_register(struct radeon_device *rdev, 855 u16 reg_offset, u32 *value) 856 { 857 struct ci_power_info *pi = ci_get_pi(rdev); 858 859 return ci_read_smc_sram_dword(rdev, 860 pi->soft_regs_start + reg_offset, 861 value, pi->sram_end); 862 } 863 #endif 864 865 static int ci_write_smc_soft_register(struct radeon_device *rdev, 866 u16 reg_offset, u32 value) 867 { 868 struct ci_power_info *pi = ci_get_pi(rdev); 869 870 return ci_write_smc_sram_dword(rdev, 871 pi->soft_regs_start + reg_offset, 872 value, pi->sram_end); 873 } 874 875 static void ci_init_fps_limits(struct radeon_device *rdev) 876 { 877 struct ci_power_info *pi = ci_get_pi(rdev); 878 SMU7_Discrete_DpmTable *table = &pi->smc_state_table; 879 880 if (pi->caps_fps) { 881 u16 tmp; 882 883 tmp = 45; 884 table->FpsHighT = cpu_to_be16(tmp); 885 886 tmp = 30; 887 table->FpsLowT = cpu_to_be16(tmp); 888 } 889 } 890 891 static int ci_update_sclk_t(struct radeon_device *rdev) 892 { 893 struct ci_power_info *pi = ci_get_pi(rdev); 894 int ret = 0; 895 u32 low_sclk_interrupt_t = 0; 896 897 if (pi->caps_sclk_throttle_low_notification) { 898 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); 899 900 ret = ci_copy_bytes_to_smc(rdev, 901 pi->dpm_table_start + 902 offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT), 903 (u8 *)&low_sclk_interrupt_t, 904 sizeof(u32), pi->sram_end); 905 906 } 907 908 return ret; 909 } 910 911 static void ci_get_leakage_voltages(struct radeon_device *rdev) 912 { 913 struct ci_power_info *pi = ci_get_pi(rdev); 914 u16 leakage_id, virtual_voltage_id; 915 u16 vddc, vddci; 916 int i; 917 918 pi->vddc_leakage.count = 0; 919 pi->vddci_leakage.count = 0; 920 921 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { 922 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { 923 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; 924 if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0) 925 continue; 926 if (vddc != 0 && vddc != virtual_voltage_id) { 927 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc; 928 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id; 929 pi->vddc_leakage.count++; 930 } 931 } 932 } else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) { 933 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { 934 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; 935 if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci, 936 virtual_voltage_id, 937 leakage_id) == 0) { 938 if (vddc != 0 && vddc != virtual_voltage_id) { 939 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc; 940 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id; 941 pi->vddc_leakage.count++; 942 } 943 if (vddci != 0 && vddci != virtual_voltage_id) { 944 pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci; 945 pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id; 946 pi->vddci_leakage.count++; 947 } 948 } 949 } 950 } 951 } 952 953 static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources) 954 { 955 struct ci_power_info *pi = ci_get_pi(rdev); 956 bool want_thermal_protection; 957 enum radeon_dpm_event_src dpm_event_src; 958 u32 tmp; 959 960 switch (sources) { 961 case 0: 962 default: 963 want_thermal_protection = false; 964 break; 965 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL): 966 want_thermal_protection = true; 967 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL; 968 break; 969 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL): 970 want_thermal_protection = true; 971 dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL; 972 break; 973 case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | 974 (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)): 975 want_thermal_protection = true; 976 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; 977 break; 978 } 979 980 if (want_thermal_protection) { 981 #if 0 982 /* XXX: need to figure out how to handle this properly */ 983 tmp = RREG32_SMC(CG_THERMAL_CTRL); 984 tmp &= DPM_EVENT_SRC_MASK; 985 tmp |= DPM_EVENT_SRC(dpm_event_src); 986 WREG32_SMC(CG_THERMAL_CTRL, tmp); 987 #endif 988 989 tmp = RREG32_SMC(GENERAL_PWRMGT); 990 if (pi->thermal_protection) 991 tmp &= ~THERMAL_PROTECTION_DIS; 992 else 993 tmp |= THERMAL_PROTECTION_DIS; 994 WREG32_SMC(GENERAL_PWRMGT, tmp); 995 } else { 996 tmp = RREG32_SMC(GENERAL_PWRMGT); 997 tmp |= THERMAL_PROTECTION_DIS; 998 WREG32_SMC(GENERAL_PWRMGT, tmp); 999 } 1000 } 1001 1002 static void ci_enable_auto_throttle_source(struct radeon_device *rdev, 1003 enum radeon_dpm_auto_throttle_src source, 1004 bool enable) 1005 { 1006 struct ci_power_info *pi = ci_get_pi(rdev); 1007 1008 if (enable) { 1009 if (!(pi->active_auto_throttle_sources & (1 << source))) { 1010 pi->active_auto_throttle_sources |= 1 << source; 1011 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources); 1012 } 1013 } else { 1014 if (pi->active_auto_throttle_sources & (1 << source)) { 1015 pi->active_auto_throttle_sources &= ~(1 << source); 1016 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources); 1017 } 1018 } 1019 } 1020 1021 static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev) 1022 { 1023 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) 1024 ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt); 1025 } 1026 1027 static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev) 1028 { 1029 struct ci_power_info *pi = ci_get_pi(rdev); 1030 PPSMC_Result smc_result; 1031 1032 if (!pi->need_update_smu7_dpm_table) 1033 return 0; 1034 1035 if ((!pi->sclk_dpm_key_disabled) && 1036 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { 1037 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel); 1038 if (smc_result != PPSMC_Result_OK) 1039 return -EINVAL; 1040 } 1041 1042 if ((!pi->mclk_dpm_key_disabled) && 1043 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { 1044 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel); 1045 if (smc_result != PPSMC_Result_OK) 1046 return -EINVAL; 1047 } 1048 1049 pi->need_update_smu7_dpm_table = 0; 1050 return 0; 1051 } 1052 1053 static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable) 1054 { 1055 struct ci_power_info *pi = ci_get_pi(rdev); 1056 PPSMC_Result smc_result; 1057 1058 if (enable) { 1059 if (!pi->sclk_dpm_key_disabled) { 1060 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable); 1061 if (smc_result != PPSMC_Result_OK) 1062 return -EINVAL; 1063 } 1064 1065 if (!pi->mclk_dpm_key_disabled) { 1066 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable); 1067 if (smc_result != PPSMC_Result_OK) 1068 return -EINVAL; 1069 1070 WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN); 1071 1072 WREG32_SMC(LCAC_MC0_CNTL, 0x05); 1073 WREG32_SMC(LCAC_MC1_CNTL, 0x05); 1074 WREG32_SMC(LCAC_CPL_CNTL, 0x100005); 1075 1076 udelay(10); 1077 1078 WREG32_SMC(LCAC_MC0_CNTL, 0x400005); 1079 WREG32_SMC(LCAC_MC1_CNTL, 0x400005); 1080 WREG32_SMC(LCAC_CPL_CNTL, 0x500005); 1081 } 1082 } else { 1083 if (!pi->sclk_dpm_key_disabled) { 1084 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable); 1085 if (smc_result != PPSMC_Result_OK) 1086 return -EINVAL; 1087 } 1088 1089 if (!pi->mclk_dpm_key_disabled) { 1090 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable); 1091 if (smc_result != PPSMC_Result_OK) 1092 return -EINVAL; 1093 } 1094 } 1095 1096 return 0; 1097 } 1098 1099 static int ci_start_dpm(struct radeon_device *rdev) 1100 { 1101 struct ci_power_info *pi = ci_get_pi(rdev); 1102 PPSMC_Result smc_result; 1103 int ret; 1104 u32 tmp; 1105 1106 tmp = RREG32_SMC(GENERAL_PWRMGT); 1107 tmp |= GLOBAL_PWRMGT_EN; 1108 WREG32_SMC(GENERAL_PWRMGT, tmp); 1109 1110 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 1111 tmp |= DYNAMIC_PM_EN; 1112 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 1113 1114 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000); 1115 1116 WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN); 1117 1118 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable); 1119 if (smc_result != PPSMC_Result_OK) 1120 return -EINVAL; 1121 1122 ret = ci_enable_sclk_mclk_dpm(rdev, true); 1123 if (ret) 1124 return ret; 1125 1126 if (!pi->pcie_dpm_key_disabled) { 1127 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable); 1128 if (smc_result != PPSMC_Result_OK) 1129 return -EINVAL; 1130 } 1131 1132 return 0; 1133 } 1134 1135 static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev) 1136 { 1137 struct ci_power_info *pi = ci_get_pi(rdev); 1138 PPSMC_Result smc_result; 1139 1140 if (!pi->need_update_smu7_dpm_table) 1141 return 0; 1142 1143 if ((!pi->sclk_dpm_key_disabled) && 1144 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { 1145 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel); 1146 if (smc_result != PPSMC_Result_OK) 1147 return -EINVAL; 1148 } 1149 1150 if ((!pi->mclk_dpm_key_disabled) && 1151 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { 1152 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel); 1153 if (smc_result != PPSMC_Result_OK) 1154 return -EINVAL; 1155 } 1156 1157 return 0; 1158 } 1159 1160 static int ci_stop_dpm(struct radeon_device *rdev) 1161 { 1162 struct ci_power_info *pi = ci_get_pi(rdev); 1163 PPSMC_Result smc_result; 1164 int ret; 1165 u32 tmp; 1166 1167 tmp = RREG32_SMC(GENERAL_PWRMGT); 1168 tmp &= ~GLOBAL_PWRMGT_EN; 1169 WREG32_SMC(GENERAL_PWRMGT, tmp); 1170 1171 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 1172 tmp &= ~DYNAMIC_PM_EN; 1173 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 1174 1175 if (!pi->pcie_dpm_key_disabled) { 1176 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable); 1177 if (smc_result != PPSMC_Result_OK) 1178 return -EINVAL; 1179 } 1180 1181 ret = ci_enable_sclk_mclk_dpm(rdev, false); 1182 if (ret) 1183 return ret; 1184 1185 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable); 1186 if (smc_result != PPSMC_Result_OK) 1187 return -EINVAL; 1188 1189 return 0; 1190 } 1191 1192 static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable) 1193 { 1194 u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 1195 1196 if (enable) 1197 tmp &= ~SCLK_PWRMGT_OFF; 1198 else 1199 tmp |= SCLK_PWRMGT_OFF; 1200 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 1201 } 1202 1203 #if 0 1204 static int ci_notify_hw_of_power_source(struct radeon_device *rdev, 1205 bool ac_power) 1206 { 1207 struct ci_power_info *pi = ci_get_pi(rdev); 1208 struct radeon_cac_tdp_table *cac_tdp_table = 1209 rdev->pm.dpm.dyn_state.cac_tdp_table; 1210 u32 power_limit; 1211 1212 if (ac_power) 1213 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256); 1214 else 1215 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256); 1216 1217 ci_set_power_limit(rdev, power_limit); 1218 1219 if (pi->caps_automatic_dc_transition) { 1220 if (ac_power) 1221 ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC); 1222 else 1223 ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp); 1224 } 1225 1226 return 0; 1227 } 1228 #endif 1229 1230 static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev, 1231 PPSMC_Msg msg, u32 parameter) 1232 { 1233 WREG32(SMC_MSG_ARG_0, parameter); 1234 return ci_send_msg_to_smc(rdev, msg); 1235 } 1236 1237 static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev, 1238 PPSMC_Msg msg, u32 *parameter) 1239 { 1240 PPSMC_Result smc_result; 1241 1242 smc_result = ci_send_msg_to_smc(rdev, msg); 1243 1244 if ((smc_result == PPSMC_Result_OK) && parameter) 1245 *parameter = RREG32(SMC_MSG_ARG_0); 1246 1247 return smc_result; 1248 } 1249 1250 static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n) 1251 { 1252 struct ci_power_info *pi = ci_get_pi(rdev); 1253 1254 if (!pi->sclk_dpm_key_disabled) { 1255 PPSMC_Result smc_result = 1256 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n); 1257 if (smc_result != PPSMC_Result_OK) 1258 return -EINVAL; 1259 } 1260 1261 return 0; 1262 } 1263 1264 static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n) 1265 { 1266 struct ci_power_info *pi = ci_get_pi(rdev); 1267 1268 if (!pi->mclk_dpm_key_disabled) { 1269 PPSMC_Result smc_result = 1270 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n); 1271 if (smc_result != PPSMC_Result_OK) 1272 return -EINVAL; 1273 } 1274 1275 return 0; 1276 } 1277 1278 static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n) 1279 { 1280 struct ci_power_info *pi = ci_get_pi(rdev); 1281 1282 if (!pi->pcie_dpm_key_disabled) { 1283 PPSMC_Result smc_result = 1284 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n); 1285 if (smc_result != PPSMC_Result_OK) 1286 return -EINVAL; 1287 } 1288 1289 return 0; 1290 } 1291 1292 static int ci_set_power_limit(struct radeon_device *rdev, u32 n) 1293 { 1294 struct ci_power_info *pi = ci_get_pi(rdev); 1295 1296 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) { 1297 PPSMC_Result smc_result = 1298 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n); 1299 if (smc_result != PPSMC_Result_OK) 1300 return -EINVAL; 1301 } 1302 1303 return 0; 1304 } 1305 1306 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev, 1307 u32 target_tdp) 1308 { 1309 PPSMC_Result smc_result = 1310 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); 1311 if (smc_result != PPSMC_Result_OK) 1312 return -EINVAL; 1313 return 0; 1314 } 1315 1316 static int ci_set_boot_state(struct radeon_device *rdev) 1317 { 1318 return ci_enable_sclk_mclk_dpm(rdev, false); 1319 } 1320 1321 static u32 ci_get_average_sclk_freq(struct radeon_device *rdev) 1322 { 1323 u32 sclk_freq; 1324 PPSMC_Result smc_result = 1325 ci_send_msg_to_smc_return_parameter(rdev, 1326 PPSMC_MSG_API_GetSclkFrequency, 1327 &sclk_freq); 1328 if (smc_result != PPSMC_Result_OK) 1329 sclk_freq = 0; 1330 1331 return sclk_freq; 1332 } 1333 1334 static u32 ci_get_average_mclk_freq(struct radeon_device *rdev) 1335 { 1336 u32 mclk_freq; 1337 PPSMC_Result smc_result = 1338 ci_send_msg_to_smc_return_parameter(rdev, 1339 PPSMC_MSG_API_GetMclkFrequency, 1340 &mclk_freq); 1341 if (smc_result != PPSMC_Result_OK) 1342 mclk_freq = 0; 1343 1344 return mclk_freq; 1345 } 1346 1347 static void ci_dpm_start_smc(struct radeon_device *rdev) 1348 { 1349 int i; 1350 1351 ci_program_jump_on_start(rdev); 1352 ci_start_smc_clock(rdev); 1353 ci_start_smc(rdev); 1354 for (i = 0; i < rdev->usec_timeout; i++) { 1355 if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED) 1356 break; 1357 } 1358 } 1359 1360 static void ci_dpm_stop_smc(struct radeon_device *rdev) 1361 { 1362 ci_reset_smc(rdev); 1363 ci_stop_smc_clock(rdev); 1364 } 1365 1366 static int ci_process_firmware_header(struct radeon_device *rdev) 1367 { 1368 struct ci_power_info *pi = ci_get_pi(rdev); 1369 u32 tmp; 1370 int ret; 1371 1372 ret = ci_read_smc_sram_dword(rdev, 1373 SMU7_FIRMWARE_HEADER_LOCATION + 1374 offsetof(SMU7_Firmware_Header, DpmTable), 1375 &tmp, pi->sram_end); 1376 if (ret) 1377 return ret; 1378 1379 pi->dpm_table_start = tmp; 1380 1381 ret = ci_read_smc_sram_dword(rdev, 1382 SMU7_FIRMWARE_HEADER_LOCATION + 1383 offsetof(SMU7_Firmware_Header, SoftRegisters), 1384 &tmp, pi->sram_end); 1385 if (ret) 1386 return ret; 1387 1388 pi->soft_regs_start = tmp; 1389 1390 ret = ci_read_smc_sram_dword(rdev, 1391 SMU7_FIRMWARE_HEADER_LOCATION + 1392 offsetof(SMU7_Firmware_Header, mcRegisterTable), 1393 &tmp, pi->sram_end); 1394 if (ret) 1395 return ret; 1396 1397 pi->mc_reg_table_start = tmp; 1398 1399 ret = ci_read_smc_sram_dword(rdev, 1400 SMU7_FIRMWARE_HEADER_LOCATION + 1401 offsetof(SMU7_Firmware_Header, FanTable), 1402 &tmp, pi->sram_end); 1403 if (ret) 1404 return ret; 1405 1406 pi->fan_table_start = tmp; 1407 1408 ret = ci_read_smc_sram_dword(rdev, 1409 SMU7_FIRMWARE_HEADER_LOCATION + 1410 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable), 1411 &tmp, pi->sram_end); 1412 if (ret) 1413 return ret; 1414 1415 pi->arb_table_start = tmp; 1416 1417 return 0; 1418 } 1419 1420 static void ci_read_clock_registers(struct radeon_device *rdev) 1421 { 1422 struct ci_power_info *pi = ci_get_pi(rdev); 1423 1424 pi->clock_registers.cg_spll_func_cntl = 1425 RREG32_SMC(CG_SPLL_FUNC_CNTL); 1426 pi->clock_registers.cg_spll_func_cntl_2 = 1427 RREG32_SMC(CG_SPLL_FUNC_CNTL_2); 1428 pi->clock_registers.cg_spll_func_cntl_3 = 1429 RREG32_SMC(CG_SPLL_FUNC_CNTL_3); 1430 pi->clock_registers.cg_spll_func_cntl_4 = 1431 RREG32_SMC(CG_SPLL_FUNC_CNTL_4); 1432 pi->clock_registers.cg_spll_spread_spectrum = 1433 RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM); 1434 pi->clock_registers.cg_spll_spread_spectrum_2 = 1435 RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2); 1436 pi->clock_registers.dll_cntl = RREG32(DLL_CNTL); 1437 pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL); 1438 pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL); 1439 pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL); 1440 pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL); 1441 pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1); 1442 pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2); 1443 pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1); 1444 pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2); 1445 } 1446 1447 static void ci_init_sclk_t(struct radeon_device *rdev) 1448 { 1449 struct ci_power_info *pi = ci_get_pi(rdev); 1450 1451 pi->low_sclk_interrupt_t = 0; 1452 } 1453 1454 static void ci_enable_thermal_protection(struct radeon_device *rdev, 1455 bool enable) 1456 { 1457 u32 tmp = RREG32_SMC(GENERAL_PWRMGT); 1458 1459 if (enable) 1460 tmp &= ~THERMAL_PROTECTION_DIS; 1461 else 1462 tmp |= THERMAL_PROTECTION_DIS; 1463 WREG32_SMC(GENERAL_PWRMGT, tmp); 1464 } 1465 1466 static void ci_enable_acpi_power_management(struct radeon_device *rdev) 1467 { 1468 u32 tmp = RREG32_SMC(GENERAL_PWRMGT); 1469 1470 tmp |= STATIC_PM_EN; 1471 1472 WREG32_SMC(GENERAL_PWRMGT, tmp); 1473 } 1474 1475 #if 0 1476 static int ci_enter_ulp_state(struct radeon_device *rdev) 1477 { 1478 1479 WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower); 1480 1481 udelay(25000); 1482 1483 return 0; 1484 } 1485 1486 static int ci_exit_ulp_state(struct radeon_device *rdev) 1487 { 1488 int i; 1489 1490 WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower); 1491 1492 udelay(7000); 1493 1494 for (i = 0; i < rdev->usec_timeout; i++) { 1495 if (RREG32(SMC_RESP_0) == 1) 1496 break; 1497 udelay(1000); 1498 } 1499 1500 return 0; 1501 } 1502 #endif 1503 1504 static int ci_notify_smc_display_change(struct radeon_device *rdev, 1505 bool has_display) 1506 { 1507 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay; 1508 1509 return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL; 1510 } 1511 1512 static int ci_enable_ds_master_switch(struct radeon_device *rdev, 1513 bool enable) 1514 { 1515 struct ci_power_info *pi = ci_get_pi(rdev); 1516 1517 if (enable) { 1518 if (pi->caps_sclk_ds) { 1519 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK) 1520 return -EINVAL; 1521 } else { 1522 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) 1523 return -EINVAL; 1524 } 1525 } else { 1526 if (pi->caps_sclk_ds) { 1527 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) 1528 return -EINVAL; 1529 } 1530 } 1531 1532 return 0; 1533 } 1534 1535 static void ci_program_display_gap(struct radeon_device *rdev) 1536 { 1537 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL); 1538 u32 pre_vbi_time_in_us; 1539 u32 frame_time_in_us; 1540 u32 ref_clock = rdev->clock.spll.reference_freq; 1541 u32 refresh_rate = r600_dpm_get_vrefresh(rdev); 1542 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 1543 1544 tmp &= ~DISP_GAP_MASK; 1545 if (rdev->pm.dpm.new_active_crtc_count > 0) 1546 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); 1547 else 1548 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE); 1549 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp); 1550 1551 if (refresh_rate == 0) 1552 refresh_rate = 60; 1553 if (vblank_time == 0xffffffff) 1554 vblank_time = 500; 1555 frame_time_in_us = 1000000 / refresh_rate; 1556 pre_vbi_time_in_us = 1557 frame_time_in_us - 200 - vblank_time; 1558 tmp = pre_vbi_time_in_us * (ref_clock / 100); 1559 1560 WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp); 1561 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64); 1562 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); 1563 1564 1565 ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1)); 1566 1567 } 1568 1569 static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable) 1570 { 1571 struct ci_power_info *pi = ci_get_pi(rdev); 1572 u32 tmp; 1573 1574 if (enable) { 1575 if (pi->caps_sclk_ss_support) { 1576 tmp = RREG32_SMC(GENERAL_PWRMGT); 1577 tmp |= DYN_SPREAD_SPECTRUM_EN; 1578 WREG32_SMC(GENERAL_PWRMGT, tmp); 1579 } 1580 } else { 1581 tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM); 1582 tmp &= ~SSEN; 1583 WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp); 1584 1585 tmp = RREG32_SMC(GENERAL_PWRMGT); 1586 tmp &= ~DYN_SPREAD_SPECTRUM_EN; 1587 WREG32_SMC(GENERAL_PWRMGT, tmp); 1588 } 1589 } 1590 1591 static void ci_program_sstp(struct radeon_device *rdev) 1592 { 1593 WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT))); 1594 } 1595 1596 static void ci_enable_display_gap(struct radeon_device *rdev) 1597 { 1598 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL); 1599 1600 tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK); 1601 tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) | 1602 DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK)); 1603 1604 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp); 1605 } 1606 1607 static void ci_program_vc(struct radeon_device *rdev) 1608 { 1609 u32 tmp; 1610 1611 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 1612 tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT); 1613 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 1614 1615 WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0); 1616 WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1); 1617 WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2); 1618 WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3); 1619 WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4); 1620 WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5); 1621 WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6); 1622 WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7); 1623 } 1624 1625 static void ci_clear_vc(struct radeon_device *rdev) 1626 { 1627 u32 tmp; 1628 1629 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 1630 tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT); 1631 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 1632 1633 WREG32_SMC(CG_FTV_0, 0); 1634 WREG32_SMC(CG_FTV_1, 0); 1635 WREG32_SMC(CG_FTV_2, 0); 1636 WREG32_SMC(CG_FTV_3, 0); 1637 WREG32_SMC(CG_FTV_4, 0); 1638 WREG32_SMC(CG_FTV_5, 0); 1639 WREG32_SMC(CG_FTV_6, 0); 1640 WREG32_SMC(CG_FTV_7, 0); 1641 } 1642 1643 static int ci_upload_firmware(struct radeon_device *rdev) 1644 { 1645 struct ci_power_info *pi = ci_get_pi(rdev); 1646 int i, ret; 1647 1648 for (i = 0; i < rdev->usec_timeout; i++) { 1649 if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE) 1650 break; 1651 } 1652 WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1); 1653 1654 ci_stop_smc_clock(rdev); 1655 ci_reset_smc(rdev); 1656 1657 ret = ci_load_smc_ucode(rdev, pi->sram_end); 1658 1659 return ret; 1660 1661 } 1662 1663 static int ci_get_svi2_voltage_table(struct radeon_device *rdev, 1664 struct radeon_clock_voltage_dependency_table *voltage_dependency_table, 1665 struct atom_voltage_table *voltage_table) 1666 { 1667 u32 i; 1668 1669 if (voltage_dependency_table == NULL) 1670 return -EINVAL; 1671 1672 voltage_table->mask_low = 0; 1673 voltage_table->phase_delay = 0; 1674 1675 voltage_table->count = voltage_dependency_table->count; 1676 for (i = 0; i < voltage_table->count; i++) { 1677 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v; 1678 voltage_table->entries[i].smio_low = 0; 1679 } 1680 1681 return 0; 1682 } 1683 1684 static int ci_construct_voltage_tables(struct radeon_device *rdev) 1685 { 1686 struct ci_power_info *pi = ci_get_pi(rdev); 1687 int ret; 1688 1689 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { 1690 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC, 1691 VOLTAGE_OBJ_GPIO_LUT, 1692 &pi->vddc_voltage_table); 1693 if (ret) 1694 return ret; 1695 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 1696 ret = ci_get_svi2_voltage_table(rdev, 1697 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, 1698 &pi->vddc_voltage_table); 1699 if (ret) 1700 return ret; 1701 } 1702 1703 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC) 1704 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC, 1705 &pi->vddc_voltage_table); 1706 1707 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { 1708 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI, 1709 VOLTAGE_OBJ_GPIO_LUT, 1710 &pi->vddci_voltage_table); 1711 if (ret) 1712 return ret; 1713 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 1714 ret = ci_get_svi2_voltage_table(rdev, 1715 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, 1716 &pi->vddci_voltage_table); 1717 if (ret) 1718 return ret; 1719 } 1720 1721 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI) 1722 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI, 1723 &pi->vddci_voltage_table); 1724 1725 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { 1726 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC, 1727 VOLTAGE_OBJ_GPIO_LUT, 1728 &pi->mvdd_voltage_table); 1729 if (ret) 1730 return ret; 1731 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 1732 ret = ci_get_svi2_voltage_table(rdev, 1733 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, 1734 &pi->mvdd_voltage_table); 1735 if (ret) 1736 return ret; 1737 } 1738 1739 if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD) 1740 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD, 1741 &pi->mvdd_voltage_table); 1742 1743 return 0; 1744 } 1745 1746 static void ci_populate_smc_voltage_table(struct radeon_device *rdev, 1747 struct atom_voltage_table_entry *voltage_table, 1748 SMU7_Discrete_VoltageLevel *smc_voltage_table) 1749 { 1750 int ret; 1751 1752 ret = ci_get_std_voltage_value_sidd(rdev, voltage_table, 1753 &smc_voltage_table->StdVoltageHiSidd, 1754 &smc_voltage_table->StdVoltageLoSidd); 1755 1756 if (ret) { 1757 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE; 1758 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE; 1759 } 1760 1761 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE); 1762 smc_voltage_table->StdVoltageHiSidd = 1763 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd); 1764 smc_voltage_table->StdVoltageLoSidd = 1765 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd); 1766 } 1767 1768 static int ci_populate_smc_vddc_table(struct radeon_device *rdev, 1769 SMU7_Discrete_DpmTable *table) 1770 { 1771 struct ci_power_info *pi = ci_get_pi(rdev); 1772 unsigned int count; 1773 1774 table->VddcLevelCount = pi->vddc_voltage_table.count; 1775 for (count = 0; count < table->VddcLevelCount; count++) { 1776 ci_populate_smc_voltage_table(rdev, 1777 &pi->vddc_voltage_table.entries[count], 1778 &table->VddcLevel[count]); 1779 1780 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) 1781 table->VddcLevel[count].Smio |= 1782 pi->vddc_voltage_table.entries[count].smio_low; 1783 else 1784 table->VddcLevel[count].Smio = 0; 1785 } 1786 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount); 1787 1788 return 0; 1789 } 1790 1791 static int ci_populate_smc_vddci_table(struct radeon_device *rdev, 1792 SMU7_Discrete_DpmTable *table) 1793 { 1794 unsigned int count; 1795 struct ci_power_info *pi = ci_get_pi(rdev); 1796 1797 table->VddciLevelCount = pi->vddci_voltage_table.count; 1798 for (count = 0; count < table->VddciLevelCount; count++) { 1799 ci_populate_smc_voltage_table(rdev, 1800 &pi->vddci_voltage_table.entries[count], 1801 &table->VddciLevel[count]); 1802 1803 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) 1804 table->VddciLevel[count].Smio |= 1805 pi->vddci_voltage_table.entries[count].smio_low; 1806 else 1807 table->VddciLevel[count].Smio = 0; 1808 } 1809 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount); 1810 1811 return 0; 1812 } 1813 1814 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev, 1815 SMU7_Discrete_DpmTable *table) 1816 { 1817 struct ci_power_info *pi = ci_get_pi(rdev); 1818 unsigned int count; 1819 1820 table->MvddLevelCount = pi->mvdd_voltage_table.count; 1821 for (count = 0; count < table->MvddLevelCount; count++) { 1822 ci_populate_smc_voltage_table(rdev, 1823 &pi->mvdd_voltage_table.entries[count], 1824 &table->MvddLevel[count]); 1825 1826 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) 1827 table->MvddLevel[count].Smio |= 1828 pi->mvdd_voltage_table.entries[count].smio_low; 1829 else 1830 table->MvddLevel[count].Smio = 0; 1831 } 1832 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount); 1833 1834 return 0; 1835 } 1836 1837 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev, 1838 SMU7_Discrete_DpmTable *table) 1839 { 1840 int ret; 1841 1842 ret = ci_populate_smc_vddc_table(rdev, table); 1843 if (ret) 1844 return ret; 1845 1846 ret = ci_populate_smc_vddci_table(rdev, table); 1847 if (ret) 1848 return ret; 1849 1850 ret = ci_populate_smc_mvdd_table(rdev, table); 1851 if (ret) 1852 return ret; 1853 1854 return 0; 1855 } 1856 1857 static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk, 1858 SMU7_Discrete_VoltageLevel *voltage) 1859 { 1860 struct ci_power_info *pi = ci_get_pi(rdev); 1861 u32 i = 0; 1862 1863 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) { 1864 for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) { 1865 if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) { 1866 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value; 1867 break; 1868 } 1869 } 1870 1871 if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count) 1872 return -EINVAL; 1873 } 1874 1875 return -EINVAL; 1876 } 1877 1878 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, 1879 struct atom_voltage_table_entry *voltage_table, 1880 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd) 1881 { 1882 u16 v_index, idx; 1883 bool voltage_found = false; 1884 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE; 1885 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE; 1886 1887 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL) 1888 return -EINVAL; 1889 1890 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) { 1891 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { 1892 if (voltage_table->value == 1893 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { 1894 voltage_found = true; 1895 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count) 1896 idx = v_index; 1897 else 1898 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1; 1899 *std_voltage_lo_sidd = 1900 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; 1901 *std_voltage_hi_sidd = 1902 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; 1903 break; 1904 } 1905 } 1906 1907 if (!voltage_found) { 1908 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { 1909 if (voltage_table->value <= 1910 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { 1911 voltage_found = true; 1912 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count) 1913 idx = v_index; 1914 else 1915 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1; 1916 *std_voltage_lo_sidd = 1917 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; 1918 *std_voltage_hi_sidd = 1919 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; 1920 break; 1921 } 1922 } 1923 } 1924 } 1925 1926 return 0; 1927 } 1928 1929 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev, 1930 const struct radeon_phase_shedding_limits_table *limits, 1931 u32 sclk, 1932 u32 *phase_shedding) 1933 { 1934 unsigned int i; 1935 1936 *phase_shedding = 1; 1937 1938 for (i = 0; i < limits->count; i++) { 1939 if (sclk < limits->entries[i].sclk) { 1940 *phase_shedding = i; 1941 break; 1942 } 1943 } 1944 } 1945 1946 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev, 1947 const struct radeon_phase_shedding_limits_table *limits, 1948 u32 mclk, 1949 u32 *phase_shedding) 1950 { 1951 unsigned int i; 1952 1953 *phase_shedding = 1; 1954 1955 for (i = 0; i < limits->count; i++) { 1956 if (mclk < limits->entries[i].mclk) { 1957 *phase_shedding = i; 1958 break; 1959 } 1960 } 1961 } 1962 1963 static int ci_init_arb_table_index(struct radeon_device *rdev) 1964 { 1965 struct ci_power_info *pi = ci_get_pi(rdev); 1966 u32 tmp; 1967 int ret; 1968 1969 ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start, 1970 &tmp, pi->sram_end); 1971 if (ret) 1972 return ret; 1973 1974 tmp &= 0x00FFFFFF; 1975 tmp |= MC_CG_ARB_FREQ_F1 << 24; 1976 1977 return ci_write_smc_sram_dword(rdev, pi->arb_table_start, 1978 tmp, pi->sram_end); 1979 } 1980 1981 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev, 1982 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table, 1983 u32 clock, u32 *voltage) 1984 { 1985 u32 i = 0; 1986 1987 if (allowed_clock_voltage_table->count == 0) 1988 return -EINVAL; 1989 1990 for (i = 0; i < allowed_clock_voltage_table->count; i++) { 1991 if (allowed_clock_voltage_table->entries[i].clk >= clock) { 1992 *voltage = allowed_clock_voltage_table->entries[i].v; 1993 return 0; 1994 } 1995 } 1996 1997 *voltage = allowed_clock_voltage_table->entries[i-1].v; 1998 1999 return 0; 2000 } 2001 2002 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev, 2003 u32 sclk, u32 min_sclk_in_sr) 2004 { 2005 u32 i; 2006 u32 tmp; 2007 u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ? 2008 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK; 2009 2010 if (sclk < min) 2011 return 0; 2012 2013 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { 2014 tmp = sclk / (1 << i); 2015 if (tmp >= min || i == 0) 2016 break; 2017 } 2018 2019 return (u8)i; 2020 } 2021 2022 static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev) 2023 { 2024 return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); 2025 } 2026 2027 static int ci_reset_to_default(struct radeon_device *rdev) 2028 { 2029 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ? 2030 0 : -EINVAL; 2031 } 2032 2033 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev) 2034 { 2035 u32 tmp; 2036 2037 tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8; 2038 2039 if (tmp == MC_CG_ARB_FREQ_F0) 2040 return 0; 2041 2042 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0); 2043 } 2044 2045 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev, 2046 u32 sclk, 2047 u32 mclk, 2048 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs) 2049 { 2050 u32 dram_timing; 2051 u32 dram_timing2; 2052 u32 burst_time; 2053 2054 radeon_atom_set_engine_dram_timings(rdev, sclk, mclk); 2055 2056 dram_timing = RREG32(MC_ARB_DRAM_TIMING); 2057 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); 2058 burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK; 2059 2060 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing); 2061 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2); 2062 arb_regs->McArbBurstTime = (u8)burst_time; 2063 2064 return 0; 2065 } 2066 2067 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev) 2068 { 2069 struct ci_power_info *pi = ci_get_pi(rdev); 2070 SMU7_Discrete_MCArbDramTimingTable arb_regs; 2071 u32 i, j; 2072 int ret = 0; 2073 2074 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable)); 2075 2076 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) { 2077 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) { 2078 ret = ci_populate_memory_timing_parameters(rdev, 2079 pi->dpm_table.sclk_table.dpm_levels[i].value, 2080 pi->dpm_table.mclk_table.dpm_levels[j].value, 2081 &arb_regs.entries[i][j]); 2082 if (ret) 2083 break; 2084 } 2085 } 2086 2087 if (ret == 0) 2088 ret = ci_copy_bytes_to_smc(rdev, 2089 pi->arb_table_start, 2090 (u8 *)&arb_regs, 2091 sizeof(SMU7_Discrete_MCArbDramTimingTable), 2092 pi->sram_end); 2093 2094 return ret; 2095 } 2096 2097 static int ci_program_memory_timing_parameters(struct radeon_device *rdev) 2098 { 2099 struct ci_power_info *pi = ci_get_pi(rdev); 2100 2101 if (pi->need_update_smu7_dpm_table == 0) 2102 return 0; 2103 2104 return ci_do_program_memory_timing_parameters(rdev); 2105 } 2106 2107 static void ci_populate_smc_initial_state(struct radeon_device *rdev, 2108 struct radeon_ps *radeon_boot_state) 2109 { 2110 struct ci_ps *boot_state = ci_get_ps(radeon_boot_state); 2111 struct ci_power_info *pi = ci_get_pi(rdev); 2112 u32 level = 0; 2113 2114 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) { 2115 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >= 2116 boot_state->performance_levels[0].sclk) { 2117 pi->smc_state_table.GraphicsBootLevel = level; 2118 break; 2119 } 2120 } 2121 2122 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) { 2123 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >= 2124 boot_state->performance_levels[0].mclk) { 2125 pi->smc_state_table.MemoryBootLevel = level; 2126 break; 2127 } 2128 } 2129 } 2130 2131 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table) 2132 { 2133 u32 i; 2134 u32 mask_value = 0; 2135 2136 for (i = dpm_table->count; i > 0; i--) { 2137 mask_value = mask_value << 1; 2138 if (dpm_table->dpm_levels[i-1].enabled) 2139 mask_value |= 0x1; 2140 else 2141 mask_value &= 0xFFFFFFFE; 2142 } 2143 2144 return mask_value; 2145 } 2146 2147 static void ci_populate_smc_link_level(struct radeon_device *rdev, 2148 SMU7_Discrete_DpmTable *table) 2149 { 2150 struct ci_power_info *pi = ci_get_pi(rdev); 2151 struct ci_dpm_table *dpm_table = &pi->dpm_table; 2152 u32 i; 2153 2154 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) { 2155 table->LinkLevel[i].PcieGenSpeed = 2156 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value; 2157 table->LinkLevel[i].PcieLaneCount = 2158 r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); 2159 table->LinkLevel[i].EnabledForActivity = 1; 2160 table->LinkLevel[i].DownT = cpu_to_be32(5); 2161 table->LinkLevel[i].UpT = cpu_to_be32(30); 2162 } 2163 2164 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count; 2165 pi->dpm_level_enable_mask.pcie_dpm_enable_mask = 2166 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); 2167 } 2168 2169 static int ci_populate_smc_uvd_level(struct radeon_device *rdev, 2170 SMU7_Discrete_DpmTable *table) 2171 { 2172 u32 count; 2173 struct atom_clock_dividers dividers; 2174 int ret = -EINVAL; 2175 2176 table->UvdLevelCount = 2177 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count; 2178 2179 for (count = 0; count < table->UvdLevelCount; count++) { 2180 table->UvdLevel[count].VclkFrequency = 2181 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk; 2182 table->UvdLevel[count].DclkFrequency = 2183 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk; 2184 table->UvdLevel[count].MinVddc = 2185 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; 2186 table->UvdLevel[count].MinVddcPhases = 1; 2187 2188 ret = radeon_atom_get_clock_dividers(rdev, 2189 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2190 table->UvdLevel[count].VclkFrequency, false, ÷rs); 2191 if (ret) 2192 return ret; 2193 2194 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider; 2195 2196 ret = radeon_atom_get_clock_dividers(rdev, 2197 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2198 table->UvdLevel[count].DclkFrequency, false, ÷rs); 2199 if (ret) 2200 return ret; 2201 2202 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider; 2203 2204 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency); 2205 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency); 2206 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc); 2207 } 2208 2209 return ret; 2210 } 2211 2212 static int ci_populate_smc_vce_level(struct radeon_device *rdev, 2213 SMU7_Discrete_DpmTable *table) 2214 { 2215 u32 count; 2216 struct atom_clock_dividers dividers; 2217 int ret = -EINVAL; 2218 2219 table->VceLevelCount = 2220 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count; 2221 2222 for (count = 0; count < table->VceLevelCount; count++) { 2223 table->VceLevel[count].Frequency = 2224 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk; 2225 table->VceLevel[count].MinVoltage = 2226 (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; 2227 table->VceLevel[count].MinPhases = 1; 2228 2229 ret = radeon_atom_get_clock_dividers(rdev, 2230 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2231 table->VceLevel[count].Frequency, false, ÷rs); 2232 if (ret) 2233 return ret; 2234 2235 table->VceLevel[count].Divider = (u8)dividers.post_divider; 2236 2237 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency); 2238 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage); 2239 } 2240 2241 return ret; 2242 2243 } 2244 2245 static int ci_populate_smc_acp_level(struct radeon_device *rdev, 2246 SMU7_Discrete_DpmTable *table) 2247 { 2248 u32 count; 2249 struct atom_clock_dividers dividers; 2250 int ret = -EINVAL; 2251 2252 table->AcpLevelCount = (u8) 2253 (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count); 2254 2255 for (count = 0; count < table->AcpLevelCount; count++) { 2256 table->AcpLevel[count].Frequency = 2257 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk; 2258 table->AcpLevel[count].MinVoltage = 2259 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v; 2260 table->AcpLevel[count].MinPhases = 1; 2261 2262 ret = radeon_atom_get_clock_dividers(rdev, 2263 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2264 table->AcpLevel[count].Frequency, false, ÷rs); 2265 if (ret) 2266 return ret; 2267 2268 table->AcpLevel[count].Divider = (u8)dividers.post_divider; 2269 2270 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency); 2271 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage); 2272 } 2273 2274 return ret; 2275 } 2276 2277 static int ci_populate_smc_samu_level(struct radeon_device *rdev, 2278 SMU7_Discrete_DpmTable *table) 2279 { 2280 u32 count; 2281 struct atom_clock_dividers dividers; 2282 int ret = -EINVAL; 2283 2284 table->SamuLevelCount = 2285 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count; 2286 2287 for (count = 0; count < table->SamuLevelCount; count++) { 2288 table->SamuLevel[count].Frequency = 2289 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk; 2290 table->SamuLevel[count].MinVoltage = 2291 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; 2292 table->SamuLevel[count].MinPhases = 1; 2293 2294 ret = radeon_atom_get_clock_dividers(rdev, 2295 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2296 table->SamuLevel[count].Frequency, false, ÷rs); 2297 if (ret) 2298 return ret; 2299 2300 table->SamuLevel[count].Divider = (u8)dividers.post_divider; 2301 2302 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency); 2303 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage); 2304 } 2305 2306 return ret; 2307 } 2308 2309 static int ci_calculate_mclk_params(struct radeon_device *rdev, 2310 u32 memory_clock, 2311 SMU7_Discrete_MemoryLevel *mclk, 2312 bool strobe_mode, 2313 bool dll_state_on) 2314 { 2315 struct ci_power_info *pi = ci_get_pi(rdev); 2316 u32 dll_cntl = pi->clock_registers.dll_cntl; 2317 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl; 2318 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl; 2319 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl; 2320 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl; 2321 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1; 2322 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2; 2323 u32 mpll_ss1 = pi->clock_registers.mpll_ss1; 2324 u32 mpll_ss2 = pi->clock_registers.mpll_ss2; 2325 struct atom_mpll_param mpll_param; 2326 int ret; 2327 2328 ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param); 2329 if (ret) 2330 return ret; 2331 2332 mpll_func_cntl &= ~BWCTRL_MASK; 2333 mpll_func_cntl |= BWCTRL(mpll_param.bwcntl); 2334 2335 mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK); 2336 mpll_func_cntl_1 |= CLKF(mpll_param.clkf) | 2337 CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode); 2338 2339 mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK; 2340 mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div); 2341 2342 if (pi->mem_gddr5) { 2343 mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK); 2344 mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) | 2345 YCLK_POST_DIV(mpll_param.post_div); 2346 } 2347 2348 if (pi->caps_mclk_ss_support) { 2349 struct radeon_atom_ss ss; 2350 u32 freq_nom; 2351 u32 tmp; 2352 u32 reference_clock = rdev->clock.mpll.reference_freq; 2353 2354 if (pi->mem_gddr5) 2355 freq_nom = memory_clock * 4; 2356 else 2357 freq_nom = memory_clock * 2; 2358 2359 tmp = (freq_nom / reference_clock); 2360 tmp = tmp * tmp; 2361 if (radeon_atombios_get_asic_ss_info(rdev, &ss, 2362 ASIC_INTERNAL_MEMORY_SS, freq_nom)) { 2363 u32 clks = reference_clock * 5 / ss.rate; 2364 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom); 2365 2366 mpll_ss1 &= ~CLKV_MASK; 2367 mpll_ss1 |= CLKV(clkv); 2368 2369 mpll_ss2 &= ~CLKS_MASK; 2370 mpll_ss2 |= CLKS(clks); 2371 } 2372 } 2373 2374 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK; 2375 mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed); 2376 2377 if (dll_state_on) 2378 mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB; 2379 else 2380 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); 2381 2382 mclk->MclkFrequency = memory_clock; 2383 mclk->MpllFuncCntl = mpll_func_cntl; 2384 mclk->MpllFuncCntl_1 = mpll_func_cntl_1; 2385 mclk->MpllFuncCntl_2 = mpll_func_cntl_2; 2386 mclk->MpllAdFuncCntl = mpll_ad_func_cntl; 2387 mclk->MpllDqFuncCntl = mpll_dq_func_cntl; 2388 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; 2389 mclk->DllCntl = dll_cntl; 2390 mclk->MpllSs1 = mpll_ss1; 2391 mclk->MpllSs2 = mpll_ss2; 2392 2393 return 0; 2394 } 2395 2396 static int ci_populate_single_memory_level(struct radeon_device *rdev, 2397 u32 memory_clock, 2398 SMU7_Discrete_MemoryLevel *memory_level) 2399 { 2400 struct ci_power_info *pi = ci_get_pi(rdev); 2401 int ret; 2402 bool dll_state_on; 2403 2404 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) { 2405 ret = ci_get_dependency_volt_by_clk(rdev, 2406 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, 2407 memory_clock, &memory_level->MinVddc); 2408 if (ret) 2409 return ret; 2410 } 2411 2412 if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) { 2413 ret = ci_get_dependency_volt_by_clk(rdev, 2414 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, 2415 memory_clock, &memory_level->MinVddci); 2416 if (ret) 2417 return ret; 2418 } 2419 2420 if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) { 2421 ret = ci_get_dependency_volt_by_clk(rdev, 2422 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, 2423 memory_clock, &memory_level->MinMvdd); 2424 if (ret) 2425 return ret; 2426 } 2427 2428 memory_level->MinVddcPhases = 1; 2429 2430 if (pi->vddc_phase_shed_control) 2431 ci_populate_phase_value_based_on_mclk(rdev, 2432 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table, 2433 memory_clock, 2434 &memory_level->MinVddcPhases); 2435 2436 memory_level->EnabledForThrottle = 1; 2437 memory_level->EnabledForActivity = 1; 2438 memory_level->UpH = 0; 2439 memory_level->DownH = 100; 2440 memory_level->VoltageDownH = 0; 2441 memory_level->ActivityLevel = (u16)pi->mclk_activity_target; 2442 2443 memory_level->StutterEnable = false; 2444 memory_level->StrobeEnable = false; 2445 memory_level->EdcReadEnable = false; 2446 memory_level->EdcWriteEnable = false; 2447 memory_level->RttEnable = false; 2448 2449 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 2450 2451 if (pi->mclk_stutter_mode_threshold && 2452 (memory_clock <= pi->mclk_stutter_mode_threshold) && 2453 (pi->uvd_enabled == false) && 2454 (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) && 2455 (rdev->pm.dpm.new_active_crtc_count <= 2)) 2456 memory_level->StutterEnable = true; 2457 2458 if (pi->mclk_strobe_mode_threshold && 2459 (memory_clock <= pi->mclk_strobe_mode_threshold)) 2460 memory_level->StrobeEnable = 1; 2461 2462 if (pi->mem_gddr5) { 2463 memory_level->StrobeRatio = 2464 si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable); 2465 if (pi->mclk_edc_enable_threshold && 2466 (memory_clock > pi->mclk_edc_enable_threshold)) 2467 memory_level->EdcReadEnable = true; 2468 2469 if (pi->mclk_edc_wr_enable_threshold && 2470 (memory_clock > pi->mclk_edc_wr_enable_threshold)) 2471 memory_level->EdcWriteEnable = true; 2472 2473 if (memory_level->StrobeEnable) { 2474 if (si_get_mclk_frequency_ratio(memory_clock, true) >= 2475 ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf)) 2476 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false; 2477 else 2478 dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false; 2479 } else { 2480 dll_state_on = pi->dll_default_on; 2481 } 2482 } else { 2483 memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock); 2484 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false; 2485 } 2486 2487 ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on); 2488 if (ret) 2489 return ret; 2490 2491 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE); 2492 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases); 2493 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE); 2494 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE); 2495 2496 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency); 2497 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel); 2498 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl); 2499 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1); 2500 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2); 2501 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl); 2502 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl); 2503 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl); 2504 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl); 2505 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1); 2506 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2); 2507 2508 return 0; 2509 } 2510 2511 static int ci_populate_smc_acpi_level(struct radeon_device *rdev, 2512 SMU7_Discrete_DpmTable *table) 2513 { 2514 struct ci_power_info *pi = ci_get_pi(rdev); 2515 struct atom_clock_dividers dividers; 2516 SMU7_Discrete_VoltageLevel voltage_level; 2517 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl; 2518 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2; 2519 u32 dll_cntl = pi->clock_registers.dll_cntl; 2520 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl; 2521 int ret; 2522 2523 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; 2524 2525 if (pi->acpi_vddc) 2526 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE); 2527 else 2528 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE); 2529 2530 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1; 2531 2532 table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq; 2533 2534 ret = radeon_atom_get_clock_dividers(rdev, 2535 COMPUTE_GPUCLK_INPUT_FLAG_SCLK, 2536 table->ACPILevel.SclkFrequency, false, ÷rs); 2537 if (ret) 2538 return ret; 2539 2540 table->ACPILevel.SclkDid = (u8)dividers.post_divider; 2541 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 2542 table->ACPILevel.DeepSleepDivId = 0; 2543 2544 spll_func_cntl &= ~SPLL_PWRON; 2545 spll_func_cntl |= SPLL_RESET; 2546 2547 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; 2548 spll_func_cntl_2 |= SCLK_MUX_SEL(4); 2549 2550 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; 2551 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; 2552 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3; 2553 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4; 2554 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum; 2555 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2; 2556 table->ACPILevel.CcPwrDynRm = 0; 2557 table->ACPILevel.CcPwrDynRm1 = 0; 2558 2559 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags); 2560 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases); 2561 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency); 2562 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl); 2563 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2); 2564 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3); 2565 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4); 2566 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum); 2567 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2); 2568 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm); 2569 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1); 2570 2571 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; 2572 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; 2573 2574 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) { 2575 if (pi->acpi_vddci) 2576 table->MemoryACPILevel.MinVddci = 2577 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE); 2578 else 2579 table->MemoryACPILevel.MinVddci = 2580 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE); 2581 } 2582 2583 if (ci_populate_mvdd_value(rdev, 0, &voltage_level)) 2584 table->MemoryACPILevel.MinMvdd = 0; 2585 else 2586 table->MemoryACPILevel.MinMvdd = 2587 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE); 2588 2589 mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET; 2590 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); 2591 2592 dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS); 2593 2594 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl); 2595 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl); 2596 table->MemoryACPILevel.MpllAdFuncCntl = 2597 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl); 2598 table->MemoryACPILevel.MpllDqFuncCntl = 2599 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl); 2600 table->MemoryACPILevel.MpllFuncCntl = 2601 cpu_to_be32(pi->clock_registers.mpll_func_cntl); 2602 table->MemoryACPILevel.MpllFuncCntl_1 = 2603 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1); 2604 table->MemoryACPILevel.MpllFuncCntl_2 = 2605 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2); 2606 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1); 2607 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2); 2608 2609 table->MemoryACPILevel.EnabledForThrottle = 0; 2610 table->MemoryACPILevel.EnabledForActivity = 0; 2611 table->MemoryACPILevel.UpH = 0; 2612 table->MemoryACPILevel.DownH = 100; 2613 table->MemoryACPILevel.VoltageDownH = 0; 2614 table->MemoryACPILevel.ActivityLevel = 2615 cpu_to_be16((u16)pi->mclk_activity_target); 2616 2617 table->MemoryACPILevel.StutterEnable = false; 2618 table->MemoryACPILevel.StrobeEnable = false; 2619 table->MemoryACPILevel.EdcReadEnable = false; 2620 table->MemoryACPILevel.EdcWriteEnable = false; 2621 table->MemoryACPILevel.RttEnable = false; 2622 2623 return 0; 2624 } 2625 2626 2627 static int ci_enable_ulv(struct radeon_device *rdev, bool enable) 2628 { 2629 struct ci_power_info *pi = ci_get_pi(rdev); 2630 struct ci_ulv_parm *ulv = &pi->ulv; 2631 2632 if (ulv->supported) { 2633 if (enable) 2634 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ? 2635 0 : -EINVAL; 2636 else 2637 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ? 2638 0 : -EINVAL; 2639 } 2640 2641 return 0; 2642 } 2643 2644 static int ci_populate_ulv_level(struct radeon_device *rdev, 2645 SMU7_Discrete_Ulv *state) 2646 { 2647 struct ci_power_info *pi = ci_get_pi(rdev); 2648 u16 ulv_voltage = rdev->pm.dpm.backbias_response_time; 2649 2650 state->CcPwrDynRm = 0; 2651 state->CcPwrDynRm1 = 0; 2652 2653 if (ulv_voltage == 0) { 2654 pi->ulv.supported = false; 2655 return 0; 2656 } 2657 2658 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 2659 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) 2660 state->VddcOffset = 0; 2661 else 2662 state->VddcOffset = 2663 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage; 2664 } else { 2665 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) 2666 state->VddcOffsetVid = 0; 2667 else 2668 state->VddcOffsetVid = (u8) 2669 ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) * 2670 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); 2671 } 2672 state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1; 2673 2674 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm); 2675 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1); 2676 state->VddcOffset = cpu_to_be16(state->VddcOffset); 2677 2678 return 0; 2679 } 2680 2681 static int ci_calculate_sclk_params(struct radeon_device *rdev, 2682 u32 engine_clock, 2683 SMU7_Discrete_GraphicsLevel *sclk) 2684 { 2685 struct ci_power_info *pi = ci_get_pi(rdev); 2686 struct atom_clock_dividers dividers; 2687 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3; 2688 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4; 2689 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum; 2690 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2; 2691 u32 reference_clock = rdev->clock.spll.reference_freq; 2692 u32 reference_divider; 2693 u32 fbdiv; 2694 int ret; 2695 2696 ret = radeon_atom_get_clock_dividers(rdev, 2697 COMPUTE_GPUCLK_INPUT_FLAG_SCLK, 2698 engine_clock, false, ÷rs); 2699 if (ret) 2700 return ret; 2701 2702 reference_divider = 1 + dividers.ref_div; 2703 fbdiv = dividers.fb_div & 0x3FFFFFF; 2704 2705 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; 2706 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); 2707 spll_func_cntl_3 |= SPLL_DITHEN; 2708 2709 if (pi->caps_sclk_ss_support) { 2710 struct radeon_atom_ss ss; 2711 u32 vco_freq = engine_clock * dividers.post_div; 2712 2713 if (radeon_atombios_get_asic_ss_info(rdev, &ss, 2714 ASIC_INTERNAL_ENGINE_SS, vco_freq)) { 2715 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); 2716 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000); 2717 2718 cg_spll_spread_spectrum &= ~CLK_S_MASK; 2719 cg_spll_spread_spectrum |= CLK_S(clk_s); 2720 cg_spll_spread_spectrum |= SSEN; 2721 2722 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK; 2723 cg_spll_spread_spectrum_2 |= CLK_V(clk_v); 2724 } 2725 } 2726 2727 sclk->SclkFrequency = engine_clock; 2728 sclk->CgSpllFuncCntl3 = spll_func_cntl_3; 2729 sclk->CgSpllFuncCntl4 = spll_func_cntl_4; 2730 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; 2731 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; 2732 sclk->SclkDid = (u8)dividers.post_divider; 2733 2734 return 0; 2735 } 2736 2737 static int ci_populate_single_graphic_level(struct radeon_device *rdev, 2738 u32 engine_clock, 2739 u16 sclk_activity_level_t, 2740 SMU7_Discrete_GraphicsLevel *graphic_level) 2741 { 2742 struct ci_power_info *pi = ci_get_pi(rdev); 2743 int ret; 2744 2745 ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level); 2746 if (ret) 2747 return ret; 2748 2749 ret = ci_get_dependency_volt_by_clk(rdev, 2750 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, 2751 engine_clock, &graphic_level->MinVddc); 2752 if (ret) 2753 return ret; 2754 2755 graphic_level->SclkFrequency = engine_clock; 2756 2757 graphic_level->Flags = 0; 2758 graphic_level->MinVddcPhases = 1; 2759 2760 if (pi->vddc_phase_shed_control) 2761 ci_populate_phase_value_based_on_sclk(rdev, 2762 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table, 2763 engine_clock, 2764 &graphic_level->MinVddcPhases); 2765 2766 graphic_level->ActivityLevel = sclk_activity_level_t; 2767 2768 graphic_level->CcPwrDynRm = 0; 2769 graphic_level->CcPwrDynRm1 = 0; 2770 graphic_level->EnabledForActivity = 1; 2771 graphic_level->EnabledForThrottle = 1; 2772 graphic_level->UpH = 0; 2773 graphic_level->DownH = 0; 2774 graphic_level->VoltageDownH = 0; 2775 graphic_level->PowerThrottle = 0; 2776 2777 if (pi->caps_sclk_ds) 2778 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev, 2779 engine_clock, 2780 CISLAND_MINIMUM_ENGINE_CLOCK); 2781 2782 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 2783 2784 graphic_level->Flags = cpu_to_be32(graphic_level->Flags); 2785 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE); 2786 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases); 2787 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency); 2788 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel); 2789 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3); 2790 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4); 2791 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum); 2792 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2); 2793 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm); 2794 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1); 2795 2796 return 0; 2797 } 2798 2799 static int ci_populate_all_graphic_levels(struct radeon_device *rdev) 2800 { 2801 struct ci_power_info *pi = ci_get_pi(rdev); 2802 struct ci_dpm_table *dpm_table = &pi->dpm_table; 2803 u32 level_array_address = pi->dpm_table_start + 2804 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel); 2805 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) * 2806 SMU7_MAX_LEVELS_GRAPHICS; 2807 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel; 2808 u32 i, ret; 2809 2810 memset(levels, 0, level_array_size); 2811 2812 for (i = 0; i < dpm_table->sclk_table.count; i++) { 2813 ret = ci_populate_single_graphic_level(rdev, 2814 dpm_table->sclk_table.dpm_levels[i].value, 2815 (u16)pi->activity_target[i], 2816 &pi->smc_state_table.GraphicsLevel[i]); 2817 if (ret) 2818 return ret; 2819 if (i == (dpm_table->sclk_table.count - 1)) 2820 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark = 2821 PPSMC_DISPLAY_WATERMARK_HIGH; 2822 } 2823 2824 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count; 2825 pi->dpm_level_enable_mask.sclk_dpm_enable_mask = 2826 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); 2827 2828 ret = ci_copy_bytes_to_smc(rdev, level_array_address, 2829 (u8 *)levels, level_array_size, 2830 pi->sram_end); 2831 if (ret) 2832 return ret; 2833 2834 return 0; 2835 } 2836 2837 static int ci_populate_ulv_state(struct radeon_device *rdev, 2838 SMU7_Discrete_Ulv *ulv_level) 2839 { 2840 return ci_populate_ulv_level(rdev, ulv_level); 2841 } 2842 2843 static int ci_populate_all_memory_levels(struct radeon_device *rdev) 2844 { 2845 struct ci_power_info *pi = ci_get_pi(rdev); 2846 struct ci_dpm_table *dpm_table = &pi->dpm_table; 2847 u32 level_array_address = pi->dpm_table_start + 2848 offsetof(SMU7_Discrete_DpmTable, MemoryLevel); 2849 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * 2850 SMU7_MAX_LEVELS_MEMORY; 2851 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel; 2852 u32 i, ret; 2853 2854 memset(levels, 0, level_array_size); 2855 2856 for (i = 0; i < dpm_table->mclk_table.count; i++) { 2857 if (dpm_table->mclk_table.dpm_levels[i].value == 0) 2858 return -EINVAL; 2859 ret = ci_populate_single_memory_level(rdev, 2860 dpm_table->mclk_table.dpm_levels[i].value, 2861 &pi->smc_state_table.MemoryLevel[i]); 2862 if (ret) 2863 return ret; 2864 } 2865 2866 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F); 2867 2868 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count; 2869 pi->dpm_level_enable_mask.mclk_dpm_enable_mask = 2870 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); 2871 2872 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark = 2873 PPSMC_DISPLAY_WATERMARK_HIGH; 2874 2875 ret = ci_copy_bytes_to_smc(rdev, level_array_address, 2876 (u8 *)levels, level_array_size, 2877 pi->sram_end); 2878 if (ret) 2879 return ret; 2880 2881 return 0; 2882 } 2883 2884 static void ci_reset_single_dpm_table(struct radeon_device *rdev, 2885 struct ci_single_dpm_table* dpm_table, 2886 u32 count) 2887 { 2888 u32 i; 2889 2890 dpm_table->count = count; 2891 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) 2892 dpm_table->dpm_levels[i].enabled = false; 2893 } 2894 2895 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table, 2896 u32 index, u32 pcie_gen, u32 pcie_lanes) 2897 { 2898 dpm_table->dpm_levels[index].value = pcie_gen; 2899 dpm_table->dpm_levels[index].param1 = pcie_lanes; 2900 dpm_table->dpm_levels[index].enabled = true; 2901 } 2902 2903 static int ci_setup_default_pcie_tables(struct radeon_device *rdev) 2904 { 2905 struct ci_power_info *pi = ci_get_pi(rdev); 2906 2907 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) 2908 return -EINVAL; 2909 2910 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) { 2911 pi->pcie_gen_powersaving = pi->pcie_gen_performance; 2912 pi->pcie_lane_powersaving = pi->pcie_lane_performance; 2913 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) { 2914 pi->pcie_gen_performance = pi->pcie_gen_powersaving; 2915 pi->pcie_lane_performance = pi->pcie_lane_powersaving; 2916 } 2917 2918 ci_reset_single_dpm_table(rdev, 2919 &pi->dpm_table.pcie_speed_table, 2920 SMU7_MAX_LEVELS_LINK); 2921 2922 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, 2923 pi->pcie_gen_powersaving.min, 2924 pi->pcie_lane_powersaving.min); 2925 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1, 2926 pi->pcie_gen_performance.min, 2927 pi->pcie_lane_performance.min); 2928 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2, 2929 pi->pcie_gen_powersaving.min, 2930 pi->pcie_lane_powersaving.max); 2931 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3, 2932 pi->pcie_gen_performance.min, 2933 pi->pcie_lane_performance.max); 2934 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4, 2935 pi->pcie_gen_powersaving.max, 2936 pi->pcie_lane_powersaving.max); 2937 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5, 2938 pi->pcie_gen_performance.max, 2939 pi->pcie_lane_performance.max); 2940 2941 pi->dpm_table.pcie_speed_table.count = 6; 2942 2943 return 0; 2944 } 2945 2946 static int ci_setup_default_dpm_tables(struct radeon_device *rdev) 2947 { 2948 struct ci_power_info *pi = ci_get_pi(rdev); 2949 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table = 2950 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 2951 struct radeon_clock_voltage_dependency_table *allowed_mclk_table = 2952 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk; 2953 struct radeon_cac_leakage_table *std_voltage_table = 2954 &rdev->pm.dpm.dyn_state.cac_leakage_table; 2955 u32 i; 2956 2957 if (allowed_sclk_vddc_table == NULL) 2958 return -EINVAL; 2959 if (allowed_sclk_vddc_table->count < 1) 2960 return -EINVAL; 2961 if (allowed_mclk_table == NULL) 2962 return -EINVAL; 2963 if (allowed_mclk_table->count < 1) 2964 return -EINVAL; 2965 2966 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table)); 2967 2968 ci_reset_single_dpm_table(rdev, 2969 &pi->dpm_table.sclk_table, 2970 SMU7_MAX_LEVELS_GRAPHICS); 2971 ci_reset_single_dpm_table(rdev, 2972 &pi->dpm_table.mclk_table, 2973 SMU7_MAX_LEVELS_MEMORY); 2974 ci_reset_single_dpm_table(rdev, 2975 &pi->dpm_table.vddc_table, 2976 SMU7_MAX_LEVELS_VDDC); 2977 ci_reset_single_dpm_table(rdev, 2978 &pi->dpm_table.vddci_table, 2979 SMU7_MAX_LEVELS_VDDCI); 2980 ci_reset_single_dpm_table(rdev, 2981 &pi->dpm_table.mvdd_table, 2982 SMU7_MAX_LEVELS_MVDD); 2983 2984 pi->dpm_table.sclk_table.count = 0; 2985 for (i = 0; i < allowed_sclk_vddc_table->count; i++) { 2986 if ((i == 0) || 2987 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value != 2988 allowed_sclk_vddc_table->entries[i].clk)) { 2989 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value = 2990 allowed_sclk_vddc_table->entries[i].clk; 2991 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true; 2992 pi->dpm_table.sclk_table.count++; 2993 } 2994 } 2995 2996 pi->dpm_table.mclk_table.count = 0; 2997 for (i = 0; i < allowed_mclk_table->count; i++) { 2998 if ((i==0) || 2999 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value != 3000 allowed_mclk_table->entries[i].clk)) { 3001 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value = 3002 allowed_mclk_table->entries[i].clk; 3003 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true; 3004 pi->dpm_table.mclk_table.count++; 3005 } 3006 } 3007 3008 for (i = 0; i < allowed_sclk_vddc_table->count; i++) { 3009 pi->dpm_table.vddc_table.dpm_levels[i].value = 3010 allowed_sclk_vddc_table->entries[i].v; 3011 pi->dpm_table.vddc_table.dpm_levels[i].param1 = 3012 std_voltage_table->entries[i].leakage; 3013 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true; 3014 } 3015 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count; 3016 3017 allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk; 3018 if (allowed_mclk_table) { 3019 for (i = 0; i < allowed_mclk_table->count; i++) { 3020 pi->dpm_table.vddci_table.dpm_levels[i].value = 3021 allowed_mclk_table->entries[i].v; 3022 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true; 3023 } 3024 pi->dpm_table.vddci_table.count = allowed_mclk_table->count; 3025 } 3026 3027 allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk; 3028 if (allowed_mclk_table) { 3029 for (i = 0; i < allowed_mclk_table->count; i++) { 3030 pi->dpm_table.mvdd_table.dpm_levels[i].value = 3031 allowed_mclk_table->entries[i].v; 3032 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true; 3033 } 3034 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count; 3035 } 3036 3037 ci_setup_default_pcie_tables(rdev); 3038 3039 return 0; 3040 } 3041 3042 static int ci_find_boot_level(struct ci_single_dpm_table *table, 3043 u32 value, u32 *boot_level) 3044 { 3045 u32 i; 3046 int ret = -EINVAL; 3047 3048 for(i = 0; i < table->count; i++) { 3049 if (value == table->dpm_levels[i].value) { 3050 *boot_level = i; 3051 ret = 0; 3052 } 3053 } 3054 3055 return ret; 3056 } 3057 3058 static int ci_init_smc_table(struct radeon_device *rdev) 3059 { 3060 struct ci_power_info *pi = ci_get_pi(rdev); 3061 struct ci_ulv_parm *ulv = &pi->ulv; 3062 struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps; 3063 SMU7_Discrete_DpmTable *table = &pi->smc_state_table; 3064 int ret; 3065 3066 ret = ci_setup_default_dpm_tables(rdev); 3067 if (ret) 3068 return ret; 3069 3070 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) 3071 ci_populate_smc_voltage_tables(rdev, table); 3072 3073 ci_init_fps_limits(rdev); 3074 3075 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) 3076 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; 3077 3078 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) 3079 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; 3080 3081 if (pi->mem_gddr5) 3082 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; 3083 3084 if (ulv->supported) { 3085 ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv); 3086 if (ret) 3087 return ret; 3088 WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter); 3089 } 3090 3091 ret = ci_populate_all_graphic_levels(rdev); 3092 if (ret) 3093 return ret; 3094 3095 ret = ci_populate_all_memory_levels(rdev); 3096 if (ret) 3097 return ret; 3098 3099 ci_populate_smc_link_level(rdev, table); 3100 3101 ret = ci_populate_smc_acpi_level(rdev, table); 3102 if (ret) 3103 return ret; 3104 3105 ret = ci_populate_smc_vce_level(rdev, table); 3106 if (ret) 3107 return ret; 3108 3109 ret = ci_populate_smc_acp_level(rdev, table); 3110 if (ret) 3111 return ret; 3112 3113 ret = ci_populate_smc_samu_level(rdev, table); 3114 if (ret) 3115 return ret; 3116 3117 ret = ci_do_program_memory_timing_parameters(rdev); 3118 if (ret) 3119 return ret; 3120 3121 ret = ci_populate_smc_uvd_level(rdev, table); 3122 if (ret) 3123 return ret; 3124 3125 table->UvdBootLevel = 0; 3126 table->VceBootLevel = 0; 3127 table->AcpBootLevel = 0; 3128 table->SamuBootLevel = 0; 3129 table->GraphicsBootLevel = 0; 3130 table->MemoryBootLevel = 0; 3131 3132 ret = ci_find_boot_level(&pi->dpm_table.sclk_table, 3133 pi->vbios_boot_state.sclk_bootup_value, 3134 (u32 *)&pi->smc_state_table.GraphicsBootLevel); 3135 3136 ret = ci_find_boot_level(&pi->dpm_table.mclk_table, 3137 pi->vbios_boot_state.mclk_bootup_value, 3138 (u32 *)&pi->smc_state_table.MemoryBootLevel); 3139 3140 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value; 3141 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value; 3142 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value; 3143 3144 ci_populate_smc_initial_state(rdev, radeon_boot_state); 3145 3146 ret = ci_populate_bapm_parameters_in_dpm_table(rdev); 3147 if (ret) 3148 return ret; 3149 3150 table->UVDInterval = 1; 3151 table->VCEInterval = 1; 3152 table->ACPInterval = 1; 3153 table->SAMUInterval = 1; 3154 table->GraphicsVoltageChangeEnable = 1; 3155 table->GraphicsThermThrottleEnable = 1; 3156 table->GraphicsInterval = 1; 3157 table->VoltageInterval = 1; 3158 table->ThermalInterval = 1; 3159 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high * 3160 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000); 3161 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low * 3162 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000); 3163 table->MemoryVoltageChangeEnable = 1; 3164 table->MemoryInterval = 1; 3165 table->VoltageResponseTime = 0; 3166 table->VddcVddciDelta = 4000; 3167 table->PhaseResponseTime = 0; 3168 table->MemoryThermThrottleEnable = 1; 3169 table->PCIeBootLinkLevel = 0; 3170 table->PCIeGenInterval = 1; 3171 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) 3172 table->SVI2Enable = 1; 3173 else 3174 table->SVI2Enable = 0; 3175 3176 table->ThermGpio = 17; 3177 table->SclkStepSize = 0x4000; 3178 3179 table->SystemFlags = cpu_to_be32(table->SystemFlags); 3180 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid); 3181 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase); 3182 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid); 3183 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid); 3184 table->SclkStepSize = cpu_to_be32(table->SclkStepSize); 3185 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh); 3186 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow); 3187 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta); 3188 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime); 3189 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime); 3190 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE); 3191 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE); 3192 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE); 3193 3194 ret = ci_copy_bytes_to_smc(rdev, 3195 pi->dpm_table_start + 3196 offsetof(SMU7_Discrete_DpmTable, SystemFlags), 3197 (u8 *)&table->SystemFlags, 3198 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController), 3199 pi->sram_end); 3200 if (ret) 3201 return ret; 3202 3203 return 0; 3204 } 3205 3206 static void ci_trim_single_dpm_states(struct radeon_device *rdev, 3207 struct ci_single_dpm_table *dpm_table, 3208 u32 low_limit, u32 high_limit) 3209 { 3210 u32 i; 3211 3212 for (i = 0; i < dpm_table->count; i++) { 3213 if ((dpm_table->dpm_levels[i].value < low_limit) || 3214 (dpm_table->dpm_levels[i].value > high_limit)) 3215 dpm_table->dpm_levels[i].enabled = false; 3216 else 3217 dpm_table->dpm_levels[i].enabled = true; 3218 } 3219 } 3220 3221 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev, 3222 u32 speed_low, u32 lanes_low, 3223 u32 speed_high, u32 lanes_high) 3224 { 3225 struct ci_power_info *pi = ci_get_pi(rdev); 3226 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table; 3227 u32 i, j; 3228 3229 for (i = 0; i < pcie_table->count; i++) { 3230 if ((pcie_table->dpm_levels[i].value < speed_low) || 3231 (pcie_table->dpm_levels[i].param1 < lanes_low) || 3232 (pcie_table->dpm_levels[i].value > speed_high) || 3233 (pcie_table->dpm_levels[i].param1 > lanes_high)) 3234 pcie_table->dpm_levels[i].enabled = false; 3235 else 3236 pcie_table->dpm_levels[i].enabled = true; 3237 } 3238 3239 for (i = 0; i < pcie_table->count; i++) { 3240 if (pcie_table->dpm_levels[i].enabled) { 3241 for (j = i + 1; j < pcie_table->count; j++) { 3242 if (pcie_table->dpm_levels[j].enabled) { 3243 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) && 3244 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1)) 3245 pcie_table->dpm_levels[j].enabled = false; 3246 } 3247 } 3248 } 3249 } 3250 } 3251 3252 static int ci_trim_dpm_states(struct radeon_device *rdev, 3253 struct radeon_ps *radeon_state) 3254 { 3255 struct ci_ps *state = ci_get_ps(radeon_state); 3256 struct ci_power_info *pi = ci_get_pi(rdev); 3257 u32 high_limit_count; 3258 3259 if (state->performance_level_count < 1) 3260 return -EINVAL; 3261 3262 if (state->performance_level_count == 1) 3263 high_limit_count = 0; 3264 else 3265 high_limit_count = 1; 3266 3267 ci_trim_single_dpm_states(rdev, 3268 &pi->dpm_table.sclk_table, 3269 state->performance_levels[0].sclk, 3270 state->performance_levels[high_limit_count].sclk); 3271 3272 ci_trim_single_dpm_states(rdev, 3273 &pi->dpm_table.mclk_table, 3274 state->performance_levels[0].mclk, 3275 state->performance_levels[high_limit_count].mclk); 3276 3277 ci_trim_pcie_dpm_states(rdev, 3278 state->performance_levels[0].pcie_gen, 3279 state->performance_levels[0].pcie_lane, 3280 state->performance_levels[high_limit_count].pcie_gen, 3281 state->performance_levels[high_limit_count].pcie_lane); 3282 3283 return 0; 3284 } 3285 3286 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev) 3287 { 3288 struct radeon_clock_voltage_dependency_table *disp_voltage_table = 3289 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk; 3290 struct radeon_clock_voltage_dependency_table *vddc_table = 3291 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 3292 u32 requested_voltage = 0; 3293 u32 i; 3294 3295 if (disp_voltage_table == NULL) 3296 return -EINVAL; 3297 if (!disp_voltage_table->count) 3298 return -EINVAL; 3299 3300 for (i = 0; i < disp_voltage_table->count; i++) { 3301 if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk) 3302 requested_voltage = disp_voltage_table->entries[i].v; 3303 } 3304 3305 for (i = 0; i < vddc_table->count; i++) { 3306 if (requested_voltage <= vddc_table->entries[i].v) { 3307 requested_voltage = vddc_table->entries[i].v; 3308 return (ci_send_msg_to_smc_with_parameter(rdev, 3309 PPSMC_MSG_VddC_Request, 3310 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ? 3311 0 : -EINVAL; 3312 } 3313 } 3314 3315 return -EINVAL; 3316 } 3317 3318 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev) 3319 { 3320 struct ci_power_info *pi = ci_get_pi(rdev); 3321 PPSMC_Result result; 3322 3323 if (!pi->sclk_dpm_key_disabled) { 3324 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { 3325 result = ci_send_msg_to_smc_with_parameter(rdev, 3326 PPSMC_MSG_SCLKDPM_SetEnabledMask, 3327 pi->dpm_level_enable_mask.sclk_dpm_enable_mask); 3328 if (result != PPSMC_Result_OK) 3329 return -EINVAL; 3330 } 3331 } 3332 3333 if (!pi->mclk_dpm_key_disabled) { 3334 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { 3335 result = ci_send_msg_to_smc_with_parameter(rdev, 3336 PPSMC_MSG_MCLKDPM_SetEnabledMask, 3337 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 3338 if (result != PPSMC_Result_OK) 3339 return -EINVAL; 3340 } 3341 } 3342 3343 if (!pi->pcie_dpm_key_disabled) { 3344 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { 3345 result = ci_send_msg_to_smc_with_parameter(rdev, 3346 PPSMC_MSG_PCIeDPM_SetEnabledMask, 3347 pi->dpm_level_enable_mask.pcie_dpm_enable_mask); 3348 if (result != PPSMC_Result_OK) 3349 return -EINVAL; 3350 } 3351 } 3352 3353 ci_apply_disp_minimum_voltage_request(rdev); 3354 3355 return 0; 3356 } 3357 3358 static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev, 3359 struct radeon_ps *radeon_state) 3360 { 3361 struct ci_power_info *pi = ci_get_pi(rdev); 3362 struct ci_ps *state = ci_get_ps(radeon_state); 3363 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table; 3364 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk; 3365 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table; 3366 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk; 3367 u32 i; 3368 3369 pi->need_update_smu7_dpm_table = 0; 3370 3371 for (i = 0; i < sclk_table->count; i++) { 3372 if (sclk == sclk_table->dpm_levels[i].value) 3373 break; 3374 } 3375 3376 if (i >= sclk_table->count) { 3377 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; 3378 } else { 3379 /* XXX check display min clock requirements */ 3380 if (0 != CISLAND_MINIMUM_ENGINE_CLOCK) 3381 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; 3382 } 3383 3384 for (i = 0; i < mclk_table->count; i++) { 3385 if (mclk == mclk_table->dpm_levels[i].value) 3386 break; 3387 } 3388 3389 if (i >= mclk_table->count) 3390 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; 3391 3392 if (rdev->pm.dpm.current_active_crtc_count != 3393 rdev->pm.dpm.new_active_crtc_count) 3394 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; 3395 } 3396 3397 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev, 3398 struct radeon_ps *radeon_state) 3399 { 3400 struct ci_power_info *pi = ci_get_pi(rdev); 3401 struct ci_ps *state = ci_get_ps(radeon_state); 3402 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk; 3403 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk; 3404 struct ci_dpm_table *dpm_table = &pi->dpm_table; 3405 int ret; 3406 3407 if (!pi->need_update_smu7_dpm_table) 3408 return 0; 3409 3410 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) 3411 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk; 3412 3413 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) 3414 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk; 3415 3416 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) { 3417 ret = ci_populate_all_graphic_levels(rdev); 3418 if (ret) 3419 return ret; 3420 } 3421 3422 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) { 3423 ret = ci_populate_all_memory_levels(rdev); 3424 if (ret) 3425 return ret; 3426 } 3427 3428 return 0; 3429 } 3430 3431 static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable) 3432 { 3433 struct ci_power_info *pi = ci_get_pi(rdev); 3434 const struct radeon_clock_and_voltage_limits *max_limits; 3435 int i; 3436 3437 if (rdev->pm.dpm.ac_power) 3438 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 3439 else 3440 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 3441 3442 if (enable) { 3443 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0; 3444 3445 for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 3446 if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 3447 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i; 3448 3449 if (!pi->caps_uvd_dpm) 3450 break; 3451 } 3452 } 3453 3454 ci_send_msg_to_smc_with_parameter(rdev, 3455 PPSMC_MSG_UVDDPM_SetEnabledMask, 3456 pi->dpm_level_enable_mask.uvd_dpm_enable_mask); 3457 3458 if (pi->last_mclk_dpm_enable_mask & 0x1) { 3459 pi->uvd_enabled = true; 3460 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; 3461 ci_send_msg_to_smc_with_parameter(rdev, 3462 PPSMC_MSG_MCLKDPM_SetEnabledMask, 3463 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 3464 } 3465 } else { 3466 if (pi->last_mclk_dpm_enable_mask & 0x1) { 3467 pi->uvd_enabled = false; 3468 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1; 3469 ci_send_msg_to_smc_with_parameter(rdev, 3470 PPSMC_MSG_MCLKDPM_SetEnabledMask, 3471 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 3472 } 3473 } 3474 3475 return (ci_send_msg_to_smc(rdev, enable ? 3476 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ? 3477 0 : -EINVAL; 3478 } 3479 3480 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable) 3481 { 3482 struct ci_power_info *pi = ci_get_pi(rdev); 3483 const struct radeon_clock_and_voltage_limits *max_limits; 3484 int i; 3485 3486 if (rdev->pm.dpm.ac_power) 3487 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 3488 else 3489 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 3490 3491 if (enable) { 3492 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0; 3493 for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 3494 if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 3495 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i; 3496 3497 if (!pi->caps_vce_dpm) 3498 break; 3499 } 3500 } 3501 3502 ci_send_msg_to_smc_with_parameter(rdev, 3503 PPSMC_MSG_VCEDPM_SetEnabledMask, 3504 pi->dpm_level_enable_mask.vce_dpm_enable_mask); 3505 } 3506 3507 return (ci_send_msg_to_smc(rdev, enable ? 3508 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ? 3509 0 : -EINVAL; 3510 } 3511 3512 #if 0 3513 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable) 3514 { 3515 struct ci_power_info *pi = ci_get_pi(rdev); 3516 const struct radeon_clock_and_voltage_limits *max_limits; 3517 int i; 3518 3519 if (rdev->pm.dpm.ac_power) 3520 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 3521 else 3522 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 3523 3524 if (enable) { 3525 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0; 3526 for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 3527 if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 3528 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i; 3529 3530 if (!pi->caps_samu_dpm) 3531 break; 3532 } 3533 } 3534 3535 ci_send_msg_to_smc_with_parameter(rdev, 3536 PPSMC_MSG_SAMUDPM_SetEnabledMask, 3537 pi->dpm_level_enable_mask.samu_dpm_enable_mask); 3538 } 3539 return (ci_send_msg_to_smc(rdev, enable ? 3540 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ? 3541 0 : -EINVAL; 3542 } 3543 3544 static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable) 3545 { 3546 struct ci_power_info *pi = ci_get_pi(rdev); 3547 const struct radeon_clock_and_voltage_limits *max_limits; 3548 int i; 3549 3550 if (rdev->pm.dpm.ac_power) 3551 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 3552 else 3553 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 3554 3555 if (enable) { 3556 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0; 3557 for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 3558 if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 3559 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i; 3560 3561 if (!pi->caps_acp_dpm) 3562 break; 3563 } 3564 } 3565 3566 ci_send_msg_to_smc_with_parameter(rdev, 3567 PPSMC_MSG_ACPDPM_SetEnabledMask, 3568 pi->dpm_level_enable_mask.acp_dpm_enable_mask); 3569 } 3570 3571 return (ci_send_msg_to_smc(rdev, enable ? 3572 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ? 3573 0 : -EINVAL; 3574 } 3575 #endif 3576 3577 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate) 3578 { 3579 struct ci_power_info *pi = ci_get_pi(rdev); 3580 u32 tmp; 3581 3582 if (!gate) { 3583 if (pi->caps_uvd_dpm || 3584 (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0)) 3585 pi->smc_state_table.UvdBootLevel = 0; 3586 else 3587 pi->smc_state_table.UvdBootLevel = 3588 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; 3589 3590 tmp = RREG32_SMC(DPM_TABLE_475); 3591 tmp &= ~UvdBootLevel_MASK; 3592 tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel); 3593 WREG32_SMC(DPM_TABLE_475, tmp); 3594 } 3595 3596 return ci_enable_uvd_dpm(rdev, !gate); 3597 } 3598 3599 static u8 ci_get_vce_boot_level(struct radeon_device *rdev) 3600 { 3601 u8 i; 3602 u32 min_evclk = 30000; /* ??? */ 3603 struct radeon_vce_clock_voltage_dependency_table *table = 3604 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 3605 3606 for (i = 0; i < table->count; i++) { 3607 if (table->entries[i].evclk >= min_evclk) 3608 return i; 3609 } 3610 3611 return table->count - 1; 3612 } 3613 3614 static int ci_update_vce_dpm(struct radeon_device *rdev, 3615 struct radeon_ps *radeon_new_state, 3616 struct radeon_ps *radeon_current_state) 3617 { 3618 struct ci_power_info *pi = ci_get_pi(rdev); 3619 int ret = 0; 3620 u32 tmp; 3621 3622 if (radeon_current_state->evclk != radeon_new_state->evclk) { 3623 if (radeon_new_state->evclk) { 3624 /* turn the clocks on when encoding */ 3625 cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false); 3626 3627 pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev); 3628 tmp = RREG32_SMC(DPM_TABLE_475); 3629 tmp &= ~VceBootLevel_MASK; 3630 tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel); 3631 WREG32_SMC(DPM_TABLE_475, tmp); 3632 3633 ret = ci_enable_vce_dpm(rdev, true); 3634 } else { 3635 /* turn the clocks off when not encoding */ 3636 cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true); 3637 3638 ret = ci_enable_vce_dpm(rdev, false); 3639 } 3640 } 3641 return ret; 3642 } 3643 3644 #if 0 3645 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate) 3646 { 3647 return ci_enable_samu_dpm(rdev, gate); 3648 } 3649 3650 static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate) 3651 { 3652 struct ci_power_info *pi = ci_get_pi(rdev); 3653 u32 tmp; 3654 3655 if (!gate) { 3656 pi->smc_state_table.AcpBootLevel = 0; 3657 3658 tmp = RREG32_SMC(DPM_TABLE_475); 3659 tmp &= ~AcpBootLevel_MASK; 3660 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel); 3661 WREG32_SMC(DPM_TABLE_475, tmp); 3662 } 3663 3664 return ci_enable_acp_dpm(rdev, !gate); 3665 } 3666 #endif 3667 3668 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev, 3669 struct radeon_ps *radeon_state) 3670 { 3671 struct ci_power_info *pi = ci_get_pi(rdev); 3672 int ret; 3673 3674 ret = ci_trim_dpm_states(rdev, radeon_state); 3675 if (ret) 3676 return ret; 3677 3678 pi->dpm_level_enable_mask.sclk_dpm_enable_mask = 3679 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table); 3680 pi->dpm_level_enable_mask.mclk_dpm_enable_mask = 3681 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table); 3682 pi->last_mclk_dpm_enable_mask = 3683 pi->dpm_level_enable_mask.mclk_dpm_enable_mask; 3684 if (pi->uvd_enabled) { 3685 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1) 3686 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; 3687 } 3688 pi->dpm_level_enable_mask.pcie_dpm_enable_mask = 3689 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table); 3690 3691 return 0; 3692 } 3693 3694 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev, 3695 u32 level_mask) 3696 { 3697 u32 level = 0; 3698 3699 while ((level_mask & (1 << level)) == 0) 3700 level++; 3701 3702 return level; 3703 } 3704 3705 3706 int ci_dpm_force_performance_level(struct radeon_device *rdev, 3707 enum radeon_dpm_forced_level level) 3708 { 3709 struct ci_power_info *pi = ci_get_pi(rdev); 3710 PPSMC_Result smc_result; 3711 u32 tmp, levels, i; 3712 int ret; 3713 3714 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { 3715 if ((!pi->sclk_dpm_key_disabled) && 3716 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { 3717 levels = 0; 3718 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask; 3719 while (tmp >>= 1) 3720 levels++; 3721 if (levels) { 3722 ret = ci_dpm_force_state_sclk(rdev, levels); 3723 if (ret) 3724 return ret; 3725 for (i = 0; i < rdev->usec_timeout; i++) { 3726 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & 3727 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT; 3728 if (tmp == levels) 3729 break; 3730 udelay(1); 3731 } 3732 } 3733 } 3734 if ((!pi->mclk_dpm_key_disabled) && 3735 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { 3736 levels = 0; 3737 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask; 3738 while (tmp >>= 1) 3739 levels++; 3740 if (levels) { 3741 ret = ci_dpm_force_state_mclk(rdev, levels); 3742 if (ret) 3743 return ret; 3744 for (i = 0; i < rdev->usec_timeout; i++) { 3745 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & 3746 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT; 3747 if (tmp == levels) 3748 break; 3749 udelay(1); 3750 } 3751 } 3752 } 3753 if ((!pi->pcie_dpm_key_disabled) && 3754 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { 3755 levels = 0; 3756 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask; 3757 while (tmp >>= 1) 3758 levels++; 3759 if (levels) { 3760 ret = ci_dpm_force_state_pcie(rdev, level); 3761 if (ret) 3762 return ret; 3763 for (i = 0; i < rdev->usec_timeout; i++) { 3764 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) & 3765 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT; 3766 if (tmp == levels) 3767 break; 3768 udelay(1); 3769 } 3770 } 3771 } 3772 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { 3773 if ((!pi->sclk_dpm_key_disabled) && 3774 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { 3775 levels = ci_get_lowest_enabled_level(rdev, 3776 pi->dpm_level_enable_mask.sclk_dpm_enable_mask); 3777 ret = ci_dpm_force_state_sclk(rdev, levels); 3778 if (ret) 3779 return ret; 3780 for (i = 0; i < rdev->usec_timeout; i++) { 3781 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & 3782 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT; 3783 if (tmp == levels) 3784 break; 3785 udelay(1); 3786 } 3787 } 3788 if ((!pi->mclk_dpm_key_disabled) && 3789 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { 3790 levels = ci_get_lowest_enabled_level(rdev, 3791 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 3792 ret = ci_dpm_force_state_mclk(rdev, levels); 3793 if (ret) 3794 return ret; 3795 for (i = 0; i < rdev->usec_timeout; i++) { 3796 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & 3797 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT; 3798 if (tmp == levels) 3799 break; 3800 udelay(1); 3801 } 3802 } 3803 if ((!pi->pcie_dpm_key_disabled) && 3804 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { 3805 levels = ci_get_lowest_enabled_level(rdev, 3806 pi->dpm_level_enable_mask.pcie_dpm_enable_mask); 3807 ret = ci_dpm_force_state_pcie(rdev, levels); 3808 if (ret) 3809 return ret; 3810 for (i = 0; i < rdev->usec_timeout; i++) { 3811 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) & 3812 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT; 3813 if (tmp == levels) 3814 break; 3815 udelay(1); 3816 } 3817 } 3818 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { 3819 if (!pi->sclk_dpm_key_disabled) { 3820 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel); 3821 if (smc_result != PPSMC_Result_OK) 3822 return -EINVAL; 3823 } 3824 if (!pi->mclk_dpm_key_disabled) { 3825 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel); 3826 if (smc_result != PPSMC_Result_OK) 3827 return -EINVAL; 3828 } 3829 if (!pi->pcie_dpm_key_disabled) { 3830 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel); 3831 if (smc_result != PPSMC_Result_OK) 3832 return -EINVAL; 3833 } 3834 } 3835 3836 rdev->pm.dpm.forced_level = level; 3837 3838 return 0; 3839 } 3840 3841 static int ci_set_mc_special_registers(struct radeon_device *rdev, 3842 struct ci_mc_reg_table *table) 3843 { 3844 struct ci_power_info *pi = ci_get_pi(rdev); 3845 u8 i, j, k; 3846 u32 temp_reg; 3847 3848 for (i = 0, j = table->last; i < table->last; i++) { 3849 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 3850 return -EINVAL; 3851 switch(table->mc_reg_address[i].s1 << 2) { 3852 case MC_SEQ_MISC1: 3853 temp_reg = RREG32(MC_PMG_CMD_EMRS); 3854 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2; 3855 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2; 3856 for (k = 0; k < table->num_entries; k++) { 3857 table->mc_reg_table_entry[k].mc_data[j] = 3858 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); 3859 } 3860 j++; 3861 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 3862 return -EINVAL; 3863 3864 temp_reg = RREG32(MC_PMG_CMD_MRS); 3865 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2; 3866 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2; 3867 for (k = 0; k < table->num_entries; k++) { 3868 table->mc_reg_table_entry[k].mc_data[j] = 3869 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); 3870 if (!pi->mem_gddr5) 3871 table->mc_reg_table_entry[k].mc_data[j] |= 0x100; 3872 } 3873 j++; 3874 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 3875 return -EINVAL; 3876 3877 if (!pi->mem_gddr5) { 3878 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2; 3879 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2; 3880 for (k = 0; k < table->num_entries; k++) { 3881 table->mc_reg_table_entry[k].mc_data[j] = 3882 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; 3883 } 3884 j++; 3885 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 3886 return -EINVAL; 3887 } 3888 break; 3889 case MC_SEQ_RESERVE_M: 3890 temp_reg = RREG32(MC_PMG_CMD_MRS1); 3891 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2; 3892 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2; 3893 for (k = 0; k < table->num_entries; k++) { 3894 table->mc_reg_table_entry[k].mc_data[j] = 3895 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); 3896 } 3897 j++; 3898 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 3899 return -EINVAL; 3900 break; 3901 default: 3902 break; 3903 } 3904 3905 } 3906 3907 table->last = j; 3908 3909 return 0; 3910 } 3911 3912 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg) 3913 { 3914 bool result = true; 3915 3916 switch(in_reg) { 3917 case MC_SEQ_RAS_TIMING >> 2: 3918 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2; 3919 break; 3920 case MC_SEQ_DLL_STBY >> 2: 3921 *out_reg = MC_SEQ_DLL_STBY_LP >> 2; 3922 break; 3923 case MC_SEQ_G5PDX_CMD0 >> 2: 3924 *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2; 3925 break; 3926 case MC_SEQ_G5PDX_CMD1 >> 2: 3927 *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2; 3928 break; 3929 case MC_SEQ_G5PDX_CTRL >> 2: 3930 *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2; 3931 break; 3932 case MC_SEQ_CAS_TIMING >> 2: 3933 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2; 3934 break; 3935 case MC_SEQ_MISC_TIMING >> 2: 3936 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2; 3937 break; 3938 case MC_SEQ_MISC_TIMING2 >> 2: 3939 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2; 3940 break; 3941 case MC_SEQ_PMG_DVS_CMD >> 2: 3942 *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2; 3943 break; 3944 case MC_SEQ_PMG_DVS_CTL >> 2: 3945 *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2; 3946 break; 3947 case MC_SEQ_RD_CTL_D0 >> 2: 3948 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2; 3949 break; 3950 case MC_SEQ_RD_CTL_D1 >> 2: 3951 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2; 3952 break; 3953 case MC_SEQ_WR_CTL_D0 >> 2: 3954 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2; 3955 break; 3956 case MC_SEQ_WR_CTL_D1 >> 2: 3957 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2; 3958 break; 3959 case MC_PMG_CMD_EMRS >> 2: 3960 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2; 3961 break; 3962 case MC_PMG_CMD_MRS >> 2: 3963 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2; 3964 break; 3965 case MC_PMG_CMD_MRS1 >> 2: 3966 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2; 3967 break; 3968 case MC_SEQ_PMG_TIMING >> 2: 3969 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2; 3970 break; 3971 case MC_PMG_CMD_MRS2 >> 2: 3972 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2; 3973 break; 3974 case MC_SEQ_WR_CTL_2 >> 2: 3975 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2; 3976 break; 3977 default: 3978 result = false; 3979 break; 3980 } 3981 3982 return result; 3983 } 3984 3985 static void ci_set_valid_flag(struct ci_mc_reg_table *table) 3986 { 3987 u8 i, j; 3988 3989 for (i = 0; i < table->last; i++) { 3990 for (j = 1; j < table->num_entries; j++) { 3991 if (table->mc_reg_table_entry[j-1].mc_data[i] != 3992 table->mc_reg_table_entry[j].mc_data[i]) { 3993 table->valid_flag |= 1 << i; 3994 break; 3995 } 3996 } 3997 } 3998 } 3999 4000 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table) 4001 { 4002 u32 i; 4003 u16 address; 4004 4005 for (i = 0; i < table->last; i++) { 4006 table->mc_reg_address[i].s0 = 4007 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? 4008 address : table->mc_reg_address[i].s1; 4009 } 4010 } 4011 4012 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table, 4013 struct ci_mc_reg_table *ci_table) 4014 { 4015 u8 i, j; 4016 4017 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4018 return -EINVAL; 4019 if (table->num_entries > MAX_AC_TIMING_ENTRIES) 4020 return -EINVAL; 4021 4022 for (i = 0; i < table->last; i++) 4023 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; 4024 4025 ci_table->last = table->last; 4026 4027 for (i = 0; i < table->num_entries; i++) { 4028 ci_table->mc_reg_table_entry[i].mclk_max = 4029 table->mc_reg_table_entry[i].mclk_max; 4030 for (j = 0; j < table->last; j++) 4031 ci_table->mc_reg_table_entry[i].mc_data[j] = 4032 table->mc_reg_table_entry[i].mc_data[j]; 4033 } 4034 ci_table->num_entries = table->num_entries; 4035 4036 return 0; 4037 } 4038 4039 static int ci_initialize_mc_reg_table(struct radeon_device *rdev) 4040 { 4041 struct ci_power_info *pi = ci_get_pi(rdev); 4042 struct atom_mc_reg_table *table; 4043 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table; 4044 u8 module_index = rv770_get_memory_module_index(rdev); 4045 int ret; 4046 4047 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); 4048 if (!table) 4049 return -ENOMEM; 4050 4051 WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING)); 4052 WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING)); 4053 WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY)); 4054 WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0)); 4055 WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1)); 4056 WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL)); 4057 WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD)); 4058 WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL)); 4059 WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING)); 4060 WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2)); 4061 WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS)); 4062 WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS)); 4063 WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1)); 4064 WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0)); 4065 WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1)); 4066 WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0)); 4067 WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1)); 4068 WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING)); 4069 WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2)); 4070 WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2)); 4071 4072 ret = radeon_atom_init_mc_reg_table(rdev, module_index, table); 4073 if (ret) 4074 goto init_mc_done; 4075 4076 ret = ci_copy_vbios_mc_reg_table(table, ci_table); 4077 if (ret) 4078 goto init_mc_done; 4079 4080 ci_set_s0_mc_reg_index(ci_table); 4081 4082 ret = ci_set_mc_special_registers(rdev, ci_table); 4083 if (ret) 4084 goto init_mc_done; 4085 4086 ci_set_valid_flag(ci_table); 4087 4088 init_mc_done: 4089 kfree(table); 4090 4091 return ret; 4092 } 4093 4094 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev, 4095 SMU7_Discrete_MCRegisters *mc_reg_table) 4096 { 4097 struct ci_power_info *pi = ci_get_pi(rdev); 4098 u32 i, j; 4099 4100 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) { 4101 if (pi->mc_reg_table.valid_flag & (1 << j)) { 4102 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4103 return -EINVAL; 4104 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0); 4105 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1); 4106 i++; 4107 } 4108 } 4109 4110 mc_reg_table->last = (u8)i; 4111 4112 return 0; 4113 } 4114 4115 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry, 4116 SMU7_Discrete_MCRegisterSet *data, 4117 u32 num_entries, u32 valid_flag) 4118 { 4119 u32 i, j; 4120 4121 for (i = 0, j = 0; j < num_entries; j++) { 4122 if (valid_flag & (1 << j)) { 4123 data->value[i] = cpu_to_be32(entry->mc_data[j]); 4124 i++; 4125 } 4126 } 4127 } 4128 4129 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev, 4130 const u32 memory_clock, 4131 SMU7_Discrete_MCRegisterSet *mc_reg_table_data) 4132 { 4133 struct ci_power_info *pi = ci_get_pi(rdev); 4134 u32 i = 0; 4135 4136 for(i = 0; i < pi->mc_reg_table.num_entries; i++) { 4137 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max) 4138 break; 4139 } 4140 4141 if ((i == pi->mc_reg_table.num_entries) && (i > 0)) 4142 --i; 4143 4144 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i], 4145 mc_reg_table_data, pi->mc_reg_table.last, 4146 pi->mc_reg_table.valid_flag); 4147 } 4148 4149 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev, 4150 SMU7_Discrete_MCRegisters *mc_reg_table) 4151 { 4152 struct ci_power_info *pi = ci_get_pi(rdev); 4153 u32 i; 4154 4155 for (i = 0; i < pi->dpm_table.mclk_table.count; i++) 4156 ci_convert_mc_reg_table_entry_to_smc(rdev, 4157 pi->dpm_table.mclk_table.dpm_levels[i].value, 4158 &mc_reg_table->data[i]); 4159 } 4160 4161 static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev) 4162 { 4163 struct ci_power_info *pi = ci_get_pi(rdev); 4164 int ret; 4165 4166 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters)); 4167 4168 ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table); 4169 if (ret) 4170 return ret; 4171 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table); 4172 4173 return ci_copy_bytes_to_smc(rdev, 4174 pi->mc_reg_table_start, 4175 (u8 *)&pi->smc_mc_reg_table, 4176 sizeof(SMU7_Discrete_MCRegisters), 4177 pi->sram_end); 4178 } 4179 4180 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev) 4181 { 4182 struct ci_power_info *pi = ci_get_pi(rdev); 4183 4184 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) 4185 return 0; 4186 4187 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters)); 4188 4189 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table); 4190 4191 return ci_copy_bytes_to_smc(rdev, 4192 pi->mc_reg_table_start + 4193 offsetof(SMU7_Discrete_MCRegisters, data[0]), 4194 (u8 *)&pi->smc_mc_reg_table.data[0], 4195 sizeof(SMU7_Discrete_MCRegisterSet) * 4196 pi->dpm_table.mclk_table.count, 4197 pi->sram_end); 4198 } 4199 4200 static void ci_enable_voltage_control(struct radeon_device *rdev) 4201 { 4202 u32 tmp = RREG32_SMC(GENERAL_PWRMGT); 4203 4204 tmp |= VOLT_PWRMGT_EN; 4205 WREG32_SMC(GENERAL_PWRMGT, tmp); 4206 } 4207 4208 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev, 4209 struct radeon_ps *radeon_state) 4210 { 4211 struct ci_ps *state = ci_get_ps(radeon_state); 4212 int i; 4213 u16 pcie_speed, max_speed = 0; 4214 4215 for (i = 0; i < state->performance_level_count; i++) { 4216 pcie_speed = state->performance_levels[i].pcie_gen; 4217 if (max_speed < pcie_speed) 4218 max_speed = pcie_speed; 4219 } 4220 4221 return max_speed; 4222 } 4223 4224 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev) 4225 { 4226 u32 speed_cntl = 0; 4227 4228 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK; 4229 speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT; 4230 4231 return (u16)speed_cntl; 4232 } 4233 4234 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev) 4235 { 4236 u32 link_width = 0; 4237 4238 link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK; 4239 link_width >>= LC_LINK_WIDTH_RD_SHIFT; 4240 4241 switch (link_width) { 4242 case RADEON_PCIE_LC_LINK_WIDTH_X1: 4243 return 1; 4244 case RADEON_PCIE_LC_LINK_WIDTH_X2: 4245 return 2; 4246 case RADEON_PCIE_LC_LINK_WIDTH_X4: 4247 return 4; 4248 case RADEON_PCIE_LC_LINK_WIDTH_X8: 4249 return 8; 4250 case RADEON_PCIE_LC_LINK_WIDTH_X12: 4251 /* not actually supported */ 4252 return 12; 4253 case RADEON_PCIE_LC_LINK_WIDTH_X0: 4254 case RADEON_PCIE_LC_LINK_WIDTH_X16: 4255 default: 4256 return 16; 4257 } 4258 } 4259 4260 static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev, 4261 struct radeon_ps *radeon_new_state, 4262 struct radeon_ps *radeon_current_state) 4263 { 4264 struct ci_power_info *pi = ci_get_pi(rdev); 4265 enum radeon_pcie_gen target_link_speed = 4266 ci_get_maximum_link_speed(rdev, radeon_new_state); 4267 enum radeon_pcie_gen current_link_speed; 4268 4269 if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID) 4270 current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state); 4271 else 4272 current_link_speed = pi->force_pcie_gen; 4273 4274 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID; 4275 pi->pspp_notify_required = false; 4276 if (target_link_speed > current_link_speed) { 4277 switch (target_link_speed) { 4278 #ifdef CONFIG_ACPI 4279 case RADEON_PCIE_GEN3: 4280 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0) 4281 break; 4282 pi->force_pcie_gen = RADEON_PCIE_GEN2; 4283 if (current_link_speed == RADEON_PCIE_GEN2) 4284 break; 4285 case RADEON_PCIE_GEN2: 4286 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) 4287 break; 4288 #endif 4289 default: 4290 pi->force_pcie_gen = ci_get_current_pcie_speed(rdev); 4291 break; 4292 } 4293 } else { 4294 if (target_link_speed < current_link_speed) 4295 pi->pspp_notify_required = true; 4296 } 4297 } 4298 4299 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev, 4300 struct radeon_ps *radeon_new_state, 4301 struct radeon_ps *radeon_current_state) 4302 { 4303 struct ci_power_info *pi = ci_get_pi(rdev); 4304 enum radeon_pcie_gen target_link_speed = 4305 ci_get_maximum_link_speed(rdev, radeon_new_state); 4306 u8 request; 4307 4308 if (pi->pspp_notify_required) { 4309 if (target_link_speed == RADEON_PCIE_GEN3) 4310 request = PCIE_PERF_REQ_PECI_GEN3; 4311 else if (target_link_speed == RADEON_PCIE_GEN2) 4312 request = PCIE_PERF_REQ_PECI_GEN2; 4313 else 4314 request = PCIE_PERF_REQ_PECI_GEN1; 4315 4316 if ((request == PCIE_PERF_REQ_PECI_GEN1) && 4317 (ci_get_current_pcie_speed(rdev) > 0)) 4318 return; 4319 4320 #ifdef CONFIG_ACPI 4321 radeon_acpi_pcie_performance_request(rdev, request, false); 4322 #endif 4323 } 4324 } 4325 4326 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev) 4327 { 4328 struct ci_power_info *pi = ci_get_pi(rdev); 4329 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table = 4330 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 4331 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table = 4332 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk; 4333 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table = 4334 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk; 4335 4336 if (allowed_sclk_vddc_table == NULL) 4337 return -EINVAL; 4338 if (allowed_sclk_vddc_table->count < 1) 4339 return -EINVAL; 4340 if (allowed_mclk_vddc_table == NULL) 4341 return -EINVAL; 4342 if (allowed_mclk_vddc_table->count < 1) 4343 return -EINVAL; 4344 if (allowed_mclk_vddci_table == NULL) 4345 return -EINVAL; 4346 if (allowed_mclk_vddci_table->count < 1) 4347 return -EINVAL; 4348 4349 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v; 4350 pi->max_vddc_in_pp_table = 4351 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; 4352 4353 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v; 4354 pi->max_vddci_in_pp_table = 4355 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; 4356 4357 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = 4358 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; 4359 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = 4360 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; 4361 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = 4362 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; 4363 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = 4364 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; 4365 4366 return 0; 4367 } 4368 4369 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc) 4370 { 4371 struct ci_power_info *pi = ci_get_pi(rdev); 4372 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage; 4373 u32 leakage_index; 4374 4375 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { 4376 if (leakage_table->leakage_id[leakage_index] == *vddc) { 4377 *vddc = leakage_table->actual_voltage[leakage_index]; 4378 break; 4379 } 4380 } 4381 } 4382 4383 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci) 4384 { 4385 struct ci_power_info *pi = ci_get_pi(rdev); 4386 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage; 4387 u32 leakage_index; 4388 4389 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { 4390 if (leakage_table->leakage_id[leakage_index] == *vddci) { 4391 *vddci = leakage_table->actual_voltage[leakage_index]; 4392 break; 4393 } 4394 } 4395 } 4396 4397 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev, 4398 struct radeon_clock_voltage_dependency_table *table) 4399 { 4400 u32 i; 4401 4402 if (table) { 4403 for (i = 0; i < table->count; i++) 4404 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); 4405 } 4406 } 4407 4408 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev, 4409 struct radeon_clock_voltage_dependency_table *table) 4410 { 4411 u32 i; 4412 4413 if (table) { 4414 for (i = 0; i < table->count; i++) 4415 ci_patch_with_vddci_leakage(rdev, &table->entries[i].v); 4416 } 4417 } 4418 4419 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev, 4420 struct radeon_vce_clock_voltage_dependency_table *table) 4421 { 4422 u32 i; 4423 4424 if (table) { 4425 for (i = 0; i < table->count; i++) 4426 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); 4427 } 4428 } 4429 4430 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev, 4431 struct radeon_uvd_clock_voltage_dependency_table *table) 4432 { 4433 u32 i; 4434 4435 if (table) { 4436 for (i = 0; i < table->count; i++) 4437 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); 4438 } 4439 } 4440 4441 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev, 4442 struct radeon_phase_shedding_limits_table *table) 4443 { 4444 u32 i; 4445 4446 if (table) { 4447 for (i = 0; i < table->count; i++) 4448 ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage); 4449 } 4450 } 4451 4452 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev, 4453 struct radeon_clock_and_voltage_limits *table) 4454 { 4455 if (table) { 4456 ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc); 4457 ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci); 4458 } 4459 } 4460 4461 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev, 4462 struct radeon_cac_leakage_table *table) 4463 { 4464 u32 i; 4465 4466 if (table) { 4467 for (i = 0; i < table->count; i++) 4468 ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc); 4469 } 4470 } 4471 4472 static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev) 4473 { 4474 4475 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, 4476 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk); 4477 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, 4478 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk); 4479 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, 4480 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk); 4481 ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev, 4482 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk); 4483 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev, 4484 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table); 4485 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev, 4486 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table); 4487 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, 4488 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table); 4489 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, 4490 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table); 4491 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev, 4492 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table); 4493 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev, 4494 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac); 4495 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev, 4496 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc); 4497 ci_patch_cac_leakage_table_with_vddc_leakage(rdev, 4498 &rdev->pm.dpm.dyn_state.cac_leakage_table); 4499 4500 } 4501 4502 static void ci_get_memory_type(struct radeon_device *rdev) 4503 { 4504 struct ci_power_info *pi = ci_get_pi(rdev); 4505 u32 tmp; 4506 4507 tmp = RREG32(MC_SEQ_MISC0); 4508 4509 if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) == 4510 MC_SEQ_MISC0_GDDR5_VALUE) 4511 pi->mem_gddr5 = true; 4512 else 4513 pi->mem_gddr5 = false; 4514 4515 } 4516 4517 static void ci_update_current_ps(struct radeon_device *rdev, 4518 struct radeon_ps *rps) 4519 { 4520 struct ci_ps *new_ps = ci_get_ps(rps); 4521 struct ci_power_info *pi = ci_get_pi(rdev); 4522 4523 pi->current_rps = *rps; 4524 pi->current_ps = *new_ps; 4525 pi->current_rps.ps_priv = &pi->current_ps; 4526 } 4527 4528 static void ci_update_requested_ps(struct radeon_device *rdev, 4529 struct radeon_ps *rps) 4530 { 4531 struct ci_ps *new_ps = ci_get_ps(rps); 4532 struct ci_power_info *pi = ci_get_pi(rdev); 4533 4534 pi->requested_rps = *rps; 4535 pi->requested_ps = *new_ps; 4536 pi->requested_rps.ps_priv = &pi->requested_ps; 4537 } 4538 4539 int ci_dpm_pre_set_power_state(struct radeon_device *rdev) 4540 { 4541 struct ci_power_info *pi = ci_get_pi(rdev); 4542 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps; 4543 struct radeon_ps *new_ps = &requested_ps; 4544 4545 ci_update_requested_ps(rdev, new_ps); 4546 4547 ci_apply_state_adjust_rules(rdev, &pi->requested_rps); 4548 4549 return 0; 4550 } 4551 4552 void ci_dpm_post_set_power_state(struct radeon_device *rdev) 4553 { 4554 struct ci_power_info *pi = ci_get_pi(rdev); 4555 struct radeon_ps *new_ps = &pi->requested_rps; 4556 4557 ci_update_current_ps(rdev, new_ps); 4558 } 4559 4560 4561 void ci_dpm_setup_asic(struct radeon_device *rdev) 4562 { 4563 int r; 4564 4565 r = ci_mc_load_microcode(rdev); 4566 if (r) 4567 DRM_ERROR("Failed to load MC firmware!\n"); 4568 ci_read_clock_registers(rdev); 4569 ci_get_memory_type(rdev); 4570 ci_enable_acpi_power_management(rdev); 4571 ci_init_sclk_t(rdev); 4572 } 4573 4574 int ci_dpm_enable(struct radeon_device *rdev) 4575 { 4576 struct ci_power_info *pi = ci_get_pi(rdev); 4577 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; 4578 int ret; 4579 4580 if (ci_is_smc_running(rdev)) 4581 return -EINVAL; 4582 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) { 4583 ci_enable_voltage_control(rdev); 4584 ret = ci_construct_voltage_tables(rdev); 4585 if (ret) { 4586 DRM_ERROR("ci_construct_voltage_tables failed\n"); 4587 return ret; 4588 } 4589 } 4590 if (pi->caps_dynamic_ac_timing) { 4591 ret = ci_initialize_mc_reg_table(rdev); 4592 if (ret) 4593 pi->caps_dynamic_ac_timing = false; 4594 } 4595 if (pi->dynamic_ss) 4596 ci_enable_spread_spectrum(rdev, true); 4597 if (pi->thermal_protection) 4598 ci_enable_thermal_protection(rdev, true); 4599 ci_program_sstp(rdev); 4600 ci_enable_display_gap(rdev); 4601 ci_program_vc(rdev); 4602 ret = ci_upload_firmware(rdev); 4603 if (ret) { 4604 DRM_ERROR("ci_upload_firmware failed\n"); 4605 return ret; 4606 } 4607 ret = ci_process_firmware_header(rdev); 4608 if (ret) { 4609 DRM_ERROR("ci_process_firmware_header failed\n"); 4610 return ret; 4611 } 4612 ret = ci_initial_switch_from_arb_f0_to_f1(rdev); 4613 if (ret) { 4614 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n"); 4615 return ret; 4616 } 4617 ret = ci_init_smc_table(rdev); 4618 if (ret) { 4619 DRM_ERROR("ci_init_smc_table failed\n"); 4620 return ret; 4621 } 4622 ret = ci_init_arb_table_index(rdev); 4623 if (ret) { 4624 DRM_ERROR("ci_init_arb_table_index failed\n"); 4625 return ret; 4626 } 4627 if (pi->caps_dynamic_ac_timing) { 4628 ret = ci_populate_initial_mc_reg_table(rdev); 4629 if (ret) { 4630 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n"); 4631 return ret; 4632 } 4633 } 4634 ret = ci_populate_pm_base(rdev); 4635 if (ret) { 4636 DRM_ERROR("ci_populate_pm_base failed\n"); 4637 return ret; 4638 } 4639 ci_dpm_start_smc(rdev); 4640 ci_enable_vr_hot_gpio_interrupt(rdev); 4641 ret = ci_notify_smc_display_change(rdev, false); 4642 if (ret) { 4643 DRM_ERROR("ci_notify_smc_display_change failed\n"); 4644 return ret; 4645 } 4646 ci_enable_sclk_control(rdev, true); 4647 ret = ci_enable_ulv(rdev, true); 4648 if (ret) { 4649 DRM_ERROR("ci_enable_ulv failed\n"); 4650 return ret; 4651 } 4652 ret = ci_enable_ds_master_switch(rdev, true); 4653 if (ret) { 4654 DRM_ERROR("ci_enable_ds_master_switch failed\n"); 4655 return ret; 4656 } 4657 ret = ci_start_dpm(rdev); 4658 if (ret) { 4659 DRM_ERROR("ci_start_dpm failed\n"); 4660 return ret; 4661 } 4662 ret = ci_enable_didt(rdev, true); 4663 if (ret) { 4664 DRM_ERROR("ci_enable_didt failed\n"); 4665 return ret; 4666 } 4667 ret = ci_enable_smc_cac(rdev, true); 4668 if (ret) { 4669 DRM_ERROR("ci_enable_smc_cac failed\n"); 4670 return ret; 4671 } 4672 ret = ci_enable_power_containment(rdev, true); 4673 if (ret) { 4674 DRM_ERROR("ci_enable_power_containment failed\n"); 4675 return ret; 4676 } 4677 4678 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); 4679 4680 ci_update_current_ps(rdev, boot_ps); 4681 4682 return 0; 4683 } 4684 4685 int ci_dpm_late_enable(struct radeon_device *rdev) 4686 { 4687 int ret; 4688 4689 if (rdev->irq.installed && 4690 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { 4691 #if 0 4692 PPSMC_Result result; 4693 #endif 4694 ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); 4695 if (ret) { 4696 DRM_ERROR("ci_set_thermal_temperature_range failed\n"); 4697 return ret; 4698 } 4699 rdev->irq.dpm_thermal = true; 4700 radeon_irq_set(rdev); 4701 #if 0 4702 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); 4703 4704 if (result != PPSMC_Result_OK) 4705 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); 4706 #endif 4707 } 4708 4709 ci_dpm_powergate_uvd(rdev, true); 4710 4711 return 0; 4712 } 4713 4714 void ci_dpm_disable(struct radeon_device *rdev) 4715 { 4716 struct ci_power_info *pi = ci_get_pi(rdev); 4717 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; 4718 4719 ci_dpm_powergate_uvd(rdev, false); 4720 4721 if (!ci_is_smc_running(rdev)) 4722 return; 4723 4724 if (pi->thermal_protection) 4725 ci_enable_thermal_protection(rdev, false); 4726 ci_enable_power_containment(rdev, false); 4727 ci_enable_smc_cac(rdev, false); 4728 ci_enable_didt(rdev, false); 4729 ci_enable_spread_spectrum(rdev, false); 4730 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false); 4731 ci_stop_dpm(rdev); 4732 ci_enable_ds_master_switch(rdev, true); 4733 ci_enable_ulv(rdev, false); 4734 ci_clear_vc(rdev); 4735 ci_reset_to_default(rdev); 4736 ci_dpm_stop_smc(rdev); 4737 ci_force_switch_to_arb_f0(rdev); 4738 4739 ci_update_current_ps(rdev, boot_ps); 4740 } 4741 4742 int ci_dpm_set_power_state(struct radeon_device *rdev) 4743 { 4744 struct ci_power_info *pi = ci_get_pi(rdev); 4745 struct radeon_ps *new_ps = &pi->requested_rps; 4746 struct radeon_ps *old_ps = &pi->current_rps; 4747 int ret; 4748 4749 ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps); 4750 if (pi->pcie_performance_request) 4751 ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps); 4752 ret = ci_freeze_sclk_mclk_dpm(rdev); 4753 if (ret) { 4754 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n"); 4755 return ret; 4756 } 4757 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps); 4758 if (ret) { 4759 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n"); 4760 return ret; 4761 } 4762 ret = ci_generate_dpm_level_enable_mask(rdev, new_ps); 4763 if (ret) { 4764 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n"); 4765 return ret; 4766 } 4767 4768 ret = ci_update_vce_dpm(rdev, new_ps, old_ps); 4769 if (ret) { 4770 DRM_ERROR("ci_update_vce_dpm failed\n"); 4771 return ret; 4772 } 4773 4774 ret = ci_update_sclk_t(rdev); 4775 if (ret) { 4776 DRM_ERROR("ci_update_sclk_t failed\n"); 4777 return ret; 4778 } 4779 if (pi->caps_dynamic_ac_timing) { 4780 ret = ci_update_and_upload_mc_reg_table(rdev); 4781 if (ret) { 4782 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n"); 4783 return ret; 4784 } 4785 } 4786 ret = ci_program_memory_timing_parameters(rdev); 4787 if (ret) { 4788 DRM_ERROR("ci_program_memory_timing_parameters failed\n"); 4789 return ret; 4790 } 4791 ret = ci_unfreeze_sclk_mclk_dpm(rdev); 4792 if (ret) { 4793 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n"); 4794 return ret; 4795 } 4796 ret = ci_upload_dpm_level_enable_mask(rdev); 4797 if (ret) { 4798 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n"); 4799 return ret; 4800 } 4801 if (pi->pcie_performance_request) 4802 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); 4803 4804 return 0; 4805 } 4806 4807 int ci_dpm_power_control_set_level(struct radeon_device *rdev) 4808 { 4809 return ci_power_control_set_level(rdev); 4810 } 4811 4812 void ci_dpm_reset_asic(struct radeon_device *rdev) 4813 { 4814 ci_set_boot_state(rdev); 4815 } 4816 4817 void ci_dpm_display_configuration_changed(struct radeon_device *rdev) 4818 { 4819 ci_program_display_gap(rdev); 4820 } 4821 4822 union power_info { 4823 struct _ATOM_POWERPLAY_INFO info; 4824 struct _ATOM_POWERPLAY_INFO_V2 info_2; 4825 struct _ATOM_POWERPLAY_INFO_V3 info_3; 4826 struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 4827 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 4828 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 4829 }; 4830 4831 union pplib_clock_info { 4832 struct _ATOM_PPLIB_R600_CLOCK_INFO r600; 4833 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; 4834 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 4835 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 4836 struct _ATOM_PPLIB_SI_CLOCK_INFO si; 4837 struct _ATOM_PPLIB_CI_CLOCK_INFO ci; 4838 }; 4839 4840 union pplib_power_state { 4841 struct _ATOM_PPLIB_STATE v1; 4842 struct _ATOM_PPLIB_STATE_V2 v2; 4843 }; 4844 4845 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev, 4846 struct radeon_ps *rps, 4847 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, 4848 u8 table_rev) 4849 { 4850 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); 4851 rps->class = le16_to_cpu(non_clock_info->usClassification); 4852 rps->class2 = le16_to_cpu(non_clock_info->usClassification2); 4853 4854 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 4855 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 4856 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 4857 } else { 4858 rps->vclk = 0; 4859 rps->dclk = 0; 4860 } 4861 4862 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) 4863 rdev->pm.dpm.boot_ps = rps; 4864 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 4865 rdev->pm.dpm.uvd_ps = rps; 4866 } 4867 4868 static void ci_parse_pplib_clock_info(struct radeon_device *rdev, 4869 struct radeon_ps *rps, int index, 4870 union pplib_clock_info *clock_info) 4871 { 4872 struct ci_power_info *pi = ci_get_pi(rdev); 4873 struct ci_ps *ps = ci_get_ps(rps); 4874 struct ci_pl *pl = &ps->performance_levels[index]; 4875 4876 ps->performance_level_count = index + 1; 4877 4878 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow); 4879 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16; 4880 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow); 4881 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16; 4882 4883 pl->pcie_gen = r600_get_pcie_gen_support(rdev, 4884 pi->sys_pcie_mask, 4885 pi->vbios_boot_state.pcie_gen_bootup_value, 4886 clock_info->ci.ucPCIEGen); 4887 pl->pcie_lane = r600_get_pcie_lane_support(rdev, 4888 pi->vbios_boot_state.pcie_lane_bootup_value, 4889 le16_to_cpu(clock_info->ci.usPCIELane)); 4890 4891 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { 4892 pi->acpi_pcie_gen = pl->pcie_gen; 4893 } 4894 4895 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) { 4896 pi->ulv.supported = true; 4897 pi->ulv.pl = *pl; 4898 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT; 4899 } 4900 4901 /* patch up boot state */ 4902 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { 4903 pl->mclk = pi->vbios_boot_state.mclk_bootup_value; 4904 pl->sclk = pi->vbios_boot_state.sclk_bootup_value; 4905 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value; 4906 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value; 4907 } 4908 4909 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { 4910 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: 4911 pi->use_pcie_powersaving_levels = true; 4912 if (pi->pcie_gen_powersaving.max < pl->pcie_gen) 4913 pi->pcie_gen_powersaving.max = pl->pcie_gen; 4914 if (pi->pcie_gen_powersaving.min > pl->pcie_gen) 4915 pi->pcie_gen_powersaving.min = pl->pcie_gen; 4916 if (pi->pcie_lane_powersaving.max < pl->pcie_lane) 4917 pi->pcie_lane_powersaving.max = pl->pcie_lane; 4918 if (pi->pcie_lane_powersaving.min > pl->pcie_lane) 4919 pi->pcie_lane_powersaving.min = pl->pcie_lane; 4920 break; 4921 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: 4922 pi->use_pcie_performance_levels = true; 4923 if (pi->pcie_gen_performance.max < pl->pcie_gen) 4924 pi->pcie_gen_performance.max = pl->pcie_gen; 4925 if (pi->pcie_gen_performance.min > pl->pcie_gen) 4926 pi->pcie_gen_performance.min = pl->pcie_gen; 4927 if (pi->pcie_lane_performance.max < pl->pcie_lane) 4928 pi->pcie_lane_performance.max = pl->pcie_lane; 4929 if (pi->pcie_lane_performance.min > pl->pcie_lane) 4930 pi->pcie_lane_performance.min = pl->pcie_lane; 4931 break; 4932 default: 4933 break; 4934 } 4935 } 4936 4937 static int ci_parse_power_table(struct radeon_device *rdev) 4938 { 4939 struct radeon_mode_info *mode_info = &rdev->mode_info; 4940 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; 4941 union pplib_power_state *power_state; 4942 int i, j, k, non_clock_array_index, clock_array_index; 4943 union pplib_clock_info *clock_info; 4944 struct _StateArray *state_array; 4945 struct _ClockInfoArray *clock_info_array; 4946 struct _NonClockInfoArray *non_clock_info_array; 4947 union power_info *power_info; 4948 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 4949 u16 data_offset; 4950 u8 frev, crev; 4951 u8 *power_state_offset; 4952 struct ci_ps *ps; 4953 4954 if (!atom_parse_data_header(mode_info->atom_context, index, NULL, 4955 &frev, &crev, &data_offset)) 4956 return -EINVAL; 4957 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 4958 4959 state_array = (struct _StateArray *) 4960 (mode_info->atom_context->bios + data_offset + 4961 le16_to_cpu(power_info->pplib.usStateArrayOffset)); 4962 clock_info_array = (struct _ClockInfoArray *) 4963 (mode_info->atom_context->bios + data_offset + 4964 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); 4965 non_clock_info_array = (struct _NonClockInfoArray *) 4966 (mode_info->atom_context->bios + data_offset + 4967 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 4968 4969 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) * 4970 state_array->ucNumEntries, GFP_KERNEL); 4971 if (!rdev->pm.dpm.ps) 4972 return -ENOMEM; 4973 power_state_offset = (u8 *)state_array->states; 4974 for (i = 0; i < state_array->ucNumEntries; i++) { 4975 u8 *idx; 4976 power_state = (union pplib_power_state *)power_state_offset; 4977 non_clock_array_index = power_state->v2.nonClockInfoIndex; 4978 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 4979 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 4980 if (!rdev->pm.power_state[i].clock_info) 4981 return -EINVAL; 4982 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL); 4983 if (ps == NULL) { 4984 kfree(rdev->pm.dpm.ps); 4985 return -ENOMEM; 4986 } 4987 rdev->pm.dpm.ps[i].ps_priv = ps; 4988 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], 4989 non_clock_info, 4990 non_clock_info_array->ucEntrySize); 4991 k = 0; 4992 idx = (u8 *)&power_state->v2.clockInfoIndex[0]; 4993 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 4994 clock_array_index = idx[j]; 4995 if (clock_array_index >= clock_info_array->ucNumEntries) 4996 continue; 4997 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS) 4998 break; 4999 clock_info = (union pplib_clock_info *) 5000 ((u8 *)&clock_info_array->clockInfo[0] + 5001 (clock_array_index * clock_info_array->ucEntrySize)); 5002 ci_parse_pplib_clock_info(rdev, 5003 &rdev->pm.dpm.ps[i], k, 5004 clock_info); 5005 k++; 5006 } 5007 power_state_offset += 2 + power_state->v2.ucNumDPMLevels; 5008 } 5009 rdev->pm.dpm.num_ps = state_array->ucNumEntries; 5010 5011 /* fill in the vce power states */ 5012 for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) { 5013 u32 sclk, mclk; 5014 clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx; 5015 clock_info = (union pplib_clock_info *) 5016 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 5017 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow); 5018 sclk |= clock_info->ci.ucEngineClockHigh << 16; 5019 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow); 5020 mclk |= clock_info->ci.ucMemoryClockHigh << 16; 5021 rdev->pm.dpm.vce_states[i].sclk = sclk; 5022 rdev->pm.dpm.vce_states[i].mclk = mclk; 5023 } 5024 5025 return 0; 5026 } 5027 5028 static int ci_get_vbios_boot_values(struct radeon_device *rdev, 5029 struct ci_vbios_boot_state *boot_state) 5030 { 5031 struct radeon_mode_info *mode_info = &rdev->mode_info; 5032 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); 5033 ATOM_FIRMWARE_INFO_V2_2 *firmware_info; 5034 u8 frev, crev; 5035 u16 data_offset; 5036 5037 if (atom_parse_data_header(mode_info->atom_context, index, NULL, 5038 &frev, &crev, &data_offset)) { 5039 firmware_info = 5040 (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios + 5041 data_offset); 5042 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage); 5043 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage); 5044 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage); 5045 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev); 5046 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev); 5047 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock); 5048 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock); 5049 5050 return 0; 5051 } 5052 return -EINVAL; 5053 } 5054 5055 void ci_dpm_fini(struct radeon_device *rdev) 5056 { 5057 int i; 5058 5059 for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 5060 kfree(rdev->pm.dpm.ps[i].ps_priv); 5061 } 5062 kfree(rdev->pm.dpm.ps); 5063 kfree(rdev->pm.dpm.priv); 5064 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries); 5065 r600_free_extended_power_table(rdev); 5066 } 5067 5068 int ci_dpm_init(struct radeon_device *rdev) 5069 { 5070 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); 5071 u16 data_offset, size; 5072 u8 frev, crev; 5073 struct ci_power_info *pi; 5074 int ret; 5075 u32 mask; 5076 5077 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL); 5078 if (pi == NULL) 5079 return -ENOMEM; 5080 rdev->pm.dpm.priv = pi; 5081 5082 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); 5083 if (ret) 5084 pi->sys_pcie_mask = 0; 5085 else 5086 pi->sys_pcie_mask = mask; 5087 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID; 5088 5089 pi->pcie_gen_performance.max = RADEON_PCIE_GEN1; 5090 pi->pcie_gen_performance.min = RADEON_PCIE_GEN3; 5091 pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1; 5092 pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3; 5093 5094 pi->pcie_lane_performance.max = 0; 5095 pi->pcie_lane_performance.min = 16; 5096 pi->pcie_lane_powersaving.max = 0; 5097 pi->pcie_lane_powersaving.min = 16; 5098 5099 ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state); 5100 if (ret) { 5101 ci_dpm_fini(rdev); 5102 return ret; 5103 } 5104 5105 ret = r600_get_platform_caps(rdev); 5106 if (ret) { 5107 ci_dpm_fini(rdev); 5108 return ret; 5109 } 5110 5111 ret = r600_parse_extended_power_table(rdev); 5112 if (ret) { 5113 ci_dpm_fini(rdev); 5114 return ret; 5115 } 5116 5117 ret = ci_parse_power_table(rdev); 5118 if (ret) { 5119 ci_dpm_fini(rdev); 5120 return ret; 5121 } 5122 5123 pi->dll_default_on = false; 5124 pi->sram_end = SMC_RAM_END; 5125 5126 pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT; 5127 pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT; 5128 pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT; 5129 pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT; 5130 pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT; 5131 pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT; 5132 pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT; 5133 pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT; 5134 5135 pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT; 5136 5137 pi->sclk_dpm_key_disabled = 0; 5138 pi->mclk_dpm_key_disabled = 0; 5139 pi->pcie_dpm_key_disabled = 0; 5140 5141 /* mclk dpm is unstable on some R7 260X cards with the old mc ucode */ 5142 if ((rdev->pdev->device == 0x6658) && 5143 (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) { 5144 pi->mclk_dpm_key_disabled = 1; 5145 } 5146 5147 pi->caps_sclk_ds = true; 5148 5149 pi->mclk_strobe_mode_threshold = 40000; 5150 pi->mclk_stutter_mode_threshold = 40000; 5151 pi->mclk_edc_enable_threshold = 40000; 5152 pi->mclk_edc_wr_enable_threshold = 40000; 5153 5154 ci_initialize_powertune_defaults(rdev); 5155 5156 pi->caps_fps = false; 5157 5158 pi->caps_sclk_throttle_low_notification = false; 5159 5160 pi->caps_uvd_dpm = true; 5161 pi->caps_vce_dpm = true; 5162 5163 ci_get_leakage_voltages(rdev); 5164 ci_patch_dependency_tables_with_leakage(rdev); 5165 ci_set_private_data_variables_based_on_pptable(rdev); 5166 5167 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = 5168 kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL); 5169 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { 5170 ci_dpm_fini(rdev); 5171 return -ENOMEM; 5172 } 5173 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4; 5174 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; 5175 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; 5176 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000; 5177 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720; 5178 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000; 5179 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810; 5180 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000; 5181 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900; 5182 5183 rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4; 5184 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000; 5185 rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200; 5186 5187 rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0; 5188 rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL; 5189 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0; 5190 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL; 5191 5192 if (rdev->family == CHIP_HAWAII) { 5193 pi->thermal_temp_setting.temperature_low = 94500; 5194 pi->thermal_temp_setting.temperature_high = 95000; 5195 pi->thermal_temp_setting.temperature_shutdown = 104000; 5196 } else { 5197 pi->thermal_temp_setting.temperature_low = 99500; 5198 pi->thermal_temp_setting.temperature_high = 100000; 5199 pi->thermal_temp_setting.temperature_shutdown = 104000; 5200 } 5201 5202 pi->uvd_enabled = false; 5203 5204 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE; 5205 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE; 5206 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE; 5207 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT)) 5208 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; 5209 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) 5210 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; 5211 5212 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) { 5213 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) 5214 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; 5215 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) 5216 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; 5217 else 5218 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL; 5219 } 5220 5221 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) { 5222 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) 5223 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; 5224 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) 5225 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; 5226 else 5227 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL; 5228 } 5229 5230 pi->vddc_phase_shed_control = true; 5231 5232 #if defined(CONFIG_ACPI) 5233 pi->pcie_performance_request = 5234 radeon_acpi_is_pcie_performance_request_supported(rdev); 5235 #else 5236 pi->pcie_performance_request = false; 5237 #endif 5238 5239 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, 5240 &frev, &crev, &data_offset)) { 5241 pi->caps_sclk_ss_support = true; 5242 pi->caps_mclk_ss_support = true; 5243 pi->dynamic_ss = true; 5244 } else { 5245 pi->caps_sclk_ss_support = false; 5246 pi->caps_mclk_ss_support = false; 5247 pi->dynamic_ss = true; 5248 } 5249 5250 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) 5251 pi->thermal_protection = true; 5252 else 5253 pi->thermal_protection = false; 5254 5255 pi->caps_dynamic_ac_timing = true; 5256 5257 pi->uvd_power_gated = false; 5258 5259 /* make sure dc limits are valid */ 5260 if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || 5261 (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0)) 5262 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc = 5263 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 5264 5265 return 0; 5266 } 5267 5268 void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 5269 struct seq_file *m) 5270 { 5271 struct ci_power_info *pi = ci_get_pi(rdev); 5272 struct radeon_ps *rps = &pi->current_rps; 5273 u32 sclk = ci_get_average_sclk_freq(rdev); 5274 u32 mclk = ci_get_average_mclk_freq(rdev); 5275 5276 seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis"); 5277 seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis"); 5278 seq_printf(m, "power level avg sclk: %u mclk: %u\n", 5279 sclk, mclk); 5280 } 5281 5282 void ci_dpm_print_power_state(struct radeon_device *rdev, 5283 struct radeon_ps *rps) 5284 { 5285 struct ci_ps *ps = ci_get_ps(rps); 5286 struct ci_pl *pl; 5287 int i; 5288 5289 r600_dpm_print_class_info(rps->class, rps->class2); 5290 r600_dpm_print_cap_info(rps->caps); 5291 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 5292 for (i = 0; i < ps->performance_level_count; i++) { 5293 pl = &ps->performance_levels[i]; 5294 printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n", 5295 i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane); 5296 } 5297 r600_dpm_print_ps_status(rdev, rps); 5298 } 5299 5300 u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low) 5301 { 5302 struct ci_power_info *pi = ci_get_pi(rdev); 5303 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps); 5304 5305 if (low) 5306 return requested_state->performance_levels[0].sclk; 5307 else 5308 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; 5309 } 5310 5311 u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low) 5312 { 5313 struct ci_power_info *pi = ci_get_pi(rdev); 5314 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps); 5315 5316 if (low) 5317 return requested_state->performance_levels[0].mclk; 5318 else 5319 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk; 5320 } 5321