1 /* 2 * Copyright 2013 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/firmware.h> 25 #include <drm/drmP.h> 26 #include "radeon.h" 27 #include "radeon_asic.h" 28 #include "radeon_ucode.h" 29 #include "cikd.h" 30 #include "r600_dpm.h" 31 #include "ci_dpm.h" 32 #include "atom.h" 33 #include <linux/seq_file.h> 34 35 #define MC_CG_ARB_FREQ_F0 0x0a 36 #define MC_CG_ARB_FREQ_F1 0x0b 37 #define MC_CG_ARB_FREQ_F2 0x0c 38 #define MC_CG_ARB_FREQ_F3 0x0d 39 40 #define SMC_RAM_END 0x40000 41 42 #define VOLTAGE_SCALE 4 43 #define VOLTAGE_VID_OFFSET_SCALE1 625 44 #define VOLTAGE_VID_OFFSET_SCALE2 100 45 46 static const struct ci_pt_defaults defaults_hawaii_xt = 47 { 48 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, 49 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, 50 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } 51 }; 52 53 static const struct ci_pt_defaults defaults_hawaii_pro = 54 { 55 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062, 56 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, 57 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } 58 }; 59 60 static const struct ci_pt_defaults defaults_bonaire_xt = 61 { 62 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, 63 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, 64 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } 65 }; 66 67 static const struct ci_pt_defaults defaults_bonaire_pro = 68 { 69 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062, 70 { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F }, 71 { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB } 72 }; 73 74 static const struct ci_pt_defaults defaults_saturn_xt = 75 { 76 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000, 77 { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D }, 78 { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 } 79 }; 80 81 static const struct ci_pt_defaults defaults_saturn_pro = 82 { 83 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000, 84 { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A }, 85 { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 } 86 }; 87 88 static const struct ci_pt_config_reg didt_config_ci[] = 89 { 90 { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 91 { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 92 { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 93 { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 94 { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 95 { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 96 { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 97 { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 98 { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 99 { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 100 { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 101 { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 102 { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, 103 { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, 104 { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, 105 { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 106 { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 107 { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 108 { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 109 { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 110 { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 111 { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 112 { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 113 { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 114 { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 115 { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 116 { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 117 { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 118 { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 119 { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 120 { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, 121 { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, 122 { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, 123 { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 124 { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 125 { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 126 { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 127 { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 128 { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 129 { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 130 { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 131 { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 132 { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 133 { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 134 { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 135 { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 136 { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 137 { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 138 { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, 139 { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, 140 { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, 141 { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 142 { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 143 { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 144 { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 145 { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 146 { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 147 { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 148 { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 149 { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 150 { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 151 { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 152 { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 153 { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 154 { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 155 { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 156 { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, 157 { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, 158 { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, 159 { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 160 { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 161 { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 162 { 0xFFFFFFFF } 163 }; 164 165 extern u8 rv770_get_memory_module_index(struct radeon_device *rdev); 166 extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev, 167 u32 arb_freq_src, u32 arb_freq_dest); 168 extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock); 169 extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode); 170 extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev, 171 u32 max_voltage_steps, 172 struct atom_voltage_table *voltage_table); 173 extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev); 174 extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev); 175 extern int ci_mc_load_microcode(struct radeon_device *rdev); 176 extern void cik_update_cg(struct radeon_device *rdev, 177 u32 block, bool enable); 178 179 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, 180 struct atom_voltage_table_entry *voltage_table, 181 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd); 182 static int ci_set_power_limit(struct radeon_device *rdev, u32 n); 183 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev, 184 u32 target_tdp); 185 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate); 186 187 static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg); 188 static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev, 189 PPSMC_Msg msg, u32 parameter); 190 191 static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev); 192 static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev); 193 194 static struct ci_power_info *ci_get_pi(struct radeon_device *rdev) 195 { 196 struct ci_power_info *pi = rdev->pm.dpm.priv; 197 198 return pi; 199 } 200 201 static struct ci_ps *ci_get_ps(struct radeon_ps *rps) 202 { 203 struct ci_ps *ps = rps->ps_priv; 204 205 return ps; 206 } 207 208 static void ci_initialize_powertune_defaults(struct radeon_device *rdev) 209 { 210 struct ci_power_info *pi = ci_get_pi(rdev); 211 212 switch (rdev->pdev->device) { 213 case 0x6649: 214 case 0x6650: 215 case 0x6651: 216 case 0x6658: 217 case 0x665C: 218 case 0x665D: 219 default: 220 pi->powertune_defaults = &defaults_bonaire_xt; 221 break; 222 case 0x6640: 223 case 0x6641: 224 case 0x6646: 225 case 0x6647: 226 pi->powertune_defaults = &defaults_saturn_xt; 227 break; 228 case 0x67B8: 229 case 0x67B0: 230 pi->powertune_defaults = &defaults_hawaii_xt; 231 break; 232 case 0x67BA: 233 case 0x67B1: 234 pi->powertune_defaults = &defaults_hawaii_pro; 235 break; 236 case 0x67A0: 237 case 0x67A1: 238 case 0x67A2: 239 case 0x67A8: 240 case 0x67A9: 241 case 0x67AA: 242 case 0x67B9: 243 case 0x67BE: 244 pi->powertune_defaults = &defaults_bonaire_xt; 245 break; 246 } 247 248 pi->dte_tj_offset = 0; 249 250 pi->caps_power_containment = true; 251 pi->caps_cac = false; 252 pi->caps_sq_ramping = false; 253 pi->caps_db_ramping = false; 254 pi->caps_td_ramping = false; 255 pi->caps_tcp_ramping = false; 256 257 if (pi->caps_power_containment) { 258 pi->caps_cac = true; 259 if (rdev->family == CHIP_HAWAII) 260 pi->enable_bapm_feature = false; 261 else 262 pi->enable_bapm_feature = true; 263 pi->enable_tdc_limit_feature = true; 264 pi->enable_pkg_pwr_tracking_feature = true; 265 } 266 } 267 268 static u8 ci_convert_to_vid(u16 vddc) 269 { 270 return (6200 - (vddc * VOLTAGE_SCALE)) / 25; 271 } 272 273 static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev) 274 { 275 struct ci_power_info *pi = ci_get_pi(rdev); 276 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd; 277 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd; 278 u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2; 279 u32 i; 280 281 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL) 282 return -EINVAL; 283 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8) 284 return -EINVAL; 285 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count != 286 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count) 287 return -EINVAL; 288 289 for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) { 290 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { 291 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1); 292 hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2); 293 hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3); 294 } else { 295 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc); 296 hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage); 297 } 298 } 299 return 0; 300 } 301 302 static int ci_populate_vddc_vid(struct radeon_device *rdev) 303 { 304 struct ci_power_info *pi = ci_get_pi(rdev); 305 u8 *vid = pi->smc_powertune_table.VddCVid; 306 u32 i; 307 308 if (pi->vddc_voltage_table.count > 8) 309 return -EINVAL; 310 311 for (i = 0; i < pi->vddc_voltage_table.count; i++) 312 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value); 313 314 return 0; 315 } 316 317 static int ci_populate_svi_load_line(struct radeon_device *rdev) 318 { 319 struct ci_power_info *pi = ci_get_pi(rdev); 320 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; 321 322 pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en; 323 pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc; 324 pi->smc_powertune_table.SviLoadLineTrimVddC = 3; 325 pi->smc_powertune_table.SviLoadLineOffsetVddC = 0; 326 327 return 0; 328 } 329 330 static int ci_populate_tdc_limit(struct radeon_device *rdev) 331 { 332 struct ci_power_info *pi = ci_get_pi(rdev); 333 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; 334 u16 tdc_limit; 335 336 tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256; 337 pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit); 338 pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc = 339 pt_defaults->tdc_vddc_throttle_release_limit_perc; 340 pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt; 341 342 return 0; 343 } 344 345 static int ci_populate_dw8(struct radeon_device *rdev) 346 { 347 struct ci_power_info *pi = ci_get_pi(rdev); 348 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; 349 int ret; 350 351 ret = ci_read_smc_sram_dword(rdev, 352 SMU7_FIRMWARE_HEADER_LOCATION + 353 offsetof(SMU7_Firmware_Header, PmFuseTable) + 354 offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl), 355 (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl, 356 pi->sram_end); 357 if (ret) 358 return -EINVAL; 359 else 360 pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl; 361 362 return 0; 363 } 364 365 static int ci_populate_fuzzy_fan(struct radeon_device *rdev) 366 { 367 struct ci_power_info *pi = ci_get_pi(rdev); 368 369 if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) || 370 (rdev->pm.dpm.fan.fan_output_sensitivity == 0)) 371 rdev->pm.dpm.fan.fan_output_sensitivity = 372 rdev->pm.dpm.fan.default_fan_output_sensitivity; 373 374 pi->smc_powertune_table.FuzzyFan_PwmSetDelta = 375 cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity); 376 377 return 0; 378 } 379 380 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev) 381 { 382 struct ci_power_info *pi = ci_get_pi(rdev); 383 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd; 384 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd; 385 int i, min, max; 386 387 min = max = hi_vid[0]; 388 for (i = 0; i < 8; i++) { 389 if (0 != hi_vid[i]) { 390 if (min > hi_vid[i]) 391 min = hi_vid[i]; 392 if (max < hi_vid[i]) 393 max = hi_vid[i]; 394 } 395 396 if (0 != lo_vid[i]) { 397 if (min > lo_vid[i]) 398 min = lo_vid[i]; 399 if (max < lo_vid[i]) 400 max = lo_vid[i]; 401 } 402 } 403 404 if ((min == 0) || (max == 0)) 405 return -EINVAL; 406 pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max; 407 pi->smc_powertune_table.GnbLPMLMinVid = (u8)min; 408 409 return 0; 410 } 411 412 static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev) 413 { 414 struct ci_power_info *pi = ci_get_pi(rdev); 415 u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd; 416 u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd; 417 struct radeon_cac_tdp_table *cac_tdp_table = 418 rdev->pm.dpm.dyn_state.cac_tdp_table; 419 420 hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256; 421 lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256; 422 423 pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd); 424 pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd); 425 426 return 0; 427 } 428 429 static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev) 430 { 431 struct ci_power_info *pi = ci_get_pi(rdev); 432 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; 433 SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table; 434 struct radeon_cac_tdp_table *cac_tdp_table = 435 rdev->pm.dpm.dyn_state.cac_tdp_table; 436 struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table; 437 int i, j, k; 438 const u16 *def1; 439 const u16 *def2; 440 441 dpm_table->DefaultTdp = cac_tdp_table->tdp * 256; 442 dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256; 443 444 dpm_table->DTETjOffset = (u8)pi->dte_tj_offset; 445 dpm_table->GpuTjMax = 446 (u8)(pi->thermal_temp_setting.temperature_high / 1000); 447 dpm_table->GpuTjHyst = 8; 448 449 dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base; 450 451 if (ppm) { 452 dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000); 453 dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256); 454 } else { 455 dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0); 456 dpm_table->PPM_TemperatureLimit = cpu_to_be16(0); 457 } 458 459 dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient); 460 def1 = pt_defaults->bapmti_r; 461 def2 = pt_defaults->bapmti_rc; 462 463 for (i = 0; i < SMU7_DTE_ITERATIONS; i++) { 464 for (j = 0; j < SMU7_DTE_SOURCES; j++) { 465 for (k = 0; k < SMU7_DTE_SINKS; k++) { 466 dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1); 467 dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2); 468 def1++; 469 def2++; 470 } 471 } 472 } 473 474 return 0; 475 } 476 477 static int ci_populate_pm_base(struct radeon_device *rdev) 478 { 479 struct ci_power_info *pi = ci_get_pi(rdev); 480 u32 pm_fuse_table_offset; 481 int ret; 482 483 if (pi->caps_power_containment) { 484 ret = ci_read_smc_sram_dword(rdev, 485 SMU7_FIRMWARE_HEADER_LOCATION + 486 offsetof(SMU7_Firmware_Header, PmFuseTable), 487 &pm_fuse_table_offset, pi->sram_end); 488 if (ret) 489 return ret; 490 ret = ci_populate_bapm_vddc_vid_sidd(rdev); 491 if (ret) 492 return ret; 493 ret = ci_populate_vddc_vid(rdev); 494 if (ret) 495 return ret; 496 ret = ci_populate_svi_load_line(rdev); 497 if (ret) 498 return ret; 499 ret = ci_populate_tdc_limit(rdev); 500 if (ret) 501 return ret; 502 ret = ci_populate_dw8(rdev); 503 if (ret) 504 return ret; 505 ret = ci_populate_fuzzy_fan(rdev); 506 if (ret) 507 return ret; 508 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev); 509 if (ret) 510 return ret; 511 ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev); 512 if (ret) 513 return ret; 514 ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset, 515 (u8 *)&pi->smc_powertune_table, 516 sizeof(SMU7_Discrete_PmFuses), pi->sram_end); 517 if (ret) 518 return ret; 519 } 520 521 return 0; 522 } 523 524 static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable) 525 { 526 struct ci_power_info *pi = ci_get_pi(rdev); 527 u32 data; 528 529 if (pi->caps_sq_ramping) { 530 data = RREG32_DIDT(DIDT_SQ_CTRL0); 531 if (enable) 532 data |= DIDT_CTRL_EN; 533 else 534 data &= ~DIDT_CTRL_EN; 535 WREG32_DIDT(DIDT_SQ_CTRL0, data); 536 } 537 538 if (pi->caps_db_ramping) { 539 data = RREG32_DIDT(DIDT_DB_CTRL0); 540 if (enable) 541 data |= DIDT_CTRL_EN; 542 else 543 data &= ~DIDT_CTRL_EN; 544 WREG32_DIDT(DIDT_DB_CTRL0, data); 545 } 546 547 if (pi->caps_td_ramping) { 548 data = RREG32_DIDT(DIDT_TD_CTRL0); 549 if (enable) 550 data |= DIDT_CTRL_EN; 551 else 552 data &= ~DIDT_CTRL_EN; 553 WREG32_DIDT(DIDT_TD_CTRL0, data); 554 } 555 556 if (pi->caps_tcp_ramping) { 557 data = RREG32_DIDT(DIDT_TCP_CTRL0); 558 if (enable) 559 data |= DIDT_CTRL_EN; 560 else 561 data &= ~DIDT_CTRL_EN; 562 WREG32_DIDT(DIDT_TCP_CTRL0, data); 563 } 564 } 565 566 static int ci_program_pt_config_registers(struct radeon_device *rdev, 567 const struct ci_pt_config_reg *cac_config_regs) 568 { 569 const struct ci_pt_config_reg *config_regs = cac_config_regs; 570 u32 data; 571 u32 cache = 0; 572 573 if (config_regs == NULL) 574 return -EINVAL; 575 576 while (config_regs->offset != 0xFFFFFFFF) { 577 if (config_regs->type == CISLANDS_CONFIGREG_CACHE) { 578 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); 579 } else { 580 switch (config_regs->type) { 581 case CISLANDS_CONFIGREG_SMC_IND: 582 data = RREG32_SMC(config_regs->offset); 583 break; 584 case CISLANDS_CONFIGREG_DIDT_IND: 585 data = RREG32_DIDT(config_regs->offset); 586 break; 587 default: 588 data = RREG32(config_regs->offset << 2); 589 break; 590 } 591 592 data &= ~config_regs->mask; 593 data |= ((config_regs->value << config_regs->shift) & config_regs->mask); 594 data |= cache; 595 596 switch (config_regs->type) { 597 case CISLANDS_CONFIGREG_SMC_IND: 598 WREG32_SMC(config_regs->offset, data); 599 break; 600 case CISLANDS_CONFIGREG_DIDT_IND: 601 WREG32_DIDT(config_regs->offset, data); 602 break; 603 default: 604 WREG32(config_regs->offset << 2, data); 605 break; 606 } 607 cache = 0; 608 } 609 config_regs++; 610 } 611 return 0; 612 } 613 614 static int ci_enable_didt(struct radeon_device *rdev, bool enable) 615 { 616 struct ci_power_info *pi = ci_get_pi(rdev); 617 int ret; 618 619 if (pi->caps_sq_ramping || pi->caps_db_ramping || 620 pi->caps_td_ramping || pi->caps_tcp_ramping) { 621 cik_enter_rlc_safe_mode(rdev); 622 623 if (enable) { 624 ret = ci_program_pt_config_registers(rdev, didt_config_ci); 625 if (ret) { 626 cik_exit_rlc_safe_mode(rdev); 627 return ret; 628 } 629 } 630 631 ci_do_enable_didt(rdev, enable); 632 633 cik_exit_rlc_safe_mode(rdev); 634 } 635 636 return 0; 637 } 638 639 static int ci_enable_power_containment(struct radeon_device *rdev, bool enable) 640 { 641 struct ci_power_info *pi = ci_get_pi(rdev); 642 PPSMC_Result smc_result; 643 int ret = 0; 644 645 if (enable) { 646 pi->power_containment_features = 0; 647 if (pi->caps_power_containment) { 648 if (pi->enable_bapm_feature) { 649 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE); 650 if (smc_result != PPSMC_Result_OK) 651 ret = -EINVAL; 652 else 653 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM; 654 } 655 656 if (pi->enable_tdc_limit_feature) { 657 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable); 658 if (smc_result != PPSMC_Result_OK) 659 ret = -EINVAL; 660 else 661 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit; 662 } 663 664 if (pi->enable_pkg_pwr_tracking_feature) { 665 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable); 666 if (smc_result != PPSMC_Result_OK) { 667 ret = -EINVAL; 668 } else { 669 struct radeon_cac_tdp_table *cac_tdp_table = 670 rdev->pm.dpm.dyn_state.cac_tdp_table; 671 u32 default_pwr_limit = 672 (u32)(cac_tdp_table->maximum_power_delivery_limit * 256); 673 674 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit; 675 676 ci_set_power_limit(rdev, default_pwr_limit); 677 } 678 } 679 } 680 } else { 681 if (pi->caps_power_containment && pi->power_containment_features) { 682 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit) 683 ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable); 684 685 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM) 686 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE); 687 688 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) 689 ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable); 690 pi->power_containment_features = 0; 691 } 692 } 693 694 return ret; 695 } 696 697 static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable) 698 { 699 struct ci_power_info *pi = ci_get_pi(rdev); 700 PPSMC_Result smc_result; 701 int ret = 0; 702 703 if (pi->caps_cac) { 704 if (enable) { 705 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac); 706 if (smc_result != PPSMC_Result_OK) { 707 ret = -EINVAL; 708 pi->cac_enabled = false; 709 } else { 710 pi->cac_enabled = true; 711 } 712 } else if (pi->cac_enabled) { 713 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac); 714 pi->cac_enabled = false; 715 } 716 } 717 718 return ret; 719 } 720 721 static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev, 722 bool enable) 723 { 724 struct ci_power_info *pi = ci_get_pi(rdev); 725 PPSMC_Result smc_result = PPSMC_Result_OK; 726 727 if (pi->thermal_sclk_dpm_enabled) { 728 if (enable) 729 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM); 730 else 731 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM); 732 } 733 734 if (smc_result == PPSMC_Result_OK) 735 return 0; 736 else 737 return -EINVAL; 738 } 739 740 static int ci_power_control_set_level(struct radeon_device *rdev) 741 { 742 struct ci_power_info *pi = ci_get_pi(rdev); 743 struct radeon_cac_tdp_table *cac_tdp_table = 744 rdev->pm.dpm.dyn_state.cac_tdp_table; 745 s32 adjust_percent; 746 s32 target_tdp; 747 int ret = 0; 748 bool adjust_polarity = false; /* ??? */ 749 750 if (pi->caps_power_containment) { 751 adjust_percent = adjust_polarity ? 752 rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment); 753 target_tdp = ((100 + adjust_percent) * 754 (s32)cac_tdp_table->configurable_tdp) / 100; 755 756 ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp); 757 } 758 759 return ret; 760 } 761 762 void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) 763 { 764 struct ci_power_info *pi = ci_get_pi(rdev); 765 766 if (pi->uvd_power_gated == gate) 767 return; 768 769 pi->uvd_power_gated = gate; 770 771 ci_update_uvd_dpm(rdev, gate); 772 } 773 774 bool ci_dpm_vblank_too_short(struct radeon_device *rdev) 775 { 776 struct ci_power_info *pi = ci_get_pi(rdev); 777 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 778 u32 switch_limit = pi->mem_gddr5 ? 450 : 300; 779 780 /* disable mclk switching if the refresh is >120Hz, even if the 781 * blanking period would allow it 782 */ 783 if (r600_dpm_get_vrefresh(rdev) > 120) 784 return true; 785 786 if (vblank_time < switch_limit) 787 return true; 788 else 789 return false; 790 791 } 792 793 static void ci_apply_state_adjust_rules(struct radeon_device *rdev, 794 struct radeon_ps *rps) 795 { 796 struct ci_ps *ps = ci_get_ps(rps); 797 struct ci_power_info *pi = ci_get_pi(rdev); 798 struct radeon_clock_and_voltage_limits *max_limits; 799 bool disable_mclk_switching; 800 u32 sclk, mclk; 801 int i; 802 803 if (rps->vce_active) { 804 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; 805 rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk; 806 } else { 807 rps->evclk = 0; 808 rps->ecclk = 0; 809 } 810 811 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 812 ci_dpm_vblank_too_short(rdev)) 813 disable_mclk_switching = true; 814 else 815 disable_mclk_switching = false; 816 817 if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) 818 pi->battery_state = true; 819 else 820 pi->battery_state = false; 821 822 if (rdev->pm.dpm.ac_power) 823 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 824 else 825 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 826 827 if (rdev->pm.dpm.ac_power == false) { 828 for (i = 0; i < ps->performance_level_count; i++) { 829 if (ps->performance_levels[i].mclk > max_limits->mclk) 830 ps->performance_levels[i].mclk = max_limits->mclk; 831 if (ps->performance_levels[i].sclk > max_limits->sclk) 832 ps->performance_levels[i].sclk = max_limits->sclk; 833 } 834 } 835 836 /* XXX validate the min clocks required for display */ 837 838 if (disable_mclk_switching) { 839 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; 840 sclk = ps->performance_levels[0].sclk; 841 } else { 842 mclk = ps->performance_levels[0].mclk; 843 sclk = ps->performance_levels[0].sclk; 844 } 845 846 if (rps->vce_active) { 847 if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk) 848 sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk; 849 if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk) 850 mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk; 851 } 852 853 ps->performance_levels[0].sclk = sclk; 854 ps->performance_levels[0].mclk = mclk; 855 856 if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk) 857 ps->performance_levels[1].sclk = ps->performance_levels[0].sclk; 858 859 if (disable_mclk_switching) { 860 if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk) 861 ps->performance_levels[0].mclk = ps->performance_levels[1].mclk; 862 } else { 863 if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk) 864 ps->performance_levels[1].mclk = ps->performance_levels[0].mclk; 865 } 866 } 867 868 static int ci_thermal_set_temperature_range(struct radeon_device *rdev, 869 int min_temp, int max_temp) 870 { 871 int low_temp = 0 * 1000; 872 int high_temp = 255 * 1000; 873 u32 tmp; 874 875 if (low_temp < min_temp) 876 low_temp = min_temp; 877 if (high_temp > max_temp) 878 high_temp = max_temp; 879 if (high_temp < low_temp) { 880 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); 881 return -EINVAL; 882 } 883 884 tmp = RREG32_SMC(CG_THERMAL_INT); 885 tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK); 886 tmp |= CI_DIG_THERM_INTH(high_temp / 1000) | 887 CI_DIG_THERM_INTL(low_temp / 1000); 888 WREG32_SMC(CG_THERMAL_INT, tmp); 889 890 #if 0 891 /* XXX: need to figure out how to handle this properly */ 892 tmp = RREG32_SMC(CG_THERMAL_CTRL); 893 tmp &= DIG_THERM_DPM_MASK; 894 tmp |= DIG_THERM_DPM(high_temp / 1000); 895 WREG32_SMC(CG_THERMAL_CTRL, tmp); 896 #endif 897 898 rdev->pm.dpm.thermal.min_temp = low_temp; 899 rdev->pm.dpm.thermal.max_temp = high_temp; 900 901 return 0; 902 } 903 904 static int ci_thermal_enable_alert(struct radeon_device *rdev, 905 bool enable) 906 { 907 u32 thermal_int = RREG32_SMC(CG_THERMAL_INT); 908 PPSMC_Result result; 909 910 if (enable) { 911 thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); 912 WREG32_SMC(CG_THERMAL_INT, thermal_int); 913 rdev->irq.dpm_thermal = false; 914 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable); 915 if (result != PPSMC_Result_OK) { 916 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); 917 return -EINVAL; 918 } 919 } else { 920 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; 921 WREG32_SMC(CG_THERMAL_INT, thermal_int); 922 rdev->irq.dpm_thermal = true; 923 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable); 924 if (result != PPSMC_Result_OK) { 925 DRM_DEBUG_KMS("Could not disable thermal interrupts.\n"); 926 return -EINVAL; 927 } 928 } 929 930 return 0; 931 } 932 933 static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode) 934 { 935 struct ci_power_info *pi = ci_get_pi(rdev); 936 u32 tmp; 937 938 if (pi->fan_ctrl_is_in_default_mode) { 939 tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT; 940 pi->fan_ctrl_default_mode = tmp; 941 tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT; 942 pi->t_min = tmp; 943 pi->fan_ctrl_is_in_default_mode = false; 944 } 945 946 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK; 947 tmp |= TMIN(0); 948 WREG32_SMC(CG_FDO_CTRL2, tmp); 949 950 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; 951 tmp |= FDO_PWM_MODE(mode); 952 WREG32_SMC(CG_FDO_CTRL2, tmp); 953 } 954 955 static int ci_thermal_setup_fan_table(struct radeon_device *rdev) 956 { 957 struct ci_power_info *pi = ci_get_pi(rdev); 958 SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; 959 u32 duty100; 960 u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2; 961 u16 fdo_min, slope1, slope2; 962 u32 reference_clock, tmp; 963 int ret; 964 u64 tmp64; 965 966 if (!pi->fan_table_start) { 967 rdev->pm.dpm.fan.ucode_fan_control = false; 968 return 0; 969 } 970 971 duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; 972 973 if (duty100 == 0) { 974 rdev->pm.dpm.fan.ucode_fan_control = false; 975 return 0; 976 } 977 978 tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100; 979 do_div(tmp64, 10000); 980 fdo_min = (u16)tmp64; 981 982 t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min; 983 t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med; 984 985 pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min; 986 pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med; 987 988 slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); 989 slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); 990 991 fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100); 992 fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100); 993 fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100); 994 995 fan_table.Slope1 = cpu_to_be16(slope1); 996 fan_table.Slope2 = cpu_to_be16(slope2); 997 998 fan_table.FdoMin = cpu_to_be16(fdo_min); 999 1000 fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst); 1001 1002 fan_table.HystUp = cpu_to_be16(1); 1003 1004 fan_table.HystSlope = cpu_to_be16(1); 1005 1006 fan_table.TempRespLim = cpu_to_be16(5); 1007 1008 reference_clock = radeon_get_xclk(rdev); 1009 1010 fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay * 1011 reference_clock) / 1600); 1012 1013 fan_table.FdoMax = cpu_to_be16((u16)duty100); 1014 1015 tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT; 1016 fan_table.TempSrc = (uint8_t)tmp; 1017 1018 ret = ci_copy_bytes_to_smc(rdev, 1019 pi->fan_table_start, 1020 (u8 *)(&fan_table), 1021 sizeof(fan_table), 1022 pi->sram_end); 1023 1024 if (ret) { 1025 DRM_ERROR("Failed to load fan table to the SMC."); 1026 rdev->pm.dpm.fan.ucode_fan_control = false; 1027 } 1028 1029 return 0; 1030 } 1031 1032 static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev) 1033 { 1034 struct ci_power_info *pi = ci_get_pi(rdev); 1035 PPSMC_Result ret; 1036 1037 if (pi->caps_od_fuzzy_fan_control_support) { 1038 ret = ci_send_msg_to_smc_with_parameter(rdev, 1039 PPSMC_StartFanControl, 1040 FAN_CONTROL_FUZZY); 1041 if (ret != PPSMC_Result_OK) 1042 return -EINVAL; 1043 ret = ci_send_msg_to_smc_with_parameter(rdev, 1044 PPSMC_MSG_SetFanPwmMax, 1045 rdev->pm.dpm.fan.default_max_fan_pwm); 1046 if (ret != PPSMC_Result_OK) 1047 return -EINVAL; 1048 } else { 1049 ret = ci_send_msg_to_smc_with_parameter(rdev, 1050 PPSMC_StartFanControl, 1051 FAN_CONTROL_TABLE); 1052 if (ret != PPSMC_Result_OK) 1053 return -EINVAL; 1054 } 1055 1056 pi->fan_is_controlled_by_smc = true; 1057 return 0; 1058 } 1059 1060 static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev) 1061 { 1062 PPSMC_Result ret; 1063 struct ci_power_info *pi = ci_get_pi(rdev); 1064 1065 ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl); 1066 if (ret == PPSMC_Result_OK) { 1067 pi->fan_is_controlled_by_smc = false; 1068 return 0; 1069 } else 1070 return -EINVAL; 1071 } 1072 1073 int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev, 1074 u32 *speed) 1075 { 1076 u32 duty, duty100; 1077 u64 tmp64; 1078 1079 if (rdev->pm.no_fan) 1080 return -ENOENT; 1081 1082 duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; 1083 duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT; 1084 1085 if (duty100 == 0) 1086 return -EINVAL; 1087 1088 tmp64 = (u64)duty * 100; 1089 do_div(tmp64, duty100); 1090 *speed = (u32)tmp64; 1091 1092 if (*speed > 100) 1093 *speed = 100; 1094 1095 return 0; 1096 } 1097 1098 int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev, 1099 u32 speed) 1100 { 1101 u32 tmp; 1102 u32 duty, duty100; 1103 u64 tmp64; 1104 struct ci_power_info *pi = ci_get_pi(rdev); 1105 1106 if (rdev->pm.no_fan) 1107 return -ENOENT; 1108 1109 if (pi->fan_is_controlled_by_smc) 1110 return -EINVAL; 1111 1112 if (speed > 100) 1113 return -EINVAL; 1114 1115 duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; 1116 1117 if (duty100 == 0) 1118 return -EINVAL; 1119 1120 tmp64 = (u64)speed * duty100; 1121 do_div(tmp64, 100); 1122 duty = (u32)tmp64; 1123 1124 tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK; 1125 tmp |= FDO_STATIC_DUTY(duty); 1126 WREG32_SMC(CG_FDO_CTRL0, tmp); 1127 1128 return 0; 1129 } 1130 1131 void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode) 1132 { 1133 if (mode) { 1134 /* stop auto-manage */ 1135 if (rdev->pm.dpm.fan.ucode_fan_control) 1136 ci_fan_ctrl_stop_smc_fan_control(rdev); 1137 ci_fan_ctrl_set_static_mode(rdev, mode); 1138 } else { 1139 /* restart auto-manage */ 1140 if (rdev->pm.dpm.fan.ucode_fan_control) 1141 ci_thermal_start_smc_fan_control(rdev); 1142 else 1143 ci_fan_ctrl_set_default_mode(rdev); 1144 } 1145 } 1146 1147 u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev) 1148 { 1149 struct ci_power_info *pi = ci_get_pi(rdev); 1150 u32 tmp; 1151 1152 if (pi->fan_is_controlled_by_smc) 1153 return 0; 1154 1155 tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK; 1156 return (tmp >> FDO_PWM_MODE_SHIFT); 1157 } 1158 1159 #if 0 1160 static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev, 1161 u32 *speed) 1162 { 1163 u32 tach_period; 1164 u32 xclk = radeon_get_xclk(rdev); 1165 1166 if (rdev->pm.no_fan) 1167 return -ENOENT; 1168 1169 if (rdev->pm.fan_pulses_per_revolution == 0) 1170 return -ENOENT; 1171 1172 tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT; 1173 if (tach_period == 0) 1174 return -ENOENT; 1175 1176 *speed = 60 * xclk * 10000 / tach_period; 1177 1178 return 0; 1179 } 1180 1181 static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev, 1182 u32 speed) 1183 { 1184 u32 tach_period, tmp; 1185 u32 xclk = radeon_get_xclk(rdev); 1186 1187 if (rdev->pm.no_fan) 1188 return -ENOENT; 1189 1190 if (rdev->pm.fan_pulses_per_revolution == 0) 1191 return -ENOENT; 1192 1193 if ((speed < rdev->pm.fan_min_rpm) || 1194 (speed > rdev->pm.fan_max_rpm)) 1195 return -EINVAL; 1196 1197 if (rdev->pm.dpm.fan.ucode_fan_control) 1198 ci_fan_ctrl_stop_smc_fan_control(rdev); 1199 1200 tach_period = 60 * xclk * 10000 / (8 * speed); 1201 tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK; 1202 tmp |= TARGET_PERIOD(tach_period); 1203 WREG32_SMC(CG_TACH_CTRL, tmp); 1204 1205 ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM); 1206 1207 return 0; 1208 } 1209 #endif 1210 1211 static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev) 1212 { 1213 struct ci_power_info *pi = ci_get_pi(rdev); 1214 u32 tmp; 1215 1216 if (!pi->fan_ctrl_is_in_default_mode) { 1217 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; 1218 tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode); 1219 WREG32_SMC(CG_FDO_CTRL2, tmp); 1220 1221 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK; 1222 tmp |= TMIN(pi->t_min); 1223 WREG32_SMC(CG_FDO_CTRL2, tmp); 1224 pi->fan_ctrl_is_in_default_mode = true; 1225 } 1226 } 1227 1228 static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev) 1229 { 1230 if (rdev->pm.dpm.fan.ucode_fan_control) { 1231 ci_fan_ctrl_start_smc_fan_control(rdev); 1232 ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC); 1233 } 1234 } 1235 1236 static void ci_thermal_initialize(struct radeon_device *rdev) 1237 { 1238 u32 tmp; 1239 1240 if (rdev->pm.fan_pulses_per_revolution) { 1241 tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK; 1242 tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution -1); 1243 WREG32_SMC(CG_TACH_CTRL, tmp); 1244 } 1245 1246 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK; 1247 tmp |= TACH_PWM_RESP_RATE(0x28); 1248 WREG32_SMC(CG_FDO_CTRL2, tmp); 1249 } 1250 1251 static int ci_thermal_start_thermal_controller(struct radeon_device *rdev) 1252 { 1253 int ret; 1254 1255 ci_thermal_initialize(rdev); 1256 ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); 1257 if (ret) 1258 return ret; 1259 ret = ci_thermal_enable_alert(rdev, true); 1260 if (ret) 1261 return ret; 1262 if (rdev->pm.dpm.fan.ucode_fan_control) { 1263 ret = ci_thermal_setup_fan_table(rdev); 1264 if (ret) 1265 return ret; 1266 ci_thermal_start_smc_fan_control(rdev); 1267 } 1268 1269 return 0; 1270 } 1271 1272 static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev) 1273 { 1274 if (!rdev->pm.no_fan) 1275 ci_fan_ctrl_set_default_mode(rdev); 1276 } 1277 1278 #if 0 1279 static int ci_read_smc_soft_register(struct radeon_device *rdev, 1280 u16 reg_offset, u32 *value) 1281 { 1282 struct ci_power_info *pi = ci_get_pi(rdev); 1283 1284 return ci_read_smc_sram_dword(rdev, 1285 pi->soft_regs_start + reg_offset, 1286 value, pi->sram_end); 1287 } 1288 #endif 1289 1290 static int ci_write_smc_soft_register(struct radeon_device *rdev, 1291 u16 reg_offset, u32 value) 1292 { 1293 struct ci_power_info *pi = ci_get_pi(rdev); 1294 1295 return ci_write_smc_sram_dword(rdev, 1296 pi->soft_regs_start + reg_offset, 1297 value, pi->sram_end); 1298 } 1299 1300 static void ci_init_fps_limits(struct radeon_device *rdev) 1301 { 1302 struct ci_power_info *pi = ci_get_pi(rdev); 1303 SMU7_Discrete_DpmTable *table = &pi->smc_state_table; 1304 1305 if (pi->caps_fps) { 1306 u16 tmp; 1307 1308 tmp = 45; 1309 table->FpsHighT = cpu_to_be16(tmp); 1310 1311 tmp = 30; 1312 table->FpsLowT = cpu_to_be16(tmp); 1313 } 1314 } 1315 1316 static int ci_update_sclk_t(struct radeon_device *rdev) 1317 { 1318 struct ci_power_info *pi = ci_get_pi(rdev); 1319 int ret = 0; 1320 u32 low_sclk_interrupt_t = 0; 1321 1322 if (pi->caps_sclk_throttle_low_notification) { 1323 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); 1324 1325 ret = ci_copy_bytes_to_smc(rdev, 1326 pi->dpm_table_start + 1327 offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT), 1328 (u8 *)&low_sclk_interrupt_t, 1329 sizeof(u32), pi->sram_end); 1330 1331 } 1332 1333 return ret; 1334 } 1335 1336 static void ci_get_leakage_voltages(struct radeon_device *rdev) 1337 { 1338 struct ci_power_info *pi = ci_get_pi(rdev); 1339 u16 leakage_id, virtual_voltage_id; 1340 u16 vddc, vddci; 1341 int i; 1342 1343 pi->vddc_leakage.count = 0; 1344 pi->vddci_leakage.count = 0; 1345 1346 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { 1347 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { 1348 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; 1349 if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0) 1350 continue; 1351 if (vddc != 0 && vddc != virtual_voltage_id) { 1352 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc; 1353 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id; 1354 pi->vddc_leakage.count++; 1355 } 1356 } 1357 } else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) { 1358 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { 1359 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; 1360 if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci, 1361 virtual_voltage_id, 1362 leakage_id) == 0) { 1363 if (vddc != 0 && vddc != virtual_voltage_id) { 1364 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc; 1365 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id; 1366 pi->vddc_leakage.count++; 1367 } 1368 if (vddci != 0 && vddci != virtual_voltage_id) { 1369 pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci; 1370 pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id; 1371 pi->vddci_leakage.count++; 1372 } 1373 } 1374 } 1375 } 1376 } 1377 1378 static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources) 1379 { 1380 struct ci_power_info *pi = ci_get_pi(rdev); 1381 bool want_thermal_protection; 1382 enum radeon_dpm_event_src dpm_event_src; 1383 u32 tmp; 1384 1385 switch (sources) { 1386 case 0: 1387 default: 1388 want_thermal_protection = false; 1389 break; 1390 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL): 1391 want_thermal_protection = true; 1392 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL; 1393 break; 1394 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL): 1395 want_thermal_protection = true; 1396 dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL; 1397 break; 1398 case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | 1399 (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)): 1400 want_thermal_protection = true; 1401 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; 1402 break; 1403 } 1404 1405 if (want_thermal_protection) { 1406 #if 0 1407 /* XXX: need to figure out how to handle this properly */ 1408 tmp = RREG32_SMC(CG_THERMAL_CTRL); 1409 tmp &= DPM_EVENT_SRC_MASK; 1410 tmp |= DPM_EVENT_SRC(dpm_event_src); 1411 WREG32_SMC(CG_THERMAL_CTRL, tmp); 1412 #endif 1413 1414 tmp = RREG32_SMC(GENERAL_PWRMGT); 1415 if (pi->thermal_protection) 1416 tmp &= ~THERMAL_PROTECTION_DIS; 1417 else 1418 tmp |= THERMAL_PROTECTION_DIS; 1419 WREG32_SMC(GENERAL_PWRMGT, tmp); 1420 } else { 1421 tmp = RREG32_SMC(GENERAL_PWRMGT); 1422 tmp |= THERMAL_PROTECTION_DIS; 1423 WREG32_SMC(GENERAL_PWRMGT, tmp); 1424 } 1425 } 1426 1427 static void ci_enable_auto_throttle_source(struct radeon_device *rdev, 1428 enum radeon_dpm_auto_throttle_src source, 1429 bool enable) 1430 { 1431 struct ci_power_info *pi = ci_get_pi(rdev); 1432 1433 if (enable) { 1434 if (!(pi->active_auto_throttle_sources & (1 << source))) { 1435 pi->active_auto_throttle_sources |= 1 << source; 1436 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources); 1437 } 1438 } else { 1439 if (pi->active_auto_throttle_sources & (1 << source)) { 1440 pi->active_auto_throttle_sources &= ~(1 << source); 1441 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources); 1442 } 1443 } 1444 } 1445 1446 static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev) 1447 { 1448 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) 1449 ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt); 1450 } 1451 1452 static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev) 1453 { 1454 struct ci_power_info *pi = ci_get_pi(rdev); 1455 PPSMC_Result smc_result; 1456 1457 if (!pi->need_update_smu7_dpm_table) 1458 return 0; 1459 1460 if ((!pi->sclk_dpm_key_disabled) && 1461 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { 1462 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel); 1463 if (smc_result != PPSMC_Result_OK) 1464 return -EINVAL; 1465 } 1466 1467 if ((!pi->mclk_dpm_key_disabled) && 1468 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { 1469 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel); 1470 if (smc_result != PPSMC_Result_OK) 1471 return -EINVAL; 1472 } 1473 1474 pi->need_update_smu7_dpm_table = 0; 1475 return 0; 1476 } 1477 1478 static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable) 1479 { 1480 struct ci_power_info *pi = ci_get_pi(rdev); 1481 PPSMC_Result smc_result; 1482 1483 if (enable) { 1484 if (!pi->sclk_dpm_key_disabled) { 1485 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable); 1486 if (smc_result != PPSMC_Result_OK) 1487 return -EINVAL; 1488 } 1489 1490 if (!pi->mclk_dpm_key_disabled) { 1491 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable); 1492 if (smc_result != PPSMC_Result_OK) 1493 return -EINVAL; 1494 1495 WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN); 1496 1497 WREG32_SMC(LCAC_MC0_CNTL, 0x05); 1498 WREG32_SMC(LCAC_MC1_CNTL, 0x05); 1499 WREG32_SMC(LCAC_CPL_CNTL, 0x100005); 1500 1501 udelay(10); 1502 1503 WREG32_SMC(LCAC_MC0_CNTL, 0x400005); 1504 WREG32_SMC(LCAC_MC1_CNTL, 0x400005); 1505 WREG32_SMC(LCAC_CPL_CNTL, 0x500005); 1506 } 1507 } else { 1508 if (!pi->sclk_dpm_key_disabled) { 1509 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable); 1510 if (smc_result != PPSMC_Result_OK) 1511 return -EINVAL; 1512 } 1513 1514 if (!pi->mclk_dpm_key_disabled) { 1515 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable); 1516 if (smc_result != PPSMC_Result_OK) 1517 return -EINVAL; 1518 } 1519 } 1520 1521 return 0; 1522 } 1523 1524 static int ci_start_dpm(struct radeon_device *rdev) 1525 { 1526 struct ci_power_info *pi = ci_get_pi(rdev); 1527 PPSMC_Result smc_result; 1528 int ret; 1529 u32 tmp; 1530 1531 tmp = RREG32_SMC(GENERAL_PWRMGT); 1532 tmp |= GLOBAL_PWRMGT_EN; 1533 WREG32_SMC(GENERAL_PWRMGT, tmp); 1534 1535 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 1536 tmp |= DYNAMIC_PM_EN; 1537 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 1538 1539 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000); 1540 1541 WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN); 1542 1543 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable); 1544 if (smc_result != PPSMC_Result_OK) 1545 return -EINVAL; 1546 1547 ret = ci_enable_sclk_mclk_dpm(rdev, true); 1548 if (ret) 1549 return ret; 1550 1551 if (!pi->pcie_dpm_key_disabled) { 1552 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable); 1553 if (smc_result != PPSMC_Result_OK) 1554 return -EINVAL; 1555 } 1556 1557 return 0; 1558 } 1559 1560 static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev) 1561 { 1562 struct ci_power_info *pi = ci_get_pi(rdev); 1563 PPSMC_Result smc_result; 1564 1565 if (!pi->need_update_smu7_dpm_table) 1566 return 0; 1567 1568 if ((!pi->sclk_dpm_key_disabled) && 1569 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { 1570 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel); 1571 if (smc_result != PPSMC_Result_OK) 1572 return -EINVAL; 1573 } 1574 1575 if ((!pi->mclk_dpm_key_disabled) && 1576 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { 1577 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel); 1578 if (smc_result != PPSMC_Result_OK) 1579 return -EINVAL; 1580 } 1581 1582 return 0; 1583 } 1584 1585 static int ci_stop_dpm(struct radeon_device *rdev) 1586 { 1587 struct ci_power_info *pi = ci_get_pi(rdev); 1588 PPSMC_Result smc_result; 1589 int ret; 1590 u32 tmp; 1591 1592 tmp = RREG32_SMC(GENERAL_PWRMGT); 1593 tmp &= ~GLOBAL_PWRMGT_EN; 1594 WREG32_SMC(GENERAL_PWRMGT, tmp); 1595 1596 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 1597 tmp &= ~DYNAMIC_PM_EN; 1598 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 1599 1600 if (!pi->pcie_dpm_key_disabled) { 1601 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable); 1602 if (smc_result != PPSMC_Result_OK) 1603 return -EINVAL; 1604 } 1605 1606 ret = ci_enable_sclk_mclk_dpm(rdev, false); 1607 if (ret) 1608 return ret; 1609 1610 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable); 1611 if (smc_result != PPSMC_Result_OK) 1612 return -EINVAL; 1613 1614 return 0; 1615 } 1616 1617 static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable) 1618 { 1619 u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 1620 1621 if (enable) 1622 tmp &= ~SCLK_PWRMGT_OFF; 1623 else 1624 tmp |= SCLK_PWRMGT_OFF; 1625 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 1626 } 1627 1628 #if 0 1629 static int ci_notify_hw_of_power_source(struct radeon_device *rdev, 1630 bool ac_power) 1631 { 1632 struct ci_power_info *pi = ci_get_pi(rdev); 1633 struct radeon_cac_tdp_table *cac_tdp_table = 1634 rdev->pm.dpm.dyn_state.cac_tdp_table; 1635 u32 power_limit; 1636 1637 if (ac_power) 1638 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256); 1639 else 1640 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256); 1641 1642 ci_set_power_limit(rdev, power_limit); 1643 1644 if (pi->caps_automatic_dc_transition) { 1645 if (ac_power) 1646 ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC); 1647 else 1648 ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp); 1649 } 1650 1651 return 0; 1652 } 1653 #endif 1654 1655 static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg) 1656 { 1657 u32 tmp; 1658 int i; 1659 1660 if (!ci_is_smc_running(rdev)) 1661 return PPSMC_Result_Failed; 1662 1663 WREG32(SMC_MESSAGE_0, msg); 1664 1665 for (i = 0; i < rdev->usec_timeout; i++) { 1666 tmp = RREG32(SMC_RESP_0); 1667 if (tmp != 0) 1668 break; 1669 udelay(1); 1670 } 1671 tmp = RREG32(SMC_RESP_0); 1672 1673 return (PPSMC_Result)tmp; 1674 } 1675 1676 static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev, 1677 PPSMC_Msg msg, u32 parameter) 1678 { 1679 WREG32(SMC_MSG_ARG_0, parameter); 1680 return ci_send_msg_to_smc(rdev, msg); 1681 } 1682 1683 static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev, 1684 PPSMC_Msg msg, u32 *parameter) 1685 { 1686 PPSMC_Result smc_result; 1687 1688 smc_result = ci_send_msg_to_smc(rdev, msg); 1689 1690 if ((smc_result == PPSMC_Result_OK) && parameter) 1691 *parameter = RREG32(SMC_MSG_ARG_0); 1692 1693 return smc_result; 1694 } 1695 1696 static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n) 1697 { 1698 struct ci_power_info *pi = ci_get_pi(rdev); 1699 1700 if (!pi->sclk_dpm_key_disabled) { 1701 PPSMC_Result smc_result = 1702 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n); 1703 if (smc_result != PPSMC_Result_OK) 1704 return -EINVAL; 1705 } 1706 1707 return 0; 1708 } 1709 1710 static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n) 1711 { 1712 struct ci_power_info *pi = ci_get_pi(rdev); 1713 1714 if (!pi->mclk_dpm_key_disabled) { 1715 PPSMC_Result smc_result = 1716 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n); 1717 if (smc_result != PPSMC_Result_OK) 1718 return -EINVAL; 1719 } 1720 1721 return 0; 1722 } 1723 1724 static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n) 1725 { 1726 struct ci_power_info *pi = ci_get_pi(rdev); 1727 1728 if (!pi->pcie_dpm_key_disabled) { 1729 PPSMC_Result smc_result = 1730 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n); 1731 if (smc_result != PPSMC_Result_OK) 1732 return -EINVAL; 1733 } 1734 1735 return 0; 1736 } 1737 1738 static int ci_set_power_limit(struct radeon_device *rdev, u32 n) 1739 { 1740 struct ci_power_info *pi = ci_get_pi(rdev); 1741 1742 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) { 1743 PPSMC_Result smc_result = 1744 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n); 1745 if (smc_result != PPSMC_Result_OK) 1746 return -EINVAL; 1747 } 1748 1749 return 0; 1750 } 1751 1752 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev, 1753 u32 target_tdp) 1754 { 1755 PPSMC_Result smc_result = 1756 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); 1757 if (smc_result != PPSMC_Result_OK) 1758 return -EINVAL; 1759 return 0; 1760 } 1761 1762 #if 0 1763 static int ci_set_boot_state(struct radeon_device *rdev) 1764 { 1765 return ci_enable_sclk_mclk_dpm(rdev, false); 1766 } 1767 #endif 1768 1769 static u32 ci_get_average_sclk_freq(struct radeon_device *rdev) 1770 { 1771 u32 sclk_freq; 1772 PPSMC_Result smc_result = 1773 ci_send_msg_to_smc_return_parameter(rdev, 1774 PPSMC_MSG_API_GetSclkFrequency, 1775 &sclk_freq); 1776 if (smc_result != PPSMC_Result_OK) 1777 sclk_freq = 0; 1778 1779 return sclk_freq; 1780 } 1781 1782 static u32 ci_get_average_mclk_freq(struct radeon_device *rdev) 1783 { 1784 u32 mclk_freq; 1785 PPSMC_Result smc_result = 1786 ci_send_msg_to_smc_return_parameter(rdev, 1787 PPSMC_MSG_API_GetMclkFrequency, 1788 &mclk_freq); 1789 if (smc_result != PPSMC_Result_OK) 1790 mclk_freq = 0; 1791 1792 return mclk_freq; 1793 } 1794 1795 static void ci_dpm_start_smc(struct radeon_device *rdev) 1796 { 1797 int i; 1798 1799 ci_program_jump_on_start(rdev); 1800 ci_start_smc_clock(rdev); 1801 ci_start_smc(rdev); 1802 for (i = 0; i < rdev->usec_timeout; i++) { 1803 if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED) 1804 break; 1805 } 1806 } 1807 1808 static void ci_dpm_stop_smc(struct radeon_device *rdev) 1809 { 1810 ci_reset_smc(rdev); 1811 ci_stop_smc_clock(rdev); 1812 } 1813 1814 static int ci_process_firmware_header(struct radeon_device *rdev) 1815 { 1816 struct ci_power_info *pi = ci_get_pi(rdev); 1817 u32 tmp; 1818 int ret; 1819 1820 ret = ci_read_smc_sram_dword(rdev, 1821 SMU7_FIRMWARE_HEADER_LOCATION + 1822 offsetof(SMU7_Firmware_Header, DpmTable), 1823 &tmp, pi->sram_end); 1824 if (ret) 1825 return ret; 1826 1827 pi->dpm_table_start = tmp; 1828 1829 ret = ci_read_smc_sram_dword(rdev, 1830 SMU7_FIRMWARE_HEADER_LOCATION + 1831 offsetof(SMU7_Firmware_Header, SoftRegisters), 1832 &tmp, pi->sram_end); 1833 if (ret) 1834 return ret; 1835 1836 pi->soft_regs_start = tmp; 1837 1838 ret = ci_read_smc_sram_dword(rdev, 1839 SMU7_FIRMWARE_HEADER_LOCATION + 1840 offsetof(SMU7_Firmware_Header, mcRegisterTable), 1841 &tmp, pi->sram_end); 1842 if (ret) 1843 return ret; 1844 1845 pi->mc_reg_table_start = tmp; 1846 1847 ret = ci_read_smc_sram_dword(rdev, 1848 SMU7_FIRMWARE_HEADER_LOCATION + 1849 offsetof(SMU7_Firmware_Header, FanTable), 1850 &tmp, pi->sram_end); 1851 if (ret) 1852 return ret; 1853 1854 pi->fan_table_start = tmp; 1855 1856 ret = ci_read_smc_sram_dword(rdev, 1857 SMU7_FIRMWARE_HEADER_LOCATION + 1858 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable), 1859 &tmp, pi->sram_end); 1860 if (ret) 1861 return ret; 1862 1863 pi->arb_table_start = tmp; 1864 1865 return 0; 1866 } 1867 1868 static void ci_read_clock_registers(struct radeon_device *rdev) 1869 { 1870 struct ci_power_info *pi = ci_get_pi(rdev); 1871 1872 pi->clock_registers.cg_spll_func_cntl = 1873 RREG32_SMC(CG_SPLL_FUNC_CNTL); 1874 pi->clock_registers.cg_spll_func_cntl_2 = 1875 RREG32_SMC(CG_SPLL_FUNC_CNTL_2); 1876 pi->clock_registers.cg_spll_func_cntl_3 = 1877 RREG32_SMC(CG_SPLL_FUNC_CNTL_3); 1878 pi->clock_registers.cg_spll_func_cntl_4 = 1879 RREG32_SMC(CG_SPLL_FUNC_CNTL_4); 1880 pi->clock_registers.cg_spll_spread_spectrum = 1881 RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM); 1882 pi->clock_registers.cg_spll_spread_spectrum_2 = 1883 RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2); 1884 pi->clock_registers.dll_cntl = RREG32(DLL_CNTL); 1885 pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL); 1886 pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL); 1887 pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL); 1888 pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL); 1889 pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1); 1890 pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2); 1891 pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1); 1892 pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2); 1893 } 1894 1895 static void ci_init_sclk_t(struct radeon_device *rdev) 1896 { 1897 struct ci_power_info *pi = ci_get_pi(rdev); 1898 1899 pi->low_sclk_interrupt_t = 0; 1900 } 1901 1902 static void ci_enable_thermal_protection(struct radeon_device *rdev, 1903 bool enable) 1904 { 1905 u32 tmp = RREG32_SMC(GENERAL_PWRMGT); 1906 1907 if (enable) 1908 tmp &= ~THERMAL_PROTECTION_DIS; 1909 else 1910 tmp |= THERMAL_PROTECTION_DIS; 1911 WREG32_SMC(GENERAL_PWRMGT, tmp); 1912 } 1913 1914 static void ci_enable_acpi_power_management(struct radeon_device *rdev) 1915 { 1916 u32 tmp = RREG32_SMC(GENERAL_PWRMGT); 1917 1918 tmp |= STATIC_PM_EN; 1919 1920 WREG32_SMC(GENERAL_PWRMGT, tmp); 1921 } 1922 1923 #if 0 1924 static int ci_enter_ulp_state(struct radeon_device *rdev) 1925 { 1926 1927 WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower); 1928 1929 udelay(25000); 1930 1931 return 0; 1932 } 1933 1934 static int ci_exit_ulp_state(struct radeon_device *rdev) 1935 { 1936 int i; 1937 1938 WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower); 1939 1940 udelay(7000); 1941 1942 for (i = 0; i < rdev->usec_timeout; i++) { 1943 if (RREG32(SMC_RESP_0) == 1) 1944 break; 1945 udelay(1000); 1946 } 1947 1948 return 0; 1949 } 1950 #endif 1951 1952 static int ci_notify_smc_display_change(struct radeon_device *rdev, 1953 bool has_display) 1954 { 1955 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay; 1956 1957 return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL; 1958 } 1959 1960 static int ci_enable_ds_master_switch(struct radeon_device *rdev, 1961 bool enable) 1962 { 1963 struct ci_power_info *pi = ci_get_pi(rdev); 1964 1965 if (enable) { 1966 if (pi->caps_sclk_ds) { 1967 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK) 1968 return -EINVAL; 1969 } else { 1970 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) 1971 return -EINVAL; 1972 } 1973 } else { 1974 if (pi->caps_sclk_ds) { 1975 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) 1976 return -EINVAL; 1977 } 1978 } 1979 1980 return 0; 1981 } 1982 1983 static void ci_program_display_gap(struct radeon_device *rdev) 1984 { 1985 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL); 1986 u32 pre_vbi_time_in_us; 1987 u32 frame_time_in_us; 1988 u32 ref_clock = rdev->clock.spll.reference_freq; 1989 u32 refresh_rate = r600_dpm_get_vrefresh(rdev); 1990 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 1991 1992 tmp &= ~DISP_GAP_MASK; 1993 if (rdev->pm.dpm.new_active_crtc_count > 0) 1994 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); 1995 else 1996 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE); 1997 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp); 1998 1999 if (refresh_rate == 0) 2000 refresh_rate = 60; 2001 if (vblank_time == 0xffffffff) 2002 vblank_time = 500; 2003 frame_time_in_us = 1000000 / refresh_rate; 2004 pre_vbi_time_in_us = 2005 frame_time_in_us - 200 - vblank_time; 2006 tmp = pre_vbi_time_in_us * (ref_clock / 100); 2007 2008 WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp); 2009 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64); 2010 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); 2011 2012 2013 ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1)); 2014 2015 } 2016 2017 static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable) 2018 { 2019 struct ci_power_info *pi = ci_get_pi(rdev); 2020 u32 tmp; 2021 2022 if (enable) { 2023 if (pi->caps_sclk_ss_support) { 2024 tmp = RREG32_SMC(GENERAL_PWRMGT); 2025 tmp |= DYN_SPREAD_SPECTRUM_EN; 2026 WREG32_SMC(GENERAL_PWRMGT, tmp); 2027 } 2028 } else { 2029 tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM); 2030 tmp &= ~SSEN; 2031 WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp); 2032 2033 tmp = RREG32_SMC(GENERAL_PWRMGT); 2034 tmp &= ~DYN_SPREAD_SPECTRUM_EN; 2035 WREG32_SMC(GENERAL_PWRMGT, tmp); 2036 } 2037 } 2038 2039 static void ci_program_sstp(struct radeon_device *rdev) 2040 { 2041 WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT))); 2042 } 2043 2044 static void ci_enable_display_gap(struct radeon_device *rdev) 2045 { 2046 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL); 2047 2048 tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK); 2049 tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) | 2050 DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK)); 2051 2052 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp); 2053 } 2054 2055 static void ci_program_vc(struct radeon_device *rdev) 2056 { 2057 u32 tmp; 2058 2059 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 2060 tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT); 2061 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 2062 2063 WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0); 2064 WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1); 2065 WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2); 2066 WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3); 2067 WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4); 2068 WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5); 2069 WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6); 2070 WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7); 2071 } 2072 2073 static void ci_clear_vc(struct radeon_device *rdev) 2074 { 2075 u32 tmp; 2076 2077 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 2078 tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT); 2079 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 2080 2081 WREG32_SMC(CG_FTV_0, 0); 2082 WREG32_SMC(CG_FTV_1, 0); 2083 WREG32_SMC(CG_FTV_2, 0); 2084 WREG32_SMC(CG_FTV_3, 0); 2085 WREG32_SMC(CG_FTV_4, 0); 2086 WREG32_SMC(CG_FTV_5, 0); 2087 WREG32_SMC(CG_FTV_6, 0); 2088 WREG32_SMC(CG_FTV_7, 0); 2089 } 2090 2091 static int ci_upload_firmware(struct radeon_device *rdev) 2092 { 2093 struct ci_power_info *pi = ci_get_pi(rdev); 2094 int i, ret; 2095 2096 for (i = 0; i < rdev->usec_timeout; i++) { 2097 if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE) 2098 break; 2099 } 2100 WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1); 2101 2102 ci_stop_smc_clock(rdev); 2103 ci_reset_smc(rdev); 2104 2105 ret = ci_load_smc_ucode(rdev, pi->sram_end); 2106 2107 return ret; 2108 2109 } 2110 2111 static int ci_get_svi2_voltage_table(struct radeon_device *rdev, 2112 struct radeon_clock_voltage_dependency_table *voltage_dependency_table, 2113 struct atom_voltage_table *voltage_table) 2114 { 2115 u32 i; 2116 2117 if (voltage_dependency_table == NULL) 2118 return -EINVAL; 2119 2120 voltage_table->mask_low = 0; 2121 voltage_table->phase_delay = 0; 2122 2123 voltage_table->count = voltage_dependency_table->count; 2124 for (i = 0; i < voltage_table->count; i++) { 2125 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v; 2126 voltage_table->entries[i].smio_low = 0; 2127 } 2128 2129 return 0; 2130 } 2131 2132 static int ci_construct_voltage_tables(struct radeon_device *rdev) 2133 { 2134 struct ci_power_info *pi = ci_get_pi(rdev); 2135 int ret; 2136 2137 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { 2138 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC, 2139 VOLTAGE_OBJ_GPIO_LUT, 2140 &pi->vddc_voltage_table); 2141 if (ret) 2142 return ret; 2143 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 2144 ret = ci_get_svi2_voltage_table(rdev, 2145 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, 2146 &pi->vddc_voltage_table); 2147 if (ret) 2148 return ret; 2149 } 2150 2151 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC) 2152 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC, 2153 &pi->vddc_voltage_table); 2154 2155 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { 2156 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI, 2157 VOLTAGE_OBJ_GPIO_LUT, 2158 &pi->vddci_voltage_table); 2159 if (ret) 2160 return ret; 2161 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 2162 ret = ci_get_svi2_voltage_table(rdev, 2163 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, 2164 &pi->vddci_voltage_table); 2165 if (ret) 2166 return ret; 2167 } 2168 2169 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI) 2170 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI, 2171 &pi->vddci_voltage_table); 2172 2173 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { 2174 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC, 2175 VOLTAGE_OBJ_GPIO_LUT, 2176 &pi->mvdd_voltage_table); 2177 if (ret) 2178 return ret; 2179 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 2180 ret = ci_get_svi2_voltage_table(rdev, 2181 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, 2182 &pi->mvdd_voltage_table); 2183 if (ret) 2184 return ret; 2185 } 2186 2187 if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD) 2188 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD, 2189 &pi->mvdd_voltage_table); 2190 2191 return 0; 2192 } 2193 2194 static void ci_populate_smc_voltage_table(struct radeon_device *rdev, 2195 struct atom_voltage_table_entry *voltage_table, 2196 SMU7_Discrete_VoltageLevel *smc_voltage_table) 2197 { 2198 int ret; 2199 2200 ret = ci_get_std_voltage_value_sidd(rdev, voltage_table, 2201 &smc_voltage_table->StdVoltageHiSidd, 2202 &smc_voltage_table->StdVoltageLoSidd); 2203 2204 if (ret) { 2205 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE; 2206 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE; 2207 } 2208 2209 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE); 2210 smc_voltage_table->StdVoltageHiSidd = 2211 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd); 2212 smc_voltage_table->StdVoltageLoSidd = 2213 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd); 2214 } 2215 2216 static int ci_populate_smc_vddc_table(struct radeon_device *rdev, 2217 SMU7_Discrete_DpmTable *table) 2218 { 2219 struct ci_power_info *pi = ci_get_pi(rdev); 2220 unsigned int count; 2221 2222 table->VddcLevelCount = pi->vddc_voltage_table.count; 2223 for (count = 0; count < table->VddcLevelCount; count++) { 2224 ci_populate_smc_voltage_table(rdev, 2225 &pi->vddc_voltage_table.entries[count], 2226 &table->VddcLevel[count]); 2227 2228 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) 2229 table->VddcLevel[count].Smio |= 2230 pi->vddc_voltage_table.entries[count].smio_low; 2231 else 2232 table->VddcLevel[count].Smio = 0; 2233 } 2234 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount); 2235 2236 return 0; 2237 } 2238 2239 static int ci_populate_smc_vddci_table(struct radeon_device *rdev, 2240 SMU7_Discrete_DpmTable *table) 2241 { 2242 unsigned int count; 2243 struct ci_power_info *pi = ci_get_pi(rdev); 2244 2245 table->VddciLevelCount = pi->vddci_voltage_table.count; 2246 for (count = 0; count < table->VddciLevelCount; count++) { 2247 ci_populate_smc_voltage_table(rdev, 2248 &pi->vddci_voltage_table.entries[count], 2249 &table->VddciLevel[count]); 2250 2251 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) 2252 table->VddciLevel[count].Smio |= 2253 pi->vddci_voltage_table.entries[count].smio_low; 2254 else 2255 table->VddciLevel[count].Smio = 0; 2256 } 2257 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount); 2258 2259 return 0; 2260 } 2261 2262 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev, 2263 SMU7_Discrete_DpmTable *table) 2264 { 2265 struct ci_power_info *pi = ci_get_pi(rdev); 2266 unsigned int count; 2267 2268 table->MvddLevelCount = pi->mvdd_voltage_table.count; 2269 for (count = 0; count < table->MvddLevelCount; count++) { 2270 ci_populate_smc_voltage_table(rdev, 2271 &pi->mvdd_voltage_table.entries[count], 2272 &table->MvddLevel[count]); 2273 2274 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) 2275 table->MvddLevel[count].Smio |= 2276 pi->mvdd_voltage_table.entries[count].smio_low; 2277 else 2278 table->MvddLevel[count].Smio = 0; 2279 } 2280 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount); 2281 2282 return 0; 2283 } 2284 2285 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev, 2286 SMU7_Discrete_DpmTable *table) 2287 { 2288 int ret; 2289 2290 ret = ci_populate_smc_vddc_table(rdev, table); 2291 if (ret) 2292 return ret; 2293 2294 ret = ci_populate_smc_vddci_table(rdev, table); 2295 if (ret) 2296 return ret; 2297 2298 ret = ci_populate_smc_mvdd_table(rdev, table); 2299 if (ret) 2300 return ret; 2301 2302 return 0; 2303 } 2304 2305 static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk, 2306 SMU7_Discrete_VoltageLevel *voltage) 2307 { 2308 struct ci_power_info *pi = ci_get_pi(rdev); 2309 u32 i = 0; 2310 2311 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) { 2312 for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) { 2313 if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) { 2314 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value; 2315 break; 2316 } 2317 } 2318 2319 if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count) 2320 return -EINVAL; 2321 } 2322 2323 return -EINVAL; 2324 } 2325 2326 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, 2327 struct atom_voltage_table_entry *voltage_table, 2328 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd) 2329 { 2330 u16 v_index, idx; 2331 bool voltage_found = false; 2332 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE; 2333 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE; 2334 2335 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL) 2336 return -EINVAL; 2337 2338 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) { 2339 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { 2340 if (voltage_table->value == 2341 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { 2342 voltage_found = true; 2343 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count) 2344 idx = v_index; 2345 else 2346 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1; 2347 *std_voltage_lo_sidd = 2348 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; 2349 *std_voltage_hi_sidd = 2350 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; 2351 break; 2352 } 2353 } 2354 2355 if (!voltage_found) { 2356 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { 2357 if (voltage_table->value <= 2358 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { 2359 voltage_found = true; 2360 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count) 2361 idx = v_index; 2362 else 2363 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1; 2364 *std_voltage_lo_sidd = 2365 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; 2366 *std_voltage_hi_sidd = 2367 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; 2368 break; 2369 } 2370 } 2371 } 2372 } 2373 2374 return 0; 2375 } 2376 2377 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev, 2378 const struct radeon_phase_shedding_limits_table *limits, 2379 u32 sclk, 2380 u32 *phase_shedding) 2381 { 2382 unsigned int i; 2383 2384 *phase_shedding = 1; 2385 2386 for (i = 0; i < limits->count; i++) { 2387 if (sclk < limits->entries[i].sclk) { 2388 *phase_shedding = i; 2389 break; 2390 } 2391 } 2392 } 2393 2394 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev, 2395 const struct radeon_phase_shedding_limits_table *limits, 2396 u32 mclk, 2397 u32 *phase_shedding) 2398 { 2399 unsigned int i; 2400 2401 *phase_shedding = 1; 2402 2403 for (i = 0; i < limits->count; i++) { 2404 if (mclk < limits->entries[i].mclk) { 2405 *phase_shedding = i; 2406 break; 2407 } 2408 } 2409 } 2410 2411 static int ci_init_arb_table_index(struct radeon_device *rdev) 2412 { 2413 struct ci_power_info *pi = ci_get_pi(rdev); 2414 u32 tmp; 2415 int ret; 2416 2417 ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start, 2418 &tmp, pi->sram_end); 2419 if (ret) 2420 return ret; 2421 2422 tmp &= 0x00FFFFFF; 2423 tmp |= MC_CG_ARB_FREQ_F1 << 24; 2424 2425 return ci_write_smc_sram_dword(rdev, pi->arb_table_start, 2426 tmp, pi->sram_end); 2427 } 2428 2429 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev, 2430 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table, 2431 u32 clock, u32 *voltage) 2432 { 2433 u32 i = 0; 2434 2435 if (allowed_clock_voltage_table->count == 0) 2436 return -EINVAL; 2437 2438 for (i = 0; i < allowed_clock_voltage_table->count; i++) { 2439 if (allowed_clock_voltage_table->entries[i].clk >= clock) { 2440 *voltage = allowed_clock_voltage_table->entries[i].v; 2441 return 0; 2442 } 2443 } 2444 2445 *voltage = allowed_clock_voltage_table->entries[i-1].v; 2446 2447 return 0; 2448 } 2449 2450 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev, 2451 u32 sclk, u32 min_sclk_in_sr) 2452 { 2453 u32 i; 2454 u32 tmp; 2455 u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ? 2456 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK; 2457 2458 if (sclk < min) 2459 return 0; 2460 2461 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { 2462 tmp = sclk / (1 << i); 2463 if (tmp >= min || i == 0) 2464 break; 2465 } 2466 2467 return (u8)i; 2468 } 2469 2470 static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev) 2471 { 2472 return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); 2473 } 2474 2475 static int ci_reset_to_default(struct radeon_device *rdev) 2476 { 2477 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ? 2478 0 : -EINVAL; 2479 } 2480 2481 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev) 2482 { 2483 u32 tmp; 2484 2485 tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8; 2486 2487 if (tmp == MC_CG_ARB_FREQ_F0) 2488 return 0; 2489 2490 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0); 2491 } 2492 2493 static void ci_register_patching_mc_arb(struct radeon_device *rdev, 2494 const u32 engine_clock, 2495 const u32 memory_clock, 2496 u32 *dram_timimg2) 2497 { 2498 bool patch; 2499 u32 tmp, tmp2; 2500 2501 tmp = RREG32(MC_SEQ_MISC0); 2502 patch = ((tmp & 0x0000f00) == 0x300) ? true : false; 2503 2504 if (patch && 2505 ((rdev->pdev->device == 0x67B0) || 2506 (rdev->pdev->device == 0x67B1))) { 2507 if ((memory_clock > 100000) && (memory_clock <= 125000)) { 2508 tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff; 2509 *dram_timimg2 &= ~0x00ff0000; 2510 *dram_timimg2 |= tmp2 << 16; 2511 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) { 2512 tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff; 2513 *dram_timimg2 &= ~0x00ff0000; 2514 *dram_timimg2 |= tmp2 << 16; 2515 } 2516 } 2517 } 2518 2519 2520 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev, 2521 u32 sclk, 2522 u32 mclk, 2523 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs) 2524 { 2525 u32 dram_timing; 2526 u32 dram_timing2; 2527 u32 burst_time; 2528 2529 radeon_atom_set_engine_dram_timings(rdev, sclk, mclk); 2530 2531 dram_timing = RREG32(MC_ARB_DRAM_TIMING); 2532 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); 2533 burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK; 2534 2535 ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2); 2536 2537 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing); 2538 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2); 2539 arb_regs->McArbBurstTime = (u8)burst_time; 2540 2541 return 0; 2542 } 2543 2544 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev) 2545 { 2546 struct ci_power_info *pi = ci_get_pi(rdev); 2547 SMU7_Discrete_MCArbDramTimingTable arb_regs; 2548 u32 i, j; 2549 int ret = 0; 2550 2551 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable)); 2552 2553 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) { 2554 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) { 2555 ret = ci_populate_memory_timing_parameters(rdev, 2556 pi->dpm_table.sclk_table.dpm_levels[i].value, 2557 pi->dpm_table.mclk_table.dpm_levels[j].value, 2558 &arb_regs.entries[i][j]); 2559 if (ret) 2560 break; 2561 } 2562 } 2563 2564 if (ret == 0) 2565 ret = ci_copy_bytes_to_smc(rdev, 2566 pi->arb_table_start, 2567 (u8 *)&arb_regs, 2568 sizeof(SMU7_Discrete_MCArbDramTimingTable), 2569 pi->sram_end); 2570 2571 return ret; 2572 } 2573 2574 static int ci_program_memory_timing_parameters(struct radeon_device *rdev) 2575 { 2576 struct ci_power_info *pi = ci_get_pi(rdev); 2577 2578 if (pi->need_update_smu7_dpm_table == 0) 2579 return 0; 2580 2581 return ci_do_program_memory_timing_parameters(rdev); 2582 } 2583 2584 static void ci_populate_smc_initial_state(struct radeon_device *rdev, 2585 struct radeon_ps *radeon_boot_state) 2586 { 2587 struct ci_ps *boot_state = ci_get_ps(radeon_boot_state); 2588 struct ci_power_info *pi = ci_get_pi(rdev); 2589 u32 level = 0; 2590 2591 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) { 2592 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >= 2593 boot_state->performance_levels[0].sclk) { 2594 pi->smc_state_table.GraphicsBootLevel = level; 2595 break; 2596 } 2597 } 2598 2599 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) { 2600 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >= 2601 boot_state->performance_levels[0].mclk) { 2602 pi->smc_state_table.MemoryBootLevel = level; 2603 break; 2604 } 2605 } 2606 } 2607 2608 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table) 2609 { 2610 u32 i; 2611 u32 mask_value = 0; 2612 2613 for (i = dpm_table->count; i > 0; i--) { 2614 mask_value = mask_value << 1; 2615 if (dpm_table->dpm_levels[i-1].enabled) 2616 mask_value |= 0x1; 2617 else 2618 mask_value &= 0xFFFFFFFE; 2619 } 2620 2621 return mask_value; 2622 } 2623 2624 static void ci_populate_smc_link_level(struct radeon_device *rdev, 2625 SMU7_Discrete_DpmTable *table) 2626 { 2627 struct ci_power_info *pi = ci_get_pi(rdev); 2628 struct ci_dpm_table *dpm_table = &pi->dpm_table; 2629 u32 i; 2630 2631 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) { 2632 table->LinkLevel[i].PcieGenSpeed = 2633 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value; 2634 table->LinkLevel[i].PcieLaneCount = 2635 r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); 2636 table->LinkLevel[i].EnabledForActivity = 1; 2637 table->LinkLevel[i].DownT = cpu_to_be32(5); 2638 table->LinkLevel[i].UpT = cpu_to_be32(30); 2639 } 2640 2641 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count; 2642 pi->dpm_level_enable_mask.pcie_dpm_enable_mask = 2643 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); 2644 } 2645 2646 static int ci_populate_smc_uvd_level(struct radeon_device *rdev, 2647 SMU7_Discrete_DpmTable *table) 2648 { 2649 u32 count; 2650 struct atom_clock_dividers dividers; 2651 int ret = -EINVAL; 2652 2653 table->UvdLevelCount = 2654 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count; 2655 2656 for (count = 0; count < table->UvdLevelCount; count++) { 2657 table->UvdLevel[count].VclkFrequency = 2658 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk; 2659 table->UvdLevel[count].DclkFrequency = 2660 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk; 2661 table->UvdLevel[count].MinVddc = 2662 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; 2663 table->UvdLevel[count].MinVddcPhases = 1; 2664 2665 ret = radeon_atom_get_clock_dividers(rdev, 2666 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2667 table->UvdLevel[count].VclkFrequency, false, ÷rs); 2668 if (ret) 2669 return ret; 2670 2671 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider; 2672 2673 ret = radeon_atom_get_clock_dividers(rdev, 2674 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2675 table->UvdLevel[count].DclkFrequency, false, ÷rs); 2676 if (ret) 2677 return ret; 2678 2679 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider; 2680 2681 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency); 2682 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency); 2683 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc); 2684 } 2685 2686 return ret; 2687 } 2688 2689 static int ci_populate_smc_vce_level(struct radeon_device *rdev, 2690 SMU7_Discrete_DpmTable *table) 2691 { 2692 u32 count; 2693 struct atom_clock_dividers dividers; 2694 int ret = -EINVAL; 2695 2696 table->VceLevelCount = 2697 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count; 2698 2699 for (count = 0; count < table->VceLevelCount; count++) { 2700 table->VceLevel[count].Frequency = 2701 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk; 2702 table->VceLevel[count].MinVoltage = 2703 (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; 2704 table->VceLevel[count].MinPhases = 1; 2705 2706 ret = radeon_atom_get_clock_dividers(rdev, 2707 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2708 table->VceLevel[count].Frequency, false, ÷rs); 2709 if (ret) 2710 return ret; 2711 2712 table->VceLevel[count].Divider = (u8)dividers.post_divider; 2713 2714 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency); 2715 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage); 2716 } 2717 2718 return ret; 2719 2720 } 2721 2722 static int ci_populate_smc_acp_level(struct radeon_device *rdev, 2723 SMU7_Discrete_DpmTable *table) 2724 { 2725 u32 count; 2726 struct atom_clock_dividers dividers; 2727 int ret = -EINVAL; 2728 2729 table->AcpLevelCount = (u8) 2730 (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count); 2731 2732 for (count = 0; count < table->AcpLevelCount; count++) { 2733 table->AcpLevel[count].Frequency = 2734 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk; 2735 table->AcpLevel[count].MinVoltage = 2736 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v; 2737 table->AcpLevel[count].MinPhases = 1; 2738 2739 ret = radeon_atom_get_clock_dividers(rdev, 2740 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2741 table->AcpLevel[count].Frequency, false, ÷rs); 2742 if (ret) 2743 return ret; 2744 2745 table->AcpLevel[count].Divider = (u8)dividers.post_divider; 2746 2747 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency); 2748 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage); 2749 } 2750 2751 return ret; 2752 } 2753 2754 static int ci_populate_smc_samu_level(struct radeon_device *rdev, 2755 SMU7_Discrete_DpmTable *table) 2756 { 2757 u32 count; 2758 struct atom_clock_dividers dividers; 2759 int ret = -EINVAL; 2760 2761 table->SamuLevelCount = 2762 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count; 2763 2764 for (count = 0; count < table->SamuLevelCount; count++) { 2765 table->SamuLevel[count].Frequency = 2766 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk; 2767 table->SamuLevel[count].MinVoltage = 2768 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; 2769 table->SamuLevel[count].MinPhases = 1; 2770 2771 ret = radeon_atom_get_clock_dividers(rdev, 2772 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2773 table->SamuLevel[count].Frequency, false, ÷rs); 2774 if (ret) 2775 return ret; 2776 2777 table->SamuLevel[count].Divider = (u8)dividers.post_divider; 2778 2779 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency); 2780 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage); 2781 } 2782 2783 return ret; 2784 } 2785 2786 static int ci_calculate_mclk_params(struct radeon_device *rdev, 2787 u32 memory_clock, 2788 SMU7_Discrete_MemoryLevel *mclk, 2789 bool strobe_mode, 2790 bool dll_state_on) 2791 { 2792 struct ci_power_info *pi = ci_get_pi(rdev); 2793 u32 dll_cntl = pi->clock_registers.dll_cntl; 2794 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl; 2795 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl; 2796 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl; 2797 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl; 2798 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1; 2799 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2; 2800 u32 mpll_ss1 = pi->clock_registers.mpll_ss1; 2801 u32 mpll_ss2 = pi->clock_registers.mpll_ss2; 2802 struct atom_mpll_param mpll_param; 2803 int ret; 2804 2805 ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param); 2806 if (ret) 2807 return ret; 2808 2809 mpll_func_cntl &= ~BWCTRL_MASK; 2810 mpll_func_cntl |= BWCTRL(mpll_param.bwcntl); 2811 2812 mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK); 2813 mpll_func_cntl_1 |= CLKF(mpll_param.clkf) | 2814 CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode); 2815 2816 mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK; 2817 mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div); 2818 2819 if (pi->mem_gddr5) { 2820 mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK); 2821 mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) | 2822 YCLK_POST_DIV(mpll_param.post_div); 2823 } 2824 2825 if (pi->caps_mclk_ss_support) { 2826 struct radeon_atom_ss ss; 2827 u32 freq_nom; 2828 u32 tmp; 2829 u32 reference_clock = rdev->clock.mpll.reference_freq; 2830 2831 if (mpll_param.qdr == 1) 2832 freq_nom = memory_clock * 4 * (1 << mpll_param.post_div); 2833 else 2834 freq_nom = memory_clock * 2 * (1 << mpll_param.post_div); 2835 2836 tmp = (freq_nom / reference_clock); 2837 tmp = tmp * tmp; 2838 if (radeon_atombios_get_asic_ss_info(rdev, &ss, 2839 ASIC_INTERNAL_MEMORY_SS, freq_nom)) { 2840 u32 clks = reference_clock * 5 / ss.rate; 2841 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom); 2842 2843 mpll_ss1 &= ~CLKV_MASK; 2844 mpll_ss1 |= CLKV(clkv); 2845 2846 mpll_ss2 &= ~CLKS_MASK; 2847 mpll_ss2 |= CLKS(clks); 2848 } 2849 } 2850 2851 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK; 2852 mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed); 2853 2854 if (dll_state_on) 2855 mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB; 2856 else 2857 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); 2858 2859 mclk->MclkFrequency = memory_clock; 2860 mclk->MpllFuncCntl = mpll_func_cntl; 2861 mclk->MpllFuncCntl_1 = mpll_func_cntl_1; 2862 mclk->MpllFuncCntl_2 = mpll_func_cntl_2; 2863 mclk->MpllAdFuncCntl = mpll_ad_func_cntl; 2864 mclk->MpllDqFuncCntl = mpll_dq_func_cntl; 2865 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; 2866 mclk->DllCntl = dll_cntl; 2867 mclk->MpllSs1 = mpll_ss1; 2868 mclk->MpllSs2 = mpll_ss2; 2869 2870 return 0; 2871 } 2872 2873 static int ci_populate_single_memory_level(struct radeon_device *rdev, 2874 u32 memory_clock, 2875 SMU7_Discrete_MemoryLevel *memory_level) 2876 { 2877 struct ci_power_info *pi = ci_get_pi(rdev); 2878 int ret; 2879 bool dll_state_on; 2880 2881 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) { 2882 ret = ci_get_dependency_volt_by_clk(rdev, 2883 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, 2884 memory_clock, &memory_level->MinVddc); 2885 if (ret) 2886 return ret; 2887 } 2888 2889 if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) { 2890 ret = ci_get_dependency_volt_by_clk(rdev, 2891 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, 2892 memory_clock, &memory_level->MinVddci); 2893 if (ret) 2894 return ret; 2895 } 2896 2897 if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) { 2898 ret = ci_get_dependency_volt_by_clk(rdev, 2899 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, 2900 memory_clock, &memory_level->MinMvdd); 2901 if (ret) 2902 return ret; 2903 } 2904 2905 memory_level->MinVddcPhases = 1; 2906 2907 if (pi->vddc_phase_shed_control) 2908 ci_populate_phase_value_based_on_mclk(rdev, 2909 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table, 2910 memory_clock, 2911 &memory_level->MinVddcPhases); 2912 2913 memory_level->EnabledForThrottle = 1; 2914 memory_level->UpH = 0; 2915 memory_level->DownH = 100; 2916 memory_level->VoltageDownH = 0; 2917 memory_level->ActivityLevel = (u16)pi->mclk_activity_target; 2918 2919 memory_level->StutterEnable = false; 2920 memory_level->StrobeEnable = false; 2921 memory_level->EdcReadEnable = false; 2922 memory_level->EdcWriteEnable = false; 2923 memory_level->RttEnable = false; 2924 2925 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 2926 2927 if (pi->mclk_stutter_mode_threshold && 2928 (memory_clock <= pi->mclk_stutter_mode_threshold) && 2929 (pi->uvd_enabled == false) && 2930 (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) && 2931 (rdev->pm.dpm.new_active_crtc_count <= 2)) 2932 memory_level->StutterEnable = true; 2933 2934 if (pi->mclk_strobe_mode_threshold && 2935 (memory_clock <= pi->mclk_strobe_mode_threshold)) 2936 memory_level->StrobeEnable = 1; 2937 2938 if (pi->mem_gddr5) { 2939 memory_level->StrobeRatio = 2940 si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable); 2941 if (pi->mclk_edc_enable_threshold && 2942 (memory_clock > pi->mclk_edc_enable_threshold)) 2943 memory_level->EdcReadEnable = true; 2944 2945 if (pi->mclk_edc_wr_enable_threshold && 2946 (memory_clock > pi->mclk_edc_wr_enable_threshold)) 2947 memory_level->EdcWriteEnable = true; 2948 2949 if (memory_level->StrobeEnable) { 2950 if (si_get_mclk_frequency_ratio(memory_clock, true) >= 2951 ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf)) 2952 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false; 2953 else 2954 dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false; 2955 } else { 2956 dll_state_on = pi->dll_default_on; 2957 } 2958 } else { 2959 memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock); 2960 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false; 2961 } 2962 2963 ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on); 2964 if (ret) 2965 return ret; 2966 2967 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE); 2968 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases); 2969 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE); 2970 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE); 2971 2972 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency); 2973 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel); 2974 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl); 2975 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1); 2976 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2); 2977 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl); 2978 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl); 2979 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl); 2980 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl); 2981 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1); 2982 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2); 2983 2984 return 0; 2985 } 2986 2987 static int ci_populate_smc_acpi_level(struct radeon_device *rdev, 2988 SMU7_Discrete_DpmTable *table) 2989 { 2990 struct ci_power_info *pi = ci_get_pi(rdev); 2991 struct atom_clock_dividers dividers; 2992 SMU7_Discrete_VoltageLevel voltage_level; 2993 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl; 2994 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2; 2995 u32 dll_cntl = pi->clock_registers.dll_cntl; 2996 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl; 2997 int ret; 2998 2999 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; 3000 3001 if (pi->acpi_vddc) 3002 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE); 3003 else 3004 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE); 3005 3006 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1; 3007 3008 table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq; 3009 3010 ret = radeon_atom_get_clock_dividers(rdev, 3011 COMPUTE_GPUCLK_INPUT_FLAG_SCLK, 3012 table->ACPILevel.SclkFrequency, false, ÷rs); 3013 if (ret) 3014 return ret; 3015 3016 table->ACPILevel.SclkDid = (u8)dividers.post_divider; 3017 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 3018 table->ACPILevel.DeepSleepDivId = 0; 3019 3020 spll_func_cntl &= ~SPLL_PWRON; 3021 spll_func_cntl |= SPLL_RESET; 3022 3023 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; 3024 spll_func_cntl_2 |= SCLK_MUX_SEL(4); 3025 3026 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; 3027 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; 3028 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3; 3029 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4; 3030 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum; 3031 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2; 3032 table->ACPILevel.CcPwrDynRm = 0; 3033 table->ACPILevel.CcPwrDynRm1 = 0; 3034 3035 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags); 3036 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases); 3037 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency); 3038 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl); 3039 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2); 3040 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3); 3041 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4); 3042 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum); 3043 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2); 3044 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm); 3045 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1); 3046 3047 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; 3048 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; 3049 3050 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) { 3051 if (pi->acpi_vddci) 3052 table->MemoryACPILevel.MinVddci = 3053 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE); 3054 else 3055 table->MemoryACPILevel.MinVddci = 3056 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE); 3057 } 3058 3059 if (ci_populate_mvdd_value(rdev, 0, &voltage_level)) 3060 table->MemoryACPILevel.MinMvdd = 0; 3061 else 3062 table->MemoryACPILevel.MinMvdd = 3063 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE); 3064 3065 mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET; 3066 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); 3067 3068 dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS); 3069 3070 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl); 3071 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl); 3072 table->MemoryACPILevel.MpllAdFuncCntl = 3073 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl); 3074 table->MemoryACPILevel.MpllDqFuncCntl = 3075 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl); 3076 table->MemoryACPILevel.MpllFuncCntl = 3077 cpu_to_be32(pi->clock_registers.mpll_func_cntl); 3078 table->MemoryACPILevel.MpllFuncCntl_1 = 3079 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1); 3080 table->MemoryACPILevel.MpllFuncCntl_2 = 3081 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2); 3082 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1); 3083 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2); 3084 3085 table->MemoryACPILevel.EnabledForThrottle = 0; 3086 table->MemoryACPILevel.EnabledForActivity = 0; 3087 table->MemoryACPILevel.UpH = 0; 3088 table->MemoryACPILevel.DownH = 100; 3089 table->MemoryACPILevel.VoltageDownH = 0; 3090 table->MemoryACPILevel.ActivityLevel = 3091 cpu_to_be16((u16)pi->mclk_activity_target); 3092 3093 table->MemoryACPILevel.StutterEnable = false; 3094 table->MemoryACPILevel.StrobeEnable = false; 3095 table->MemoryACPILevel.EdcReadEnable = false; 3096 table->MemoryACPILevel.EdcWriteEnable = false; 3097 table->MemoryACPILevel.RttEnable = false; 3098 3099 return 0; 3100 } 3101 3102 3103 static int ci_enable_ulv(struct radeon_device *rdev, bool enable) 3104 { 3105 struct ci_power_info *pi = ci_get_pi(rdev); 3106 struct ci_ulv_parm *ulv = &pi->ulv; 3107 3108 if (ulv->supported) { 3109 if (enable) 3110 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ? 3111 0 : -EINVAL; 3112 else 3113 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ? 3114 0 : -EINVAL; 3115 } 3116 3117 return 0; 3118 } 3119 3120 static int ci_populate_ulv_level(struct radeon_device *rdev, 3121 SMU7_Discrete_Ulv *state) 3122 { 3123 struct ci_power_info *pi = ci_get_pi(rdev); 3124 u16 ulv_voltage = rdev->pm.dpm.backbias_response_time; 3125 3126 state->CcPwrDynRm = 0; 3127 state->CcPwrDynRm1 = 0; 3128 3129 if (ulv_voltage == 0) { 3130 pi->ulv.supported = false; 3131 return 0; 3132 } 3133 3134 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 3135 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) 3136 state->VddcOffset = 0; 3137 else 3138 state->VddcOffset = 3139 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage; 3140 } else { 3141 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) 3142 state->VddcOffsetVid = 0; 3143 else 3144 state->VddcOffsetVid = (u8) 3145 ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) * 3146 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); 3147 } 3148 state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1; 3149 3150 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm); 3151 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1); 3152 state->VddcOffset = cpu_to_be16(state->VddcOffset); 3153 3154 return 0; 3155 } 3156 3157 static int ci_calculate_sclk_params(struct radeon_device *rdev, 3158 u32 engine_clock, 3159 SMU7_Discrete_GraphicsLevel *sclk) 3160 { 3161 struct ci_power_info *pi = ci_get_pi(rdev); 3162 struct atom_clock_dividers dividers; 3163 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3; 3164 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4; 3165 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum; 3166 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2; 3167 u32 reference_clock = rdev->clock.spll.reference_freq; 3168 u32 reference_divider; 3169 u32 fbdiv; 3170 int ret; 3171 3172 ret = radeon_atom_get_clock_dividers(rdev, 3173 COMPUTE_GPUCLK_INPUT_FLAG_SCLK, 3174 engine_clock, false, ÷rs); 3175 if (ret) 3176 return ret; 3177 3178 reference_divider = 1 + dividers.ref_div; 3179 fbdiv = dividers.fb_div & 0x3FFFFFF; 3180 3181 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; 3182 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); 3183 spll_func_cntl_3 |= SPLL_DITHEN; 3184 3185 if (pi->caps_sclk_ss_support) { 3186 struct radeon_atom_ss ss; 3187 u32 vco_freq = engine_clock * dividers.post_div; 3188 3189 if (radeon_atombios_get_asic_ss_info(rdev, &ss, 3190 ASIC_INTERNAL_ENGINE_SS, vco_freq)) { 3191 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); 3192 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000); 3193 3194 cg_spll_spread_spectrum &= ~CLK_S_MASK; 3195 cg_spll_spread_spectrum |= CLK_S(clk_s); 3196 cg_spll_spread_spectrum |= SSEN; 3197 3198 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK; 3199 cg_spll_spread_spectrum_2 |= CLK_V(clk_v); 3200 } 3201 } 3202 3203 sclk->SclkFrequency = engine_clock; 3204 sclk->CgSpllFuncCntl3 = spll_func_cntl_3; 3205 sclk->CgSpllFuncCntl4 = spll_func_cntl_4; 3206 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; 3207 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; 3208 sclk->SclkDid = (u8)dividers.post_divider; 3209 3210 return 0; 3211 } 3212 3213 static int ci_populate_single_graphic_level(struct radeon_device *rdev, 3214 u32 engine_clock, 3215 u16 sclk_activity_level_t, 3216 SMU7_Discrete_GraphicsLevel *graphic_level) 3217 { 3218 struct ci_power_info *pi = ci_get_pi(rdev); 3219 int ret; 3220 3221 ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level); 3222 if (ret) 3223 return ret; 3224 3225 ret = ci_get_dependency_volt_by_clk(rdev, 3226 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, 3227 engine_clock, &graphic_level->MinVddc); 3228 if (ret) 3229 return ret; 3230 3231 graphic_level->SclkFrequency = engine_clock; 3232 3233 graphic_level->Flags = 0; 3234 graphic_level->MinVddcPhases = 1; 3235 3236 if (pi->vddc_phase_shed_control) 3237 ci_populate_phase_value_based_on_sclk(rdev, 3238 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table, 3239 engine_clock, 3240 &graphic_level->MinVddcPhases); 3241 3242 graphic_level->ActivityLevel = sclk_activity_level_t; 3243 3244 graphic_level->CcPwrDynRm = 0; 3245 graphic_level->CcPwrDynRm1 = 0; 3246 graphic_level->EnabledForThrottle = 1; 3247 graphic_level->UpH = 0; 3248 graphic_level->DownH = 0; 3249 graphic_level->VoltageDownH = 0; 3250 graphic_level->PowerThrottle = 0; 3251 3252 if (pi->caps_sclk_ds) 3253 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev, 3254 engine_clock, 3255 CISLAND_MINIMUM_ENGINE_CLOCK); 3256 3257 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 3258 3259 graphic_level->Flags = cpu_to_be32(graphic_level->Flags); 3260 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE); 3261 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases); 3262 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency); 3263 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel); 3264 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3); 3265 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4); 3266 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum); 3267 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2); 3268 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm); 3269 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1); 3270 3271 return 0; 3272 } 3273 3274 static int ci_populate_all_graphic_levels(struct radeon_device *rdev) 3275 { 3276 struct ci_power_info *pi = ci_get_pi(rdev); 3277 struct ci_dpm_table *dpm_table = &pi->dpm_table; 3278 u32 level_array_address = pi->dpm_table_start + 3279 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel); 3280 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) * 3281 SMU7_MAX_LEVELS_GRAPHICS; 3282 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel; 3283 u32 i, ret; 3284 3285 memset(levels, 0, level_array_size); 3286 3287 for (i = 0; i < dpm_table->sclk_table.count; i++) { 3288 ret = ci_populate_single_graphic_level(rdev, 3289 dpm_table->sclk_table.dpm_levels[i].value, 3290 (u16)pi->activity_target[i], 3291 &pi->smc_state_table.GraphicsLevel[i]); 3292 if (ret) 3293 return ret; 3294 if (i > 1) 3295 pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; 3296 if (i == (dpm_table->sclk_table.count - 1)) 3297 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark = 3298 PPSMC_DISPLAY_WATERMARK_HIGH; 3299 } 3300 pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; 3301 3302 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count; 3303 pi->dpm_level_enable_mask.sclk_dpm_enable_mask = 3304 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); 3305 3306 ret = ci_copy_bytes_to_smc(rdev, level_array_address, 3307 (u8 *)levels, level_array_size, 3308 pi->sram_end); 3309 if (ret) 3310 return ret; 3311 3312 return 0; 3313 } 3314 3315 static int ci_populate_ulv_state(struct radeon_device *rdev, 3316 SMU7_Discrete_Ulv *ulv_level) 3317 { 3318 return ci_populate_ulv_level(rdev, ulv_level); 3319 } 3320 3321 static int ci_populate_all_memory_levels(struct radeon_device *rdev) 3322 { 3323 struct ci_power_info *pi = ci_get_pi(rdev); 3324 struct ci_dpm_table *dpm_table = &pi->dpm_table; 3325 u32 level_array_address = pi->dpm_table_start + 3326 offsetof(SMU7_Discrete_DpmTable, MemoryLevel); 3327 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * 3328 SMU7_MAX_LEVELS_MEMORY; 3329 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel; 3330 u32 i, ret; 3331 3332 memset(levels, 0, level_array_size); 3333 3334 for (i = 0; i < dpm_table->mclk_table.count; i++) { 3335 if (dpm_table->mclk_table.dpm_levels[i].value == 0) 3336 return -EINVAL; 3337 ret = ci_populate_single_memory_level(rdev, 3338 dpm_table->mclk_table.dpm_levels[i].value, 3339 &pi->smc_state_table.MemoryLevel[i]); 3340 if (ret) 3341 return ret; 3342 } 3343 3344 pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; 3345 3346 if ((dpm_table->mclk_table.count >= 2) && 3347 ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) { 3348 pi->smc_state_table.MemoryLevel[1].MinVddc = 3349 pi->smc_state_table.MemoryLevel[0].MinVddc; 3350 pi->smc_state_table.MemoryLevel[1].MinVddcPhases = 3351 pi->smc_state_table.MemoryLevel[0].MinVddcPhases; 3352 } 3353 3354 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F); 3355 3356 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count; 3357 pi->dpm_level_enable_mask.mclk_dpm_enable_mask = 3358 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); 3359 3360 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark = 3361 PPSMC_DISPLAY_WATERMARK_HIGH; 3362 3363 ret = ci_copy_bytes_to_smc(rdev, level_array_address, 3364 (u8 *)levels, level_array_size, 3365 pi->sram_end); 3366 if (ret) 3367 return ret; 3368 3369 return 0; 3370 } 3371 3372 static void ci_reset_single_dpm_table(struct radeon_device *rdev, 3373 struct ci_single_dpm_table* dpm_table, 3374 u32 count) 3375 { 3376 u32 i; 3377 3378 dpm_table->count = count; 3379 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) 3380 dpm_table->dpm_levels[i].enabled = false; 3381 } 3382 3383 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table, 3384 u32 index, u32 pcie_gen, u32 pcie_lanes) 3385 { 3386 dpm_table->dpm_levels[index].value = pcie_gen; 3387 dpm_table->dpm_levels[index].param1 = pcie_lanes; 3388 dpm_table->dpm_levels[index].enabled = true; 3389 } 3390 3391 static int ci_setup_default_pcie_tables(struct radeon_device *rdev) 3392 { 3393 struct ci_power_info *pi = ci_get_pi(rdev); 3394 3395 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) 3396 return -EINVAL; 3397 3398 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) { 3399 pi->pcie_gen_powersaving = pi->pcie_gen_performance; 3400 pi->pcie_lane_powersaving = pi->pcie_lane_performance; 3401 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) { 3402 pi->pcie_gen_performance = pi->pcie_gen_powersaving; 3403 pi->pcie_lane_performance = pi->pcie_lane_powersaving; 3404 } 3405 3406 ci_reset_single_dpm_table(rdev, 3407 &pi->dpm_table.pcie_speed_table, 3408 SMU7_MAX_LEVELS_LINK); 3409 3410 if (rdev->family == CHIP_BONAIRE) 3411 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, 3412 pi->pcie_gen_powersaving.min, 3413 pi->pcie_lane_powersaving.max); 3414 else 3415 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, 3416 pi->pcie_gen_powersaving.min, 3417 pi->pcie_lane_powersaving.min); 3418 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1, 3419 pi->pcie_gen_performance.min, 3420 pi->pcie_lane_performance.min); 3421 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2, 3422 pi->pcie_gen_powersaving.min, 3423 pi->pcie_lane_powersaving.max); 3424 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3, 3425 pi->pcie_gen_performance.min, 3426 pi->pcie_lane_performance.max); 3427 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4, 3428 pi->pcie_gen_powersaving.max, 3429 pi->pcie_lane_powersaving.max); 3430 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5, 3431 pi->pcie_gen_performance.max, 3432 pi->pcie_lane_performance.max); 3433 3434 pi->dpm_table.pcie_speed_table.count = 6; 3435 3436 return 0; 3437 } 3438 3439 static int ci_setup_default_dpm_tables(struct radeon_device *rdev) 3440 { 3441 struct ci_power_info *pi = ci_get_pi(rdev); 3442 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table = 3443 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 3444 struct radeon_clock_voltage_dependency_table *allowed_mclk_table = 3445 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk; 3446 struct radeon_cac_leakage_table *std_voltage_table = 3447 &rdev->pm.dpm.dyn_state.cac_leakage_table; 3448 u32 i; 3449 3450 if (allowed_sclk_vddc_table == NULL) 3451 return -EINVAL; 3452 if (allowed_sclk_vddc_table->count < 1) 3453 return -EINVAL; 3454 if (allowed_mclk_table == NULL) 3455 return -EINVAL; 3456 if (allowed_mclk_table->count < 1) 3457 return -EINVAL; 3458 3459 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table)); 3460 3461 ci_reset_single_dpm_table(rdev, 3462 &pi->dpm_table.sclk_table, 3463 SMU7_MAX_LEVELS_GRAPHICS); 3464 ci_reset_single_dpm_table(rdev, 3465 &pi->dpm_table.mclk_table, 3466 SMU7_MAX_LEVELS_MEMORY); 3467 ci_reset_single_dpm_table(rdev, 3468 &pi->dpm_table.vddc_table, 3469 SMU7_MAX_LEVELS_VDDC); 3470 ci_reset_single_dpm_table(rdev, 3471 &pi->dpm_table.vddci_table, 3472 SMU7_MAX_LEVELS_VDDCI); 3473 ci_reset_single_dpm_table(rdev, 3474 &pi->dpm_table.mvdd_table, 3475 SMU7_MAX_LEVELS_MVDD); 3476 3477 pi->dpm_table.sclk_table.count = 0; 3478 for (i = 0; i < allowed_sclk_vddc_table->count; i++) { 3479 if ((i == 0) || 3480 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value != 3481 allowed_sclk_vddc_table->entries[i].clk)) { 3482 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value = 3483 allowed_sclk_vddc_table->entries[i].clk; 3484 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = 3485 (i == 0) ? true : false; 3486 pi->dpm_table.sclk_table.count++; 3487 } 3488 } 3489 3490 pi->dpm_table.mclk_table.count = 0; 3491 for (i = 0; i < allowed_mclk_table->count; i++) { 3492 if ((i == 0) || 3493 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value != 3494 allowed_mclk_table->entries[i].clk)) { 3495 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value = 3496 allowed_mclk_table->entries[i].clk; 3497 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = 3498 (i == 0) ? true : false; 3499 pi->dpm_table.mclk_table.count++; 3500 } 3501 } 3502 3503 for (i = 0; i < allowed_sclk_vddc_table->count; i++) { 3504 pi->dpm_table.vddc_table.dpm_levels[i].value = 3505 allowed_sclk_vddc_table->entries[i].v; 3506 pi->dpm_table.vddc_table.dpm_levels[i].param1 = 3507 std_voltage_table->entries[i].leakage; 3508 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true; 3509 } 3510 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count; 3511 3512 allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk; 3513 if (allowed_mclk_table) { 3514 for (i = 0; i < allowed_mclk_table->count; i++) { 3515 pi->dpm_table.vddci_table.dpm_levels[i].value = 3516 allowed_mclk_table->entries[i].v; 3517 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true; 3518 } 3519 pi->dpm_table.vddci_table.count = allowed_mclk_table->count; 3520 } 3521 3522 allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk; 3523 if (allowed_mclk_table) { 3524 for (i = 0; i < allowed_mclk_table->count; i++) { 3525 pi->dpm_table.mvdd_table.dpm_levels[i].value = 3526 allowed_mclk_table->entries[i].v; 3527 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true; 3528 } 3529 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count; 3530 } 3531 3532 ci_setup_default_pcie_tables(rdev); 3533 3534 return 0; 3535 } 3536 3537 static int ci_find_boot_level(struct ci_single_dpm_table *table, 3538 u32 value, u32 *boot_level) 3539 { 3540 u32 i; 3541 int ret = -EINVAL; 3542 3543 for(i = 0; i < table->count; i++) { 3544 if (value == table->dpm_levels[i].value) { 3545 *boot_level = i; 3546 ret = 0; 3547 } 3548 } 3549 3550 return ret; 3551 } 3552 3553 static int ci_init_smc_table(struct radeon_device *rdev) 3554 { 3555 struct ci_power_info *pi = ci_get_pi(rdev); 3556 struct ci_ulv_parm *ulv = &pi->ulv; 3557 struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps; 3558 SMU7_Discrete_DpmTable *table = &pi->smc_state_table; 3559 int ret; 3560 3561 ret = ci_setup_default_dpm_tables(rdev); 3562 if (ret) 3563 return ret; 3564 3565 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) 3566 ci_populate_smc_voltage_tables(rdev, table); 3567 3568 ci_init_fps_limits(rdev); 3569 3570 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) 3571 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; 3572 3573 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) 3574 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; 3575 3576 if (pi->mem_gddr5) 3577 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; 3578 3579 if (ulv->supported) { 3580 ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv); 3581 if (ret) 3582 return ret; 3583 WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter); 3584 } 3585 3586 ret = ci_populate_all_graphic_levels(rdev); 3587 if (ret) 3588 return ret; 3589 3590 ret = ci_populate_all_memory_levels(rdev); 3591 if (ret) 3592 return ret; 3593 3594 ci_populate_smc_link_level(rdev, table); 3595 3596 ret = ci_populate_smc_acpi_level(rdev, table); 3597 if (ret) 3598 return ret; 3599 3600 ret = ci_populate_smc_vce_level(rdev, table); 3601 if (ret) 3602 return ret; 3603 3604 ret = ci_populate_smc_acp_level(rdev, table); 3605 if (ret) 3606 return ret; 3607 3608 ret = ci_populate_smc_samu_level(rdev, table); 3609 if (ret) 3610 return ret; 3611 3612 ret = ci_do_program_memory_timing_parameters(rdev); 3613 if (ret) 3614 return ret; 3615 3616 ret = ci_populate_smc_uvd_level(rdev, table); 3617 if (ret) 3618 return ret; 3619 3620 table->UvdBootLevel = 0; 3621 table->VceBootLevel = 0; 3622 table->AcpBootLevel = 0; 3623 table->SamuBootLevel = 0; 3624 table->GraphicsBootLevel = 0; 3625 table->MemoryBootLevel = 0; 3626 3627 ret = ci_find_boot_level(&pi->dpm_table.sclk_table, 3628 pi->vbios_boot_state.sclk_bootup_value, 3629 (u32 *)&pi->smc_state_table.GraphicsBootLevel); 3630 3631 ret = ci_find_boot_level(&pi->dpm_table.mclk_table, 3632 pi->vbios_boot_state.mclk_bootup_value, 3633 (u32 *)&pi->smc_state_table.MemoryBootLevel); 3634 3635 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value; 3636 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value; 3637 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value; 3638 3639 ci_populate_smc_initial_state(rdev, radeon_boot_state); 3640 3641 ret = ci_populate_bapm_parameters_in_dpm_table(rdev); 3642 if (ret) 3643 return ret; 3644 3645 table->UVDInterval = 1; 3646 table->VCEInterval = 1; 3647 table->ACPInterval = 1; 3648 table->SAMUInterval = 1; 3649 table->GraphicsVoltageChangeEnable = 1; 3650 table->GraphicsThermThrottleEnable = 1; 3651 table->GraphicsInterval = 1; 3652 table->VoltageInterval = 1; 3653 table->ThermalInterval = 1; 3654 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high * 3655 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000); 3656 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low * 3657 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000); 3658 table->MemoryVoltageChangeEnable = 1; 3659 table->MemoryInterval = 1; 3660 table->VoltageResponseTime = 0; 3661 table->VddcVddciDelta = 4000; 3662 table->PhaseResponseTime = 0; 3663 table->MemoryThermThrottleEnable = 1; 3664 table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1; 3665 table->PCIeGenInterval = 1; 3666 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) 3667 table->SVI2Enable = 1; 3668 else 3669 table->SVI2Enable = 0; 3670 3671 table->ThermGpio = 17; 3672 table->SclkStepSize = 0x4000; 3673 3674 table->SystemFlags = cpu_to_be32(table->SystemFlags); 3675 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid); 3676 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase); 3677 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid); 3678 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid); 3679 table->SclkStepSize = cpu_to_be32(table->SclkStepSize); 3680 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh); 3681 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow); 3682 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta); 3683 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime); 3684 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime); 3685 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE); 3686 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE); 3687 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE); 3688 3689 ret = ci_copy_bytes_to_smc(rdev, 3690 pi->dpm_table_start + 3691 offsetof(SMU7_Discrete_DpmTable, SystemFlags), 3692 (u8 *)&table->SystemFlags, 3693 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController), 3694 pi->sram_end); 3695 if (ret) 3696 return ret; 3697 3698 return 0; 3699 } 3700 3701 static void ci_trim_single_dpm_states(struct radeon_device *rdev, 3702 struct ci_single_dpm_table *dpm_table, 3703 u32 low_limit, u32 high_limit) 3704 { 3705 u32 i; 3706 3707 for (i = 0; i < dpm_table->count; i++) { 3708 if ((dpm_table->dpm_levels[i].value < low_limit) || 3709 (dpm_table->dpm_levels[i].value > high_limit)) 3710 dpm_table->dpm_levels[i].enabled = false; 3711 else 3712 dpm_table->dpm_levels[i].enabled = true; 3713 } 3714 } 3715 3716 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev, 3717 u32 speed_low, u32 lanes_low, 3718 u32 speed_high, u32 lanes_high) 3719 { 3720 struct ci_power_info *pi = ci_get_pi(rdev); 3721 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table; 3722 u32 i, j; 3723 3724 for (i = 0; i < pcie_table->count; i++) { 3725 if ((pcie_table->dpm_levels[i].value < speed_low) || 3726 (pcie_table->dpm_levels[i].param1 < lanes_low) || 3727 (pcie_table->dpm_levels[i].value > speed_high) || 3728 (pcie_table->dpm_levels[i].param1 > lanes_high)) 3729 pcie_table->dpm_levels[i].enabled = false; 3730 else 3731 pcie_table->dpm_levels[i].enabled = true; 3732 } 3733 3734 for (i = 0; i < pcie_table->count; i++) { 3735 if (pcie_table->dpm_levels[i].enabled) { 3736 for (j = i + 1; j < pcie_table->count; j++) { 3737 if (pcie_table->dpm_levels[j].enabled) { 3738 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) && 3739 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1)) 3740 pcie_table->dpm_levels[j].enabled = false; 3741 } 3742 } 3743 } 3744 } 3745 } 3746 3747 static int ci_trim_dpm_states(struct radeon_device *rdev, 3748 struct radeon_ps *radeon_state) 3749 { 3750 struct ci_ps *state = ci_get_ps(radeon_state); 3751 struct ci_power_info *pi = ci_get_pi(rdev); 3752 u32 high_limit_count; 3753 3754 if (state->performance_level_count < 1) 3755 return -EINVAL; 3756 3757 if (state->performance_level_count == 1) 3758 high_limit_count = 0; 3759 else 3760 high_limit_count = 1; 3761 3762 ci_trim_single_dpm_states(rdev, 3763 &pi->dpm_table.sclk_table, 3764 state->performance_levels[0].sclk, 3765 state->performance_levels[high_limit_count].sclk); 3766 3767 ci_trim_single_dpm_states(rdev, 3768 &pi->dpm_table.mclk_table, 3769 state->performance_levels[0].mclk, 3770 state->performance_levels[high_limit_count].mclk); 3771 3772 ci_trim_pcie_dpm_states(rdev, 3773 state->performance_levels[0].pcie_gen, 3774 state->performance_levels[0].pcie_lane, 3775 state->performance_levels[high_limit_count].pcie_gen, 3776 state->performance_levels[high_limit_count].pcie_lane); 3777 3778 return 0; 3779 } 3780 3781 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev) 3782 { 3783 struct radeon_clock_voltage_dependency_table *disp_voltage_table = 3784 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk; 3785 struct radeon_clock_voltage_dependency_table *vddc_table = 3786 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 3787 u32 requested_voltage = 0; 3788 u32 i; 3789 3790 if (disp_voltage_table == NULL) 3791 return -EINVAL; 3792 if (!disp_voltage_table->count) 3793 return -EINVAL; 3794 3795 for (i = 0; i < disp_voltage_table->count; i++) { 3796 if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk) 3797 requested_voltage = disp_voltage_table->entries[i].v; 3798 } 3799 3800 for (i = 0; i < vddc_table->count; i++) { 3801 if (requested_voltage <= vddc_table->entries[i].v) { 3802 requested_voltage = vddc_table->entries[i].v; 3803 return (ci_send_msg_to_smc_with_parameter(rdev, 3804 PPSMC_MSG_VddC_Request, 3805 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ? 3806 0 : -EINVAL; 3807 } 3808 } 3809 3810 return -EINVAL; 3811 } 3812 3813 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev) 3814 { 3815 struct ci_power_info *pi = ci_get_pi(rdev); 3816 PPSMC_Result result; 3817 3818 ci_apply_disp_minimum_voltage_request(rdev); 3819 3820 if (!pi->sclk_dpm_key_disabled) { 3821 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { 3822 result = ci_send_msg_to_smc_with_parameter(rdev, 3823 PPSMC_MSG_SCLKDPM_SetEnabledMask, 3824 pi->dpm_level_enable_mask.sclk_dpm_enable_mask); 3825 if (result != PPSMC_Result_OK) 3826 return -EINVAL; 3827 } 3828 } 3829 3830 if (!pi->mclk_dpm_key_disabled) { 3831 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { 3832 result = ci_send_msg_to_smc_with_parameter(rdev, 3833 PPSMC_MSG_MCLKDPM_SetEnabledMask, 3834 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 3835 if (result != PPSMC_Result_OK) 3836 return -EINVAL; 3837 } 3838 } 3839 #if 0 3840 if (!pi->pcie_dpm_key_disabled) { 3841 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { 3842 result = ci_send_msg_to_smc_with_parameter(rdev, 3843 PPSMC_MSG_PCIeDPM_SetEnabledMask, 3844 pi->dpm_level_enable_mask.pcie_dpm_enable_mask); 3845 if (result != PPSMC_Result_OK) 3846 return -EINVAL; 3847 } 3848 } 3849 #endif 3850 return 0; 3851 } 3852 3853 static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev, 3854 struct radeon_ps *radeon_state) 3855 { 3856 struct ci_power_info *pi = ci_get_pi(rdev); 3857 struct ci_ps *state = ci_get_ps(radeon_state); 3858 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table; 3859 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk; 3860 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table; 3861 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk; 3862 u32 i; 3863 3864 pi->need_update_smu7_dpm_table = 0; 3865 3866 for (i = 0; i < sclk_table->count; i++) { 3867 if (sclk == sclk_table->dpm_levels[i].value) 3868 break; 3869 } 3870 3871 if (i >= sclk_table->count) { 3872 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; 3873 } else { 3874 /* XXX The current code always reprogrammed the sclk levels, 3875 * but we don't currently handle disp sclk requirements 3876 * so just skip it. 3877 */ 3878 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK) 3879 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; 3880 } 3881 3882 for (i = 0; i < mclk_table->count; i++) { 3883 if (mclk == mclk_table->dpm_levels[i].value) 3884 break; 3885 } 3886 3887 if (i >= mclk_table->count) 3888 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; 3889 3890 if (rdev->pm.dpm.current_active_crtc_count != 3891 rdev->pm.dpm.new_active_crtc_count) 3892 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; 3893 } 3894 3895 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev, 3896 struct radeon_ps *radeon_state) 3897 { 3898 struct ci_power_info *pi = ci_get_pi(rdev); 3899 struct ci_ps *state = ci_get_ps(radeon_state); 3900 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk; 3901 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk; 3902 struct ci_dpm_table *dpm_table = &pi->dpm_table; 3903 int ret; 3904 3905 if (!pi->need_update_smu7_dpm_table) 3906 return 0; 3907 3908 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) 3909 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk; 3910 3911 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) 3912 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk; 3913 3914 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) { 3915 ret = ci_populate_all_graphic_levels(rdev); 3916 if (ret) 3917 return ret; 3918 } 3919 3920 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) { 3921 ret = ci_populate_all_memory_levels(rdev); 3922 if (ret) 3923 return ret; 3924 } 3925 3926 return 0; 3927 } 3928 3929 static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable) 3930 { 3931 struct ci_power_info *pi = ci_get_pi(rdev); 3932 const struct radeon_clock_and_voltage_limits *max_limits; 3933 int i; 3934 3935 if (rdev->pm.dpm.ac_power) 3936 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 3937 else 3938 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 3939 3940 if (enable) { 3941 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0; 3942 3943 for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 3944 if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 3945 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i; 3946 3947 if (!pi->caps_uvd_dpm) 3948 break; 3949 } 3950 } 3951 3952 ci_send_msg_to_smc_with_parameter(rdev, 3953 PPSMC_MSG_UVDDPM_SetEnabledMask, 3954 pi->dpm_level_enable_mask.uvd_dpm_enable_mask); 3955 3956 if (pi->last_mclk_dpm_enable_mask & 0x1) { 3957 pi->uvd_enabled = true; 3958 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; 3959 ci_send_msg_to_smc_with_parameter(rdev, 3960 PPSMC_MSG_MCLKDPM_SetEnabledMask, 3961 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 3962 } 3963 } else { 3964 if (pi->last_mclk_dpm_enable_mask & 0x1) { 3965 pi->uvd_enabled = false; 3966 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1; 3967 ci_send_msg_to_smc_with_parameter(rdev, 3968 PPSMC_MSG_MCLKDPM_SetEnabledMask, 3969 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 3970 } 3971 } 3972 3973 return (ci_send_msg_to_smc(rdev, enable ? 3974 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ? 3975 0 : -EINVAL; 3976 } 3977 3978 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable) 3979 { 3980 struct ci_power_info *pi = ci_get_pi(rdev); 3981 const struct radeon_clock_and_voltage_limits *max_limits; 3982 int i; 3983 3984 if (rdev->pm.dpm.ac_power) 3985 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 3986 else 3987 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 3988 3989 if (enable) { 3990 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0; 3991 for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 3992 if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 3993 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i; 3994 3995 if (!pi->caps_vce_dpm) 3996 break; 3997 } 3998 } 3999 4000 ci_send_msg_to_smc_with_parameter(rdev, 4001 PPSMC_MSG_VCEDPM_SetEnabledMask, 4002 pi->dpm_level_enable_mask.vce_dpm_enable_mask); 4003 } 4004 4005 return (ci_send_msg_to_smc(rdev, enable ? 4006 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ? 4007 0 : -EINVAL; 4008 } 4009 4010 #if 0 4011 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable) 4012 { 4013 struct ci_power_info *pi = ci_get_pi(rdev); 4014 const struct radeon_clock_and_voltage_limits *max_limits; 4015 int i; 4016 4017 if (rdev->pm.dpm.ac_power) 4018 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 4019 else 4020 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 4021 4022 if (enable) { 4023 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0; 4024 for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 4025 if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 4026 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i; 4027 4028 if (!pi->caps_samu_dpm) 4029 break; 4030 } 4031 } 4032 4033 ci_send_msg_to_smc_with_parameter(rdev, 4034 PPSMC_MSG_SAMUDPM_SetEnabledMask, 4035 pi->dpm_level_enable_mask.samu_dpm_enable_mask); 4036 } 4037 return (ci_send_msg_to_smc(rdev, enable ? 4038 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ? 4039 0 : -EINVAL; 4040 } 4041 4042 static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable) 4043 { 4044 struct ci_power_info *pi = ci_get_pi(rdev); 4045 const struct radeon_clock_and_voltage_limits *max_limits; 4046 int i; 4047 4048 if (rdev->pm.dpm.ac_power) 4049 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 4050 else 4051 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 4052 4053 if (enable) { 4054 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0; 4055 for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 4056 if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 4057 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i; 4058 4059 if (!pi->caps_acp_dpm) 4060 break; 4061 } 4062 } 4063 4064 ci_send_msg_to_smc_with_parameter(rdev, 4065 PPSMC_MSG_ACPDPM_SetEnabledMask, 4066 pi->dpm_level_enable_mask.acp_dpm_enable_mask); 4067 } 4068 4069 return (ci_send_msg_to_smc(rdev, enable ? 4070 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ? 4071 0 : -EINVAL; 4072 } 4073 #endif 4074 4075 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate) 4076 { 4077 struct ci_power_info *pi = ci_get_pi(rdev); 4078 u32 tmp; 4079 4080 if (!gate) { 4081 if (pi->caps_uvd_dpm || 4082 (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0)) 4083 pi->smc_state_table.UvdBootLevel = 0; 4084 else 4085 pi->smc_state_table.UvdBootLevel = 4086 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; 4087 4088 tmp = RREG32_SMC(DPM_TABLE_475); 4089 tmp &= ~UvdBootLevel_MASK; 4090 tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel); 4091 WREG32_SMC(DPM_TABLE_475, tmp); 4092 } 4093 4094 return ci_enable_uvd_dpm(rdev, !gate); 4095 } 4096 4097 static u8 ci_get_vce_boot_level(struct radeon_device *rdev) 4098 { 4099 u8 i; 4100 u32 min_evclk = 30000; /* ??? */ 4101 struct radeon_vce_clock_voltage_dependency_table *table = 4102 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 4103 4104 for (i = 0; i < table->count; i++) { 4105 if (table->entries[i].evclk >= min_evclk) 4106 return i; 4107 } 4108 4109 return table->count - 1; 4110 } 4111 4112 static int ci_update_vce_dpm(struct radeon_device *rdev, 4113 struct radeon_ps *radeon_new_state, 4114 struct radeon_ps *radeon_current_state) 4115 { 4116 struct ci_power_info *pi = ci_get_pi(rdev); 4117 int ret = 0; 4118 u32 tmp; 4119 4120 if (radeon_current_state->evclk != radeon_new_state->evclk) { 4121 if (radeon_new_state->evclk) { 4122 /* turn the clocks on when encoding */ 4123 cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false); 4124 4125 pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev); 4126 tmp = RREG32_SMC(DPM_TABLE_475); 4127 tmp &= ~VceBootLevel_MASK; 4128 tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel); 4129 WREG32_SMC(DPM_TABLE_475, tmp); 4130 4131 ret = ci_enable_vce_dpm(rdev, true); 4132 } else { 4133 /* turn the clocks off when not encoding */ 4134 cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true); 4135 4136 ret = ci_enable_vce_dpm(rdev, false); 4137 } 4138 } 4139 return ret; 4140 } 4141 4142 #if 0 4143 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate) 4144 { 4145 return ci_enable_samu_dpm(rdev, gate); 4146 } 4147 4148 static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate) 4149 { 4150 struct ci_power_info *pi = ci_get_pi(rdev); 4151 u32 tmp; 4152 4153 if (!gate) { 4154 pi->smc_state_table.AcpBootLevel = 0; 4155 4156 tmp = RREG32_SMC(DPM_TABLE_475); 4157 tmp &= ~AcpBootLevel_MASK; 4158 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel); 4159 WREG32_SMC(DPM_TABLE_475, tmp); 4160 } 4161 4162 return ci_enable_acp_dpm(rdev, !gate); 4163 } 4164 #endif 4165 4166 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev, 4167 struct radeon_ps *radeon_state) 4168 { 4169 struct ci_power_info *pi = ci_get_pi(rdev); 4170 int ret; 4171 4172 ret = ci_trim_dpm_states(rdev, radeon_state); 4173 if (ret) 4174 return ret; 4175 4176 pi->dpm_level_enable_mask.sclk_dpm_enable_mask = 4177 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table); 4178 pi->dpm_level_enable_mask.mclk_dpm_enable_mask = 4179 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table); 4180 pi->last_mclk_dpm_enable_mask = 4181 pi->dpm_level_enable_mask.mclk_dpm_enable_mask; 4182 if (pi->uvd_enabled) { 4183 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1) 4184 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; 4185 } 4186 pi->dpm_level_enable_mask.pcie_dpm_enable_mask = 4187 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table); 4188 4189 return 0; 4190 } 4191 4192 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev, 4193 u32 level_mask) 4194 { 4195 u32 level = 0; 4196 4197 while ((level_mask & (1 << level)) == 0) 4198 level++; 4199 4200 return level; 4201 } 4202 4203 4204 int ci_dpm_force_performance_level(struct radeon_device *rdev, 4205 enum radeon_dpm_forced_level level) 4206 { 4207 struct ci_power_info *pi = ci_get_pi(rdev); 4208 u32 tmp, levels, i; 4209 int ret; 4210 4211 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { 4212 if ((!pi->pcie_dpm_key_disabled) && 4213 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { 4214 levels = 0; 4215 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask; 4216 while (tmp >>= 1) 4217 levels++; 4218 if (levels) { 4219 ret = ci_dpm_force_state_pcie(rdev, level); 4220 if (ret) 4221 return ret; 4222 for (i = 0; i < rdev->usec_timeout; i++) { 4223 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) & 4224 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT; 4225 if (tmp == levels) 4226 break; 4227 udelay(1); 4228 } 4229 } 4230 } 4231 if ((!pi->sclk_dpm_key_disabled) && 4232 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { 4233 levels = 0; 4234 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask; 4235 while (tmp >>= 1) 4236 levels++; 4237 if (levels) { 4238 ret = ci_dpm_force_state_sclk(rdev, levels); 4239 if (ret) 4240 return ret; 4241 for (i = 0; i < rdev->usec_timeout; i++) { 4242 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & 4243 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT; 4244 if (tmp == levels) 4245 break; 4246 udelay(1); 4247 } 4248 } 4249 } 4250 if ((!pi->mclk_dpm_key_disabled) && 4251 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { 4252 levels = 0; 4253 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask; 4254 while (tmp >>= 1) 4255 levels++; 4256 if (levels) { 4257 ret = ci_dpm_force_state_mclk(rdev, levels); 4258 if (ret) 4259 return ret; 4260 for (i = 0; i < rdev->usec_timeout; i++) { 4261 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & 4262 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT; 4263 if (tmp == levels) 4264 break; 4265 udelay(1); 4266 } 4267 } 4268 } 4269 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { 4270 if ((!pi->sclk_dpm_key_disabled) && 4271 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { 4272 levels = ci_get_lowest_enabled_level(rdev, 4273 pi->dpm_level_enable_mask.sclk_dpm_enable_mask); 4274 ret = ci_dpm_force_state_sclk(rdev, levels); 4275 if (ret) 4276 return ret; 4277 for (i = 0; i < rdev->usec_timeout; i++) { 4278 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & 4279 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT; 4280 if (tmp == levels) 4281 break; 4282 udelay(1); 4283 } 4284 } 4285 if ((!pi->mclk_dpm_key_disabled) && 4286 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { 4287 levels = ci_get_lowest_enabled_level(rdev, 4288 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 4289 ret = ci_dpm_force_state_mclk(rdev, levels); 4290 if (ret) 4291 return ret; 4292 for (i = 0; i < rdev->usec_timeout; i++) { 4293 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & 4294 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT; 4295 if (tmp == levels) 4296 break; 4297 udelay(1); 4298 } 4299 } 4300 if ((!pi->pcie_dpm_key_disabled) && 4301 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { 4302 levels = ci_get_lowest_enabled_level(rdev, 4303 pi->dpm_level_enable_mask.pcie_dpm_enable_mask); 4304 ret = ci_dpm_force_state_pcie(rdev, levels); 4305 if (ret) 4306 return ret; 4307 for (i = 0; i < rdev->usec_timeout; i++) { 4308 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) & 4309 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT; 4310 if (tmp == levels) 4311 break; 4312 udelay(1); 4313 } 4314 } 4315 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { 4316 if (!pi->pcie_dpm_key_disabled) { 4317 PPSMC_Result smc_result; 4318 4319 smc_result = ci_send_msg_to_smc(rdev, 4320 PPSMC_MSG_PCIeDPM_UnForceLevel); 4321 if (smc_result != PPSMC_Result_OK) 4322 return -EINVAL; 4323 } 4324 ret = ci_upload_dpm_level_enable_mask(rdev); 4325 if (ret) 4326 return ret; 4327 } 4328 4329 rdev->pm.dpm.forced_level = level; 4330 4331 return 0; 4332 } 4333 4334 static int ci_set_mc_special_registers(struct radeon_device *rdev, 4335 struct ci_mc_reg_table *table) 4336 { 4337 struct ci_power_info *pi = ci_get_pi(rdev); 4338 u8 i, j, k; 4339 u32 temp_reg; 4340 4341 for (i = 0, j = table->last; i < table->last; i++) { 4342 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4343 return -EINVAL; 4344 switch(table->mc_reg_address[i].s1 << 2) { 4345 case MC_SEQ_MISC1: 4346 temp_reg = RREG32(MC_PMG_CMD_EMRS); 4347 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2; 4348 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2; 4349 for (k = 0; k < table->num_entries; k++) { 4350 table->mc_reg_table_entry[k].mc_data[j] = 4351 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); 4352 } 4353 j++; 4354 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4355 return -EINVAL; 4356 4357 temp_reg = RREG32(MC_PMG_CMD_MRS); 4358 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2; 4359 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2; 4360 for (k = 0; k < table->num_entries; k++) { 4361 table->mc_reg_table_entry[k].mc_data[j] = 4362 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); 4363 if (!pi->mem_gddr5) 4364 table->mc_reg_table_entry[k].mc_data[j] |= 0x100; 4365 } 4366 j++; 4367 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4368 return -EINVAL; 4369 4370 if (!pi->mem_gddr5) { 4371 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2; 4372 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2; 4373 for (k = 0; k < table->num_entries; k++) { 4374 table->mc_reg_table_entry[k].mc_data[j] = 4375 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; 4376 } 4377 j++; 4378 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4379 return -EINVAL; 4380 } 4381 break; 4382 case MC_SEQ_RESERVE_M: 4383 temp_reg = RREG32(MC_PMG_CMD_MRS1); 4384 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2; 4385 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2; 4386 for (k = 0; k < table->num_entries; k++) { 4387 table->mc_reg_table_entry[k].mc_data[j] = 4388 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); 4389 } 4390 j++; 4391 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4392 return -EINVAL; 4393 break; 4394 default: 4395 break; 4396 } 4397 4398 } 4399 4400 table->last = j; 4401 4402 return 0; 4403 } 4404 4405 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg) 4406 { 4407 bool result = true; 4408 4409 switch(in_reg) { 4410 case MC_SEQ_RAS_TIMING >> 2: 4411 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2; 4412 break; 4413 case MC_SEQ_DLL_STBY >> 2: 4414 *out_reg = MC_SEQ_DLL_STBY_LP >> 2; 4415 break; 4416 case MC_SEQ_G5PDX_CMD0 >> 2: 4417 *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2; 4418 break; 4419 case MC_SEQ_G5PDX_CMD1 >> 2: 4420 *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2; 4421 break; 4422 case MC_SEQ_G5PDX_CTRL >> 2: 4423 *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2; 4424 break; 4425 case MC_SEQ_CAS_TIMING >> 2: 4426 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2; 4427 break; 4428 case MC_SEQ_MISC_TIMING >> 2: 4429 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2; 4430 break; 4431 case MC_SEQ_MISC_TIMING2 >> 2: 4432 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2; 4433 break; 4434 case MC_SEQ_PMG_DVS_CMD >> 2: 4435 *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2; 4436 break; 4437 case MC_SEQ_PMG_DVS_CTL >> 2: 4438 *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2; 4439 break; 4440 case MC_SEQ_RD_CTL_D0 >> 2: 4441 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2; 4442 break; 4443 case MC_SEQ_RD_CTL_D1 >> 2: 4444 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2; 4445 break; 4446 case MC_SEQ_WR_CTL_D0 >> 2: 4447 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2; 4448 break; 4449 case MC_SEQ_WR_CTL_D1 >> 2: 4450 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2; 4451 break; 4452 case MC_PMG_CMD_EMRS >> 2: 4453 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2; 4454 break; 4455 case MC_PMG_CMD_MRS >> 2: 4456 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2; 4457 break; 4458 case MC_PMG_CMD_MRS1 >> 2: 4459 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2; 4460 break; 4461 case MC_SEQ_PMG_TIMING >> 2: 4462 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2; 4463 break; 4464 case MC_PMG_CMD_MRS2 >> 2: 4465 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2; 4466 break; 4467 case MC_SEQ_WR_CTL_2 >> 2: 4468 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2; 4469 break; 4470 default: 4471 result = false; 4472 break; 4473 } 4474 4475 return result; 4476 } 4477 4478 static void ci_set_valid_flag(struct ci_mc_reg_table *table) 4479 { 4480 u8 i, j; 4481 4482 for (i = 0; i < table->last; i++) { 4483 for (j = 1; j < table->num_entries; j++) { 4484 if (table->mc_reg_table_entry[j-1].mc_data[i] != 4485 table->mc_reg_table_entry[j].mc_data[i]) { 4486 table->valid_flag |= 1 << i; 4487 break; 4488 } 4489 } 4490 } 4491 } 4492 4493 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table) 4494 { 4495 u32 i; 4496 u16 address; 4497 4498 for (i = 0; i < table->last; i++) { 4499 table->mc_reg_address[i].s0 = 4500 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? 4501 address : table->mc_reg_address[i].s1; 4502 } 4503 } 4504 4505 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table, 4506 struct ci_mc_reg_table *ci_table) 4507 { 4508 u8 i, j; 4509 4510 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4511 return -EINVAL; 4512 if (table->num_entries > MAX_AC_TIMING_ENTRIES) 4513 return -EINVAL; 4514 4515 for (i = 0; i < table->last; i++) 4516 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; 4517 4518 ci_table->last = table->last; 4519 4520 for (i = 0; i < table->num_entries; i++) { 4521 ci_table->mc_reg_table_entry[i].mclk_max = 4522 table->mc_reg_table_entry[i].mclk_max; 4523 for (j = 0; j < table->last; j++) 4524 ci_table->mc_reg_table_entry[i].mc_data[j] = 4525 table->mc_reg_table_entry[i].mc_data[j]; 4526 } 4527 ci_table->num_entries = table->num_entries; 4528 4529 return 0; 4530 } 4531 4532 static int ci_register_patching_mc_seq(struct radeon_device *rdev, 4533 struct ci_mc_reg_table *table) 4534 { 4535 u8 i, k; 4536 u32 tmp; 4537 bool patch; 4538 4539 tmp = RREG32(MC_SEQ_MISC0); 4540 patch = ((tmp & 0x0000f00) == 0x300) ? true : false; 4541 4542 if (patch && 4543 ((rdev->pdev->device == 0x67B0) || 4544 (rdev->pdev->device == 0x67B1))) { 4545 for (i = 0; i < table->last; i++) { 4546 if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4547 return -EINVAL; 4548 switch(table->mc_reg_address[i].s1 >> 2) { 4549 case MC_SEQ_MISC1: 4550 for (k = 0; k < table->num_entries; k++) { 4551 if ((table->mc_reg_table_entry[k].mclk_max == 125000) || 4552 (table->mc_reg_table_entry[k].mclk_max == 137500)) 4553 table->mc_reg_table_entry[k].mc_data[i] = 4554 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) | 4555 0x00000007; 4556 } 4557 break; 4558 case MC_SEQ_WR_CTL_D0: 4559 for (k = 0; k < table->num_entries; k++) { 4560 if ((table->mc_reg_table_entry[k].mclk_max == 125000) || 4561 (table->mc_reg_table_entry[k].mclk_max == 137500)) 4562 table->mc_reg_table_entry[k].mc_data[i] = 4563 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) | 4564 0x0000D0DD; 4565 } 4566 break; 4567 case MC_SEQ_WR_CTL_D1: 4568 for (k = 0; k < table->num_entries; k++) { 4569 if ((table->mc_reg_table_entry[k].mclk_max == 125000) || 4570 (table->mc_reg_table_entry[k].mclk_max == 137500)) 4571 table->mc_reg_table_entry[k].mc_data[i] = 4572 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) | 4573 0x0000D0DD; 4574 } 4575 break; 4576 case MC_SEQ_WR_CTL_2: 4577 for (k = 0; k < table->num_entries; k++) { 4578 if ((table->mc_reg_table_entry[k].mclk_max == 125000) || 4579 (table->mc_reg_table_entry[k].mclk_max == 137500)) 4580 table->mc_reg_table_entry[k].mc_data[i] = 0; 4581 } 4582 break; 4583 case MC_SEQ_CAS_TIMING: 4584 for (k = 0; k < table->num_entries; k++) { 4585 if (table->mc_reg_table_entry[k].mclk_max == 125000) 4586 table->mc_reg_table_entry[k].mc_data[i] = 4587 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) | 4588 0x000C0140; 4589 else if (table->mc_reg_table_entry[k].mclk_max == 137500) 4590 table->mc_reg_table_entry[k].mc_data[i] = 4591 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) | 4592 0x000C0150; 4593 } 4594 break; 4595 case MC_SEQ_MISC_TIMING: 4596 for (k = 0; k < table->num_entries; k++) { 4597 if (table->mc_reg_table_entry[k].mclk_max == 125000) 4598 table->mc_reg_table_entry[k].mc_data[i] = 4599 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) | 4600 0x00000030; 4601 else if (table->mc_reg_table_entry[k].mclk_max == 137500) 4602 table->mc_reg_table_entry[k].mc_data[i] = 4603 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) | 4604 0x00000035; 4605 } 4606 break; 4607 default: 4608 break; 4609 } 4610 } 4611 4612 WREG32(MC_SEQ_IO_DEBUG_INDEX, 3); 4613 tmp = RREG32(MC_SEQ_IO_DEBUG_DATA); 4614 tmp = (tmp & 0xFFF8FFFF) | (1 << 16); 4615 WREG32(MC_SEQ_IO_DEBUG_INDEX, 3); 4616 WREG32(MC_SEQ_IO_DEBUG_DATA, tmp); 4617 } 4618 4619 return 0; 4620 } 4621 4622 static int ci_initialize_mc_reg_table(struct radeon_device *rdev) 4623 { 4624 struct ci_power_info *pi = ci_get_pi(rdev); 4625 struct atom_mc_reg_table *table; 4626 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table; 4627 u8 module_index = rv770_get_memory_module_index(rdev); 4628 int ret; 4629 4630 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); 4631 if (!table) 4632 return -ENOMEM; 4633 4634 WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING)); 4635 WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING)); 4636 WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY)); 4637 WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0)); 4638 WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1)); 4639 WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL)); 4640 WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD)); 4641 WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL)); 4642 WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING)); 4643 WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2)); 4644 WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS)); 4645 WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS)); 4646 WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1)); 4647 WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0)); 4648 WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1)); 4649 WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0)); 4650 WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1)); 4651 WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING)); 4652 WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2)); 4653 WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2)); 4654 4655 ret = radeon_atom_init_mc_reg_table(rdev, module_index, table); 4656 if (ret) 4657 goto init_mc_done; 4658 4659 ret = ci_copy_vbios_mc_reg_table(table, ci_table); 4660 if (ret) 4661 goto init_mc_done; 4662 4663 ci_set_s0_mc_reg_index(ci_table); 4664 4665 ret = ci_register_patching_mc_seq(rdev, ci_table); 4666 if (ret) 4667 goto init_mc_done; 4668 4669 ret = ci_set_mc_special_registers(rdev, ci_table); 4670 if (ret) 4671 goto init_mc_done; 4672 4673 ci_set_valid_flag(ci_table); 4674 4675 init_mc_done: 4676 kfree(table); 4677 4678 return ret; 4679 } 4680 4681 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev, 4682 SMU7_Discrete_MCRegisters *mc_reg_table) 4683 { 4684 struct ci_power_info *pi = ci_get_pi(rdev); 4685 u32 i, j; 4686 4687 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) { 4688 if (pi->mc_reg_table.valid_flag & (1 << j)) { 4689 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4690 return -EINVAL; 4691 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0); 4692 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1); 4693 i++; 4694 } 4695 } 4696 4697 mc_reg_table->last = (u8)i; 4698 4699 return 0; 4700 } 4701 4702 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry, 4703 SMU7_Discrete_MCRegisterSet *data, 4704 u32 num_entries, u32 valid_flag) 4705 { 4706 u32 i, j; 4707 4708 for (i = 0, j = 0; j < num_entries; j++) { 4709 if (valid_flag & (1 << j)) { 4710 data->value[i] = cpu_to_be32(entry->mc_data[j]); 4711 i++; 4712 } 4713 } 4714 } 4715 4716 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev, 4717 const u32 memory_clock, 4718 SMU7_Discrete_MCRegisterSet *mc_reg_table_data) 4719 { 4720 struct ci_power_info *pi = ci_get_pi(rdev); 4721 u32 i = 0; 4722 4723 for(i = 0; i < pi->mc_reg_table.num_entries; i++) { 4724 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max) 4725 break; 4726 } 4727 4728 if ((i == pi->mc_reg_table.num_entries) && (i > 0)) 4729 --i; 4730 4731 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i], 4732 mc_reg_table_data, pi->mc_reg_table.last, 4733 pi->mc_reg_table.valid_flag); 4734 } 4735 4736 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev, 4737 SMU7_Discrete_MCRegisters *mc_reg_table) 4738 { 4739 struct ci_power_info *pi = ci_get_pi(rdev); 4740 u32 i; 4741 4742 for (i = 0; i < pi->dpm_table.mclk_table.count; i++) 4743 ci_convert_mc_reg_table_entry_to_smc(rdev, 4744 pi->dpm_table.mclk_table.dpm_levels[i].value, 4745 &mc_reg_table->data[i]); 4746 } 4747 4748 static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev) 4749 { 4750 struct ci_power_info *pi = ci_get_pi(rdev); 4751 int ret; 4752 4753 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters)); 4754 4755 ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table); 4756 if (ret) 4757 return ret; 4758 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table); 4759 4760 return ci_copy_bytes_to_smc(rdev, 4761 pi->mc_reg_table_start, 4762 (u8 *)&pi->smc_mc_reg_table, 4763 sizeof(SMU7_Discrete_MCRegisters), 4764 pi->sram_end); 4765 } 4766 4767 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev) 4768 { 4769 struct ci_power_info *pi = ci_get_pi(rdev); 4770 4771 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) 4772 return 0; 4773 4774 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters)); 4775 4776 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table); 4777 4778 return ci_copy_bytes_to_smc(rdev, 4779 pi->mc_reg_table_start + 4780 offsetof(SMU7_Discrete_MCRegisters, data[0]), 4781 (u8 *)&pi->smc_mc_reg_table.data[0], 4782 sizeof(SMU7_Discrete_MCRegisterSet) * 4783 pi->dpm_table.mclk_table.count, 4784 pi->sram_end); 4785 } 4786 4787 static void ci_enable_voltage_control(struct radeon_device *rdev) 4788 { 4789 u32 tmp = RREG32_SMC(GENERAL_PWRMGT); 4790 4791 tmp |= VOLT_PWRMGT_EN; 4792 WREG32_SMC(GENERAL_PWRMGT, tmp); 4793 } 4794 4795 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev, 4796 struct radeon_ps *radeon_state) 4797 { 4798 struct ci_ps *state = ci_get_ps(radeon_state); 4799 int i; 4800 u16 pcie_speed, max_speed = 0; 4801 4802 for (i = 0; i < state->performance_level_count; i++) { 4803 pcie_speed = state->performance_levels[i].pcie_gen; 4804 if (max_speed < pcie_speed) 4805 max_speed = pcie_speed; 4806 } 4807 4808 return max_speed; 4809 } 4810 4811 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev) 4812 { 4813 u32 speed_cntl = 0; 4814 4815 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK; 4816 speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT; 4817 4818 return (u16)speed_cntl; 4819 } 4820 4821 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev) 4822 { 4823 u32 link_width = 0; 4824 4825 link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK; 4826 link_width >>= LC_LINK_WIDTH_RD_SHIFT; 4827 4828 switch (link_width) { 4829 case RADEON_PCIE_LC_LINK_WIDTH_X1: 4830 return 1; 4831 case RADEON_PCIE_LC_LINK_WIDTH_X2: 4832 return 2; 4833 case RADEON_PCIE_LC_LINK_WIDTH_X4: 4834 return 4; 4835 case RADEON_PCIE_LC_LINK_WIDTH_X8: 4836 return 8; 4837 case RADEON_PCIE_LC_LINK_WIDTH_X12: 4838 /* not actually supported */ 4839 return 12; 4840 case RADEON_PCIE_LC_LINK_WIDTH_X0: 4841 case RADEON_PCIE_LC_LINK_WIDTH_X16: 4842 default: 4843 return 16; 4844 } 4845 } 4846 4847 static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev, 4848 struct radeon_ps *radeon_new_state, 4849 struct radeon_ps *radeon_current_state) 4850 { 4851 struct ci_power_info *pi = ci_get_pi(rdev); 4852 enum radeon_pcie_gen target_link_speed = 4853 ci_get_maximum_link_speed(rdev, radeon_new_state); 4854 enum radeon_pcie_gen current_link_speed; 4855 4856 if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID) 4857 current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state); 4858 else 4859 current_link_speed = pi->force_pcie_gen; 4860 4861 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID; 4862 pi->pspp_notify_required = false; 4863 if (target_link_speed > current_link_speed) { 4864 switch (target_link_speed) { 4865 #ifdef CONFIG_ACPI 4866 case RADEON_PCIE_GEN3: 4867 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0) 4868 break; 4869 pi->force_pcie_gen = RADEON_PCIE_GEN2; 4870 if (current_link_speed == RADEON_PCIE_GEN2) 4871 break; 4872 case RADEON_PCIE_GEN2: 4873 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) 4874 break; 4875 #endif 4876 default: 4877 pi->force_pcie_gen = ci_get_current_pcie_speed(rdev); 4878 break; 4879 } 4880 } else { 4881 if (target_link_speed < current_link_speed) 4882 pi->pspp_notify_required = true; 4883 } 4884 } 4885 4886 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev, 4887 struct radeon_ps *radeon_new_state, 4888 struct radeon_ps *radeon_current_state) 4889 { 4890 struct ci_power_info *pi = ci_get_pi(rdev); 4891 enum radeon_pcie_gen target_link_speed = 4892 ci_get_maximum_link_speed(rdev, radeon_new_state); 4893 u8 request; 4894 4895 if (pi->pspp_notify_required) { 4896 if (target_link_speed == RADEON_PCIE_GEN3) 4897 request = PCIE_PERF_REQ_PECI_GEN3; 4898 else if (target_link_speed == RADEON_PCIE_GEN2) 4899 request = PCIE_PERF_REQ_PECI_GEN2; 4900 else 4901 request = PCIE_PERF_REQ_PECI_GEN1; 4902 4903 if ((request == PCIE_PERF_REQ_PECI_GEN1) && 4904 (ci_get_current_pcie_speed(rdev) > 0)) 4905 return; 4906 4907 #ifdef CONFIG_ACPI 4908 radeon_acpi_pcie_performance_request(rdev, request, false); 4909 #endif 4910 } 4911 } 4912 4913 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev) 4914 { 4915 struct ci_power_info *pi = ci_get_pi(rdev); 4916 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table = 4917 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 4918 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table = 4919 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk; 4920 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table = 4921 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk; 4922 4923 if (allowed_sclk_vddc_table == NULL) 4924 return -EINVAL; 4925 if (allowed_sclk_vddc_table->count < 1) 4926 return -EINVAL; 4927 if (allowed_mclk_vddc_table == NULL) 4928 return -EINVAL; 4929 if (allowed_mclk_vddc_table->count < 1) 4930 return -EINVAL; 4931 if (allowed_mclk_vddci_table == NULL) 4932 return -EINVAL; 4933 if (allowed_mclk_vddci_table->count < 1) 4934 return -EINVAL; 4935 4936 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v; 4937 pi->max_vddc_in_pp_table = 4938 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; 4939 4940 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v; 4941 pi->max_vddci_in_pp_table = 4942 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; 4943 4944 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = 4945 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; 4946 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = 4947 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; 4948 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = 4949 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; 4950 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = 4951 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; 4952 4953 return 0; 4954 } 4955 4956 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc) 4957 { 4958 struct ci_power_info *pi = ci_get_pi(rdev); 4959 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage; 4960 u32 leakage_index; 4961 4962 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { 4963 if (leakage_table->leakage_id[leakage_index] == *vddc) { 4964 *vddc = leakage_table->actual_voltage[leakage_index]; 4965 break; 4966 } 4967 } 4968 } 4969 4970 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci) 4971 { 4972 struct ci_power_info *pi = ci_get_pi(rdev); 4973 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage; 4974 u32 leakage_index; 4975 4976 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { 4977 if (leakage_table->leakage_id[leakage_index] == *vddci) { 4978 *vddci = leakage_table->actual_voltage[leakage_index]; 4979 break; 4980 } 4981 } 4982 } 4983 4984 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev, 4985 struct radeon_clock_voltage_dependency_table *table) 4986 { 4987 u32 i; 4988 4989 if (table) { 4990 for (i = 0; i < table->count; i++) 4991 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); 4992 } 4993 } 4994 4995 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev, 4996 struct radeon_clock_voltage_dependency_table *table) 4997 { 4998 u32 i; 4999 5000 if (table) { 5001 for (i = 0; i < table->count; i++) 5002 ci_patch_with_vddci_leakage(rdev, &table->entries[i].v); 5003 } 5004 } 5005 5006 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev, 5007 struct radeon_vce_clock_voltage_dependency_table *table) 5008 { 5009 u32 i; 5010 5011 if (table) { 5012 for (i = 0; i < table->count; i++) 5013 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); 5014 } 5015 } 5016 5017 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev, 5018 struct radeon_uvd_clock_voltage_dependency_table *table) 5019 { 5020 u32 i; 5021 5022 if (table) { 5023 for (i = 0; i < table->count; i++) 5024 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); 5025 } 5026 } 5027 5028 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev, 5029 struct radeon_phase_shedding_limits_table *table) 5030 { 5031 u32 i; 5032 5033 if (table) { 5034 for (i = 0; i < table->count; i++) 5035 ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage); 5036 } 5037 } 5038 5039 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev, 5040 struct radeon_clock_and_voltage_limits *table) 5041 { 5042 if (table) { 5043 ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc); 5044 ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci); 5045 } 5046 } 5047 5048 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev, 5049 struct radeon_cac_leakage_table *table) 5050 { 5051 u32 i; 5052 5053 if (table) { 5054 for (i = 0; i < table->count; i++) 5055 ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc); 5056 } 5057 } 5058 5059 static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev) 5060 { 5061 5062 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, 5063 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk); 5064 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, 5065 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk); 5066 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, 5067 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk); 5068 ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev, 5069 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk); 5070 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev, 5071 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table); 5072 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev, 5073 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table); 5074 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, 5075 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table); 5076 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, 5077 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table); 5078 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev, 5079 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table); 5080 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev, 5081 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac); 5082 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev, 5083 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc); 5084 ci_patch_cac_leakage_table_with_vddc_leakage(rdev, 5085 &rdev->pm.dpm.dyn_state.cac_leakage_table); 5086 5087 } 5088 5089 static void ci_get_memory_type(struct radeon_device *rdev) 5090 { 5091 struct ci_power_info *pi = ci_get_pi(rdev); 5092 u32 tmp; 5093 5094 tmp = RREG32(MC_SEQ_MISC0); 5095 5096 if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) == 5097 MC_SEQ_MISC0_GDDR5_VALUE) 5098 pi->mem_gddr5 = true; 5099 else 5100 pi->mem_gddr5 = false; 5101 5102 } 5103 5104 static void ci_update_current_ps(struct radeon_device *rdev, 5105 struct radeon_ps *rps) 5106 { 5107 struct ci_ps *new_ps = ci_get_ps(rps); 5108 struct ci_power_info *pi = ci_get_pi(rdev); 5109 5110 pi->current_rps = *rps; 5111 pi->current_ps = *new_ps; 5112 pi->current_rps.ps_priv = &pi->current_ps; 5113 } 5114 5115 static void ci_update_requested_ps(struct radeon_device *rdev, 5116 struct radeon_ps *rps) 5117 { 5118 struct ci_ps *new_ps = ci_get_ps(rps); 5119 struct ci_power_info *pi = ci_get_pi(rdev); 5120 5121 pi->requested_rps = *rps; 5122 pi->requested_ps = *new_ps; 5123 pi->requested_rps.ps_priv = &pi->requested_ps; 5124 } 5125 5126 int ci_dpm_pre_set_power_state(struct radeon_device *rdev) 5127 { 5128 struct ci_power_info *pi = ci_get_pi(rdev); 5129 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps; 5130 struct radeon_ps *new_ps = &requested_ps; 5131 5132 ci_update_requested_ps(rdev, new_ps); 5133 5134 ci_apply_state_adjust_rules(rdev, &pi->requested_rps); 5135 5136 return 0; 5137 } 5138 5139 void ci_dpm_post_set_power_state(struct radeon_device *rdev) 5140 { 5141 struct ci_power_info *pi = ci_get_pi(rdev); 5142 struct radeon_ps *new_ps = &pi->requested_rps; 5143 5144 ci_update_current_ps(rdev, new_ps); 5145 } 5146 5147 5148 void ci_dpm_setup_asic(struct radeon_device *rdev) 5149 { 5150 int r; 5151 5152 r = ci_mc_load_microcode(rdev); 5153 if (r) 5154 DRM_ERROR("Failed to load MC firmware!\n"); 5155 ci_read_clock_registers(rdev); 5156 ci_get_memory_type(rdev); 5157 ci_enable_acpi_power_management(rdev); 5158 ci_init_sclk_t(rdev); 5159 } 5160 5161 int ci_dpm_enable(struct radeon_device *rdev) 5162 { 5163 struct ci_power_info *pi = ci_get_pi(rdev); 5164 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; 5165 int ret; 5166 5167 if (ci_is_smc_running(rdev)) 5168 return -EINVAL; 5169 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) { 5170 ci_enable_voltage_control(rdev); 5171 ret = ci_construct_voltage_tables(rdev); 5172 if (ret) { 5173 DRM_ERROR("ci_construct_voltage_tables failed\n"); 5174 return ret; 5175 } 5176 } 5177 if (pi->caps_dynamic_ac_timing) { 5178 ret = ci_initialize_mc_reg_table(rdev); 5179 if (ret) 5180 pi->caps_dynamic_ac_timing = false; 5181 } 5182 if (pi->dynamic_ss) 5183 ci_enable_spread_spectrum(rdev, true); 5184 if (pi->thermal_protection) 5185 ci_enable_thermal_protection(rdev, true); 5186 ci_program_sstp(rdev); 5187 ci_enable_display_gap(rdev); 5188 ci_program_vc(rdev); 5189 ret = ci_upload_firmware(rdev); 5190 if (ret) { 5191 DRM_ERROR("ci_upload_firmware failed\n"); 5192 return ret; 5193 } 5194 ret = ci_process_firmware_header(rdev); 5195 if (ret) { 5196 DRM_ERROR("ci_process_firmware_header failed\n"); 5197 return ret; 5198 } 5199 ret = ci_initial_switch_from_arb_f0_to_f1(rdev); 5200 if (ret) { 5201 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n"); 5202 return ret; 5203 } 5204 ret = ci_init_smc_table(rdev); 5205 if (ret) { 5206 DRM_ERROR("ci_init_smc_table failed\n"); 5207 return ret; 5208 } 5209 ret = ci_init_arb_table_index(rdev); 5210 if (ret) { 5211 DRM_ERROR("ci_init_arb_table_index failed\n"); 5212 return ret; 5213 } 5214 if (pi->caps_dynamic_ac_timing) { 5215 ret = ci_populate_initial_mc_reg_table(rdev); 5216 if (ret) { 5217 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n"); 5218 return ret; 5219 } 5220 } 5221 ret = ci_populate_pm_base(rdev); 5222 if (ret) { 5223 DRM_ERROR("ci_populate_pm_base failed\n"); 5224 return ret; 5225 } 5226 ci_dpm_start_smc(rdev); 5227 ci_enable_vr_hot_gpio_interrupt(rdev); 5228 ret = ci_notify_smc_display_change(rdev, false); 5229 if (ret) { 5230 DRM_ERROR("ci_notify_smc_display_change failed\n"); 5231 return ret; 5232 } 5233 ci_enable_sclk_control(rdev, true); 5234 ret = ci_enable_ulv(rdev, true); 5235 if (ret) { 5236 DRM_ERROR("ci_enable_ulv failed\n"); 5237 return ret; 5238 } 5239 ret = ci_enable_ds_master_switch(rdev, true); 5240 if (ret) { 5241 DRM_ERROR("ci_enable_ds_master_switch failed\n"); 5242 return ret; 5243 } 5244 ret = ci_start_dpm(rdev); 5245 if (ret) { 5246 DRM_ERROR("ci_start_dpm failed\n"); 5247 return ret; 5248 } 5249 ret = ci_enable_didt(rdev, true); 5250 if (ret) { 5251 DRM_ERROR("ci_enable_didt failed\n"); 5252 return ret; 5253 } 5254 ret = ci_enable_smc_cac(rdev, true); 5255 if (ret) { 5256 DRM_ERROR("ci_enable_smc_cac failed\n"); 5257 return ret; 5258 } 5259 ret = ci_enable_power_containment(rdev, true); 5260 if (ret) { 5261 DRM_ERROR("ci_enable_power_containment failed\n"); 5262 return ret; 5263 } 5264 5265 ret = ci_power_control_set_level(rdev); 5266 if (ret) { 5267 DRM_ERROR("ci_power_control_set_level failed\n"); 5268 return ret; 5269 } 5270 5271 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); 5272 5273 ret = ci_enable_thermal_based_sclk_dpm(rdev, true); 5274 if (ret) { 5275 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n"); 5276 return ret; 5277 } 5278 5279 ci_thermal_start_thermal_controller(rdev); 5280 5281 ci_update_current_ps(rdev, boot_ps); 5282 5283 return 0; 5284 } 5285 5286 static int ci_set_temperature_range(struct radeon_device *rdev) 5287 { 5288 int ret; 5289 5290 ret = ci_thermal_enable_alert(rdev, false); 5291 if (ret) 5292 return ret; 5293 ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); 5294 if (ret) 5295 return ret; 5296 ret = ci_thermal_enable_alert(rdev, true); 5297 if (ret) 5298 return ret; 5299 5300 return ret; 5301 } 5302 5303 int ci_dpm_late_enable(struct radeon_device *rdev) 5304 { 5305 int ret; 5306 5307 ret = ci_set_temperature_range(rdev); 5308 if (ret) 5309 return ret; 5310 5311 ci_dpm_powergate_uvd(rdev, true); 5312 5313 return 0; 5314 } 5315 5316 void ci_dpm_disable(struct radeon_device *rdev) 5317 { 5318 struct ci_power_info *pi = ci_get_pi(rdev); 5319 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; 5320 5321 ci_dpm_powergate_uvd(rdev, false); 5322 5323 if (!ci_is_smc_running(rdev)) 5324 return; 5325 5326 ci_thermal_stop_thermal_controller(rdev); 5327 5328 if (pi->thermal_protection) 5329 ci_enable_thermal_protection(rdev, false); 5330 ci_enable_power_containment(rdev, false); 5331 ci_enable_smc_cac(rdev, false); 5332 ci_enable_didt(rdev, false); 5333 ci_enable_spread_spectrum(rdev, false); 5334 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false); 5335 ci_stop_dpm(rdev); 5336 ci_enable_ds_master_switch(rdev, false); 5337 ci_enable_ulv(rdev, false); 5338 ci_clear_vc(rdev); 5339 ci_reset_to_default(rdev); 5340 ci_dpm_stop_smc(rdev); 5341 ci_force_switch_to_arb_f0(rdev); 5342 ci_enable_thermal_based_sclk_dpm(rdev, false); 5343 5344 ci_update_current_ps(rdev, boot_ps); 5345 } 5346 5347 int ci_dpm_set_power_state(struct radeon_device *rdev) 5348 { 5349 struct ci_power_info *pi = ci_get_pi(rdev); 5350 struct radeon_ps *new_ps = &pi->requested_rps; 5351 struct radeon_ps *old_ps = &pi->current_rps; 5352 int ret; 5353 5354 ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps); 5355 if (pi->pcie_performance_request) 5356 ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps); 5357 ret = ci_freeze_sclk_mclk_dpm(rdev); 5358 if (ret) { 5359 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n"); 5360 return ret; 5361 } 5362 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps); 5363 if (ret) { 5364 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n"); 5365 return ret; 5366 } 5367 ret = ci_generate_dpm_level_enable_mask(rdev, new_ps); 5368 if (ret) { 5369 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n"); 5370 return ret; 5371 } 5372 5373 ret = ci_update_vce_dpm(rdev, new_ps, old_ps); 5374 if (ret) { 5375 DRM_ERROR("ci_update_vce_dpm failed\n"); 5376 return ret; 5377 } 5378 5379 ret = ci_update_sclk_t(rdev); 5380 if (ret) { 5381 DRM_ERROR("ci_update_sclk_t failed\n"); 5382 return ret; 5383 } 5384 if (pi->caps_dynamic_ac_timing) { 5385 ret = ci_update_and_upload_mc_reg_table(rdev); 5386 if (ret) { 5387 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n"); 5388 return ret; 5389 } 5390 } 5391 ret = ci_program_memory_timing_parameters(rdev); 5392 if (ret) { 5393 DRM_ERROR("ci_program_memory_timing_parameters failed\n"); 5394 return ret; 5395 } 5396 ret = ci_unfreeze_sclk_mclk_dpm(rdev); 5397 if (ret) { 5398 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n"); 5399 return ret; 5400 } 5401 ret = ci_upload_dpm_level_enable_mask(rdev); 5402 if (ret) { 5403 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n"); 5404 return ret; 5405 } 5406 if (pi->pcie_performance_request) 5407 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); 5408 5409 return 0; 5410 } 5411 5412 #if 0 5413 void ci_dpm_reset_asic(struct radeon_device *rdev) 5414 { 5415 ci_set_boot_state(rdev); 5416 } 5417 #endif 5418 5419 void ci_dpm_display_configuration_changed(struct radeon_device *rdev) 5420 { 5421 ci_program_display_gap(rdev); 5422 } 5423 5424 union power_info { 5425 struct _ATOM_POWERPLAY_INFO info; 5426 struct _ATOM_POWERPLAY_INFO_V2 info_2; 5427 struct _ATOM_POWERPLAY_INFO_V3 info_3; 5428 struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 5429 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 5430 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 5431 }; 5432 5433 union pplib_clock_info { 5434 struct _ATOM_PPLIB_R600_CLOCK_INFO r600; 5435 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; 5436 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 5437 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 5438 struct _ATOM_PPLIB_SI_CLOCK_INFO si; 5439 struct _ATOM_PPLIB_CI_CLOCK_INFO ci; 5440 }; 5441 5442 union pplib_power_state { 5443 struct _ATOM_PPLIB_STATE v1; 5444 struct _ATOM_PPLIB_STATE_V2 v2; 5445 }; 5446 5447 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev, 5448 struct radeon_ps *rps, 5449 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, 5450 u8 table_rev) 5451 { 5452 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); 5453 rps->class = le16_to_cpu(non_clock_info->usClassification); 5454 rps->class2 = le16_to_cpu(non_clock_info->usClassification2); 5455 5456 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 5457 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 5458 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 5459 } else { 5460 rps->vclk = 0; 5461 rps->dclk = 0; 5462 } 5463 5464 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) 5465 rdev->pm.dpm.boot_ps = rps; 5466 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 5467 rdev->pm.dpm.uvd_ps = rps; 5468 } 5469 5470 static void ci_parse_pplib_clock_info(struct radeon_device *rdev, 5471 struct radeon_ps *rps, int index, 5472 union pplib_clock_info *clock_info) 5473 { 5474 struct ci_power_info *pi = ci_get_pi(rdev); 5475 struct ci_ps *ps = ci_get_ps(rps); 5476 struct ci_pl *pl = &ps->performance_levels[index]; 5477 5478 ps->performance_level_count = index + 1; 5479 5480 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow); 5481 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16; 5482 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow); 5483 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16; 5484 5485 pl->pcie_gen = r600_get_pcie_gen_support(rdev, 5486 pi->sys_pcie_mask, 5487 pi->vbios_boot_state.pcie_gen_bootup_value, 5488 clock_info->ci.ucPCIEGen); 5489 pl->pcie_lane = r600_get_pcie_lane_support(rdev, 5490 pi->vbios_boot_state.pcie_lane_bootup_value, 5491 le16_to_cpu(clock_info->ci.usPCIELane)); 5492 5493 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { 5494 pi->acpi_pcie_gen = pl->pcie_gen; 5495 } 5496 5497 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) { 5498 pi->ulv.supported = true; 5499 pi->ulv.pl = *pl; 5500 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT; 5501 } 5502 5503 /* patch up boot state */ 5504 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { 5505 pl->mclk = pi->vbios_boot_state.mclk_bootup_value; 5506 pl->sclk = pi->vbios_boot_state.sclk_bootup_value; 5507 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value; 5508 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value; 5509 } 5510 5511 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { 5512 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: 5513 pi->use_pcie_powersaving_levels = true; 5514 if (pi->pcie_gen_powersaving.max < pl->pcie_gen) 5515 pi->pcie_gen_powersaving.max = pl->pcie_gen; 5516 if (pi->pcie_gen_powersaving.min > pl->pcie_gen) 5517 pi->pcie_gen_powersaving.min = pl->pcie_gen; 5518 if (pi->pcie_lane_powersaving.max < pl->pcie_lane) 5519 pi->pcie_lane_powersaving.max = pl->pcie_lane; 5520 if (pi->pcie_lane_powersaving.min > pl->pcie_lane) 5521 pi->pcie_lane_powersaving.min = pl->pcie_lane; 5522 break; 5523 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: 5524 pi->use_pcie_performance_levels = true; 5525 if (pi->pcie_gen_performance.max < pl->pcie_gen) 5526 pi->pcie_gen_performance.max = pl->pcie_gen; 5527 if (pi->pcie_gen_performance.min > pl->pcie_gen) 5528 pi->pcie_gen_performance.min = pl->pcie_gen; 5529 if (pi->pcie_lane_performance.max < pl->pcie_lane) 5530 pi->pcie_lane_performance.max = pl->pcie_lane; 5531 if (pi->pcie_lane_performance.min > pl->pcie_lane) 5532 pi->pcie_lane_performance.min = pl->pcie_lane; 5533 break; 5534 default: 5535 break; 5536 } 5537 } 5538 5539 static int ci_parse_power_table(struct radeon_device *rdev) 5540 { 5541 struct radeon_mode_info *mode_info = &rdev->mode_info; 5542 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; 5543 union pplib_power_state *power_state; 5544 int i, j, k, non_clock_array_index, clock_array_index; 5545 union pplib_clock_info *clock_info; 5546 struct _StateArray *state_array; 5547 struct _ClockInfoArray *clock_info_array; 5548 struct _NonClockInfoArray *non_clock_info_array; 5549 union power_info *power_info; 5550 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 5551 u16 data_offset; 5552 u8 frev, crev; 5553 u8 *power_state_offset; 5554 struct ci_ps *ps; 5555 5556 if (!atom_parse_data_header(mode_info->atom_context, index, NULL, 5557 &frev, &crev, &data_offset)) 5558 return -EINVAL; 5559 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 5560 5561 state_array = (struct _StateArray *) 5562 (mode_info->atom_context->bios + data_offset + 5563 le16_to_cpu(power_info->pplib.usStateArrayOffset)); 5564 clock_info_array = (struct _ClockInfoArray *) 5565 (mode_info->atom_context->bios + data_offset + 5566 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); 5567 non_clock_info_array = (struct _NonClockInfoArray *) 5568 (mode_info->atom_context->bios + data_offset + 5569 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 5570 5571 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) * 5572 state_array->ucNumEntries, GFP_KERNEL); 5573 if (!rdev->pm.dpm.ps) 5574 return -ENOMEM; 5575 power_state_offset = (u8 *)state_array->states; 5576 for (i = 0; i < state_array->ucNumEntries; i++) { 5577 u8 *idx; 5578 power_state = (union pplib_power_state *)power_state_offset; 5579 non_clock_array_index = power_state->v2.nonClockInfoIndex; 5580 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 5581 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 5582 if (!rdev->pm.power_state[i].clock_info) 5583 return -EINVAL; 5584 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL); 5585 if (ps == NULL) { 5586 kfree(rdev->pm.dpm.ps); 5587 return -ENOMEM; 5588 } 5589 rdev->pm.dpm.ps[i].ps_priv = ps; 5590 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], 5591 non_clock_info, 5592 non_clock_info_array->ucEntrySize); 5593 k = 0; 5594 idx = (u8 *)&power_state->v2.clockInfoIndex[0]; 5595 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 5596 clock_array_index = idx[j]; 5597 if (clock_array_index >= clock_info_array->ucNumEntries) 5598 continue; 5599 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS) 5600 break; 5601 clock_info = (union pplib_clock_info *) 5602 ((u8 *)&clock_info_array->clockInfo[0] + 5603 (clock_array_index * clock_info_array->ucEntrySize)); 5604 ci_parse_pplib_clock_info(rdev, 5605 &rdev->pm.dpm.ps[i], k, 5606 clock_info); 5607 k++; 5608 } 5609 power_state_offset += 2 + power_state->v2.ucNumDPMLevels; 5610 } 5611 rdev->pm.dpm.num_ps = state_array->ucNumEntries; 5612 5613 /* fill in the vce power states */ 5614 for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) { 5615 u32 sclk, mclk; 5616 clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx; 5617 clock_info = (union pplib_clock_info *) 5618 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 5619 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow); 5620 sclk |= clock_info->ci.ucEngineClockHigh << 16; 5621 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow); 5622 mclk |= clock_info->ci.ucMemoryClockHigh << 16; 5623 rdev->pm.dpm.vce_states[i].sclk = sclk; 5624 rdev->pm.dpm.vce_states[i].mclk = mclk; 5625 } 5626 5627 return 0; 5628 } 5629 5630 static int ci_get_vbios_boot_values(struct radeon_device *rdev, 5631 struct ci_vbios_boot_state *boot_state) 5632 { 5633 struct radeon_mode_info *mode_info = &rdev->mode_info; 5634 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); 5635 ATOM_FIRMWARE_INFO_V2_2 *firmware_info; 5636 u8 frev, crev; 5637 u16 data_offset; 5638 5639 if (atom_parse_data_header(mode_info->atom_context, index, NULL, 5640 &frev, &crev, &data_offset)) { 5641 firmware_info = 5642 (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios + 5643 data_offset); 5644 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage); 5645 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage); 5646 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage); 5647 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev); 5648 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev); 5649 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock); 5650 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock); 5651 5652 return 0; 5653 } 5654 return -EINVAL; 5655 } 5656 5657 void ci_dpm_fini(struct radeon_device *rdev) 5658 { 5659 int i; 5660 5661 for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 5662 kfree(rdev->pm.dpm.ps[i].ps_priv); 5663 } 5664 kfree(rdev->pm.dpm.ps); 5665 kfree(rdev->pm.dpm.priv); 5666 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries); 5667 r600_free_extended_power_table(rdev); 5668 } 5669 5670 int ci_dpm_init(struct radeon_device *rdev) 5671 { 5672 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); 5673 SMU7_Discrete_DpmTable *dpm_table; 5674 struct radeon_gpio_rec gpio; 5675 u16 data_offset, size; 5676 u8 frev, crev; 5677 struct ci_power_info *pi; 5678 int ret; 5679 u32 mask; 5680 5681 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL); 5682 if (pi == NULL) 5683 return -ENOMEM; 5684 rdev->pm.dpm.priv = pi; 5685 5686 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); 5687 if (ret) 5688 pi->sys_pcie_mask = 0; 5689 else 5690 pi->sys_pcie_mask = mask; 5691 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID; 5692 5693 pi->pcie_gen_performance.max = RADEON_PCIE_GEN1; 5694 pi->pcie_gen_performance.min = RADEON_PCIE_GEN3; 5695 pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1; 5696 pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3; 5697 5698 pi->pcie_lane_performance.max = 0; 5699 pi->pcie_lane_performance.min = 16; 5700 pi->pcie_lane_powersaving.max = 0; 5701 pi->pcie_lane_powersaving.min = 16; 5702 5703 ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state); 5704 if (ret) { 5705 ci_dpm_fini(rdev); 5706 return ret; 5707 } 5708 5709 ret = r600_get_platform_caps(rdev); 5710 if (ret) { 5711 ci_dpm_fini(rdev); 5712 return ret; 5713 } 5714 5715 ret = r600_parse_extended_power_table(rdev); 5716 if (ret) { 5717 ci_dpm_fini(rdev); 5718 return ret; 5719 } 5720 5721 ret = ci_parse_power_table(rdev); 5722 if (ret) { 5723 ci_dpm_fini(rdev); 5724 return ret; 5725 } 5726 5727 pi->dll_default_on = false; 5728 pi->sram_end = SMC_RAM_END; 5729 5730 pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT; 5731 pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT; 5732 pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT; 5733 pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT; 5734 pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT; 5735 pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT; 5736 pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT; 5737 pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT; 5738 5739 pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT; 5740 5741 pi->sclk_dpm_key_disabled = 0; 5742 pi->mclk_dpm_key_disabled = 0; 5743 pi->pcie_dpm_key_disabled = 0; 5744 pi->thermal_sclk_dpm_enabled = 0; 5745 5746 /* mclk dpm is unstable on some R7 260X cards with the old mc ucode */ 5747 if ((rdev->pdev->device == 0x6658) && 5748 (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) { 5749 pi->mclk_dpm_key_disabled = 1; 5750 } 5751 5752 pi->caps_sclk_ds = true; 5753 5754 pi->mclk_strobe_mode_threshold = 40000; 5755 pi->mclk_stutter_mode_threshold = 40000; 5756 pi->mclk_edc_enable_threshold = 40000; 5757 pi->mclk_edc_wr_enable_threshold = 40000; 5758 5759 ci_initialize_powertune_defaults(rdev); 5760 5761 pi->caps_fps = false; 5762 5763 pi->caps_sclk_throttle_low_notification = false; 5764 5765 pi->caps_uvd_dpm = true; 5766 pi->caps_vce_dpm = true; 5767 5768 ci_get_leakage_voltages(rdev); 5769 ci_patch_dependency_tables_with_leakage(rdev); 5770 ci_set_private_data_variables_based_on_pptable(rdev); 5771 5772 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = 5773 kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL); 5774 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { 5775 ci_dpm_fini(rdev); 5776 return -ENOMEM; 5777 } 5778 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4; 5779 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; 5780 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; 5781 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000; 5782 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720; 5783 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000; 5784 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810; 5785 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000; 5786 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900; 5787 5788 rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4; 5789 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000; 5790 rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200; 5791 5792 rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0; 5793 rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL; 5794 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0; 5795 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL; 5796 5797 if (rdev->family == CHIP_HAWAII) { 5798 pi->thermal_temp_setting.temperature_low = 94500; 5799 pi->thermal_temp_setting.temperature_high = 95000; 5800 pi->thermal_temp_setting.temperature_shutdown = 104000; 5801 } else { 5802 pi->thermal_temp_setting.temperature_low = 99500; 5803 pi->thermal_temp_setting.temperature_high = 100000; 5804 pi->thermal_temp_setting.temperature_shutdown = 104000; 5805 } 5806 5807 pi->uvd_enabled = false; 5808 5809 dpm_table = &pi->smc_state_table; 5810 5811 gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID); 5812 if (gpio.valid) { 5813 dpm_table->VRHotGpio = gpio.shift; 5814 rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT; 5815 } else { 5816 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN; 5817 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT; 5818 } 5819 5820 gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID); 5821 if (gpio.valid) { 5822 dpm_table->AcDcGpio = gpio.shift; 5823 rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC; 5824 } else { 5825 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN; 5826 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC; 5827 } 5828 5829 gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID); 5830 if (gpio.valid) { 5831 u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL); 5832 5833 switch (gpio.shift) { 5834 case 0: 5835 tmp &= ~GNB_SLOW_MODE_MASK; 5836 tmp |= GNB_SLOW_MODE(1); 5837 break; 5838 case 1: 5839 tmp &= ~GNB_SLOW_MODE_MASK; 5840 tmp |= GNB_SLOW_MODE(2); 5841 break; 5842 case 2: 5843 tmp |= GNB_SLOW; 5844 break; 5845 case 3: 5846 tmp |= FORCE_NB_PS1; 5847 break; 5848 case 4: 5849 tmp |= DPM_ENABLED; 5850 break; 5851 default: 5852 DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift); 5853 break; 5854 } 5855 WREG32_SMC(CNB_PWRMGT_CNTL, tmp); 5856 } 5857 5858 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE; 5859 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE; 5860 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE; 5861 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT)) 5862 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; 5863 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) 5864 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; 5865 5866 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) { 5867 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) 5868 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; 5869 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) 5870 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; 5871 else 5872 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL; 5873 } 5874 5875 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) { 5876 if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) 5877 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; 5878 else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) 5879 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; 5880 else 5881 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL; 5882 } 5883 5884 pi->vddc_phase_shed_control = true; 5885 5886 #if defined(CONFIG_ACPI) 5887 pi->pcie_performance_request = 5888 radeon_acpi_is_pcie_performance_request_supported(rdev); 5889 #else 5890 pi->pcie_performance_request = false; 5891 #endif 5892 5893 if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, 5894 &frev, &crev, &data_offset)) { 5895 pi->caps_sclk_ss_support = true; 5896 pi->caps_mclk_ss_support = true; 5897 pi->dynamic_ss = true; 5898 } else { 5899 pi->caps_sclk_ss_support = false; 5900 pi->caps_mclk_ss_support = false; 5901 pi->dynamic_ss = true; 5902 } 5903 5904 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) 5905 pi->thermal_protection = true; 5906 else 5907 pi->thermal_protection = false; 5908 5909 pi->caps_dynamic_ac_timing = true; 5910 5911 pi->uvd_power_gated = false; 5912 5913 /* make sure dc limits are valid */ 5914 if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || 5915 (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0)) 5916 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc = 5917 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 5918 5919 pi->fan_ctrl_is_in_default_mode = true; 5920 5921 return 0; 5922 } 5923 5924 void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, 5925 struct seq_file *m) 5926 { 5927 struct ci_power_info *pi = ci_get_pi(rdev); 5928 struct radeon_ps *rps = &pi->current_rps; 5929 u32 sclk = ci_get_average_sclk_freq(rdev); 5930 u32 mclk = ci_get_average_mclk_freq(rdev); 5931 5932 seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis"); 5933 seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis"); 5934 seq_printf(m, "power level avg sclk: %u mclk: %u\n", 5935 sclk, mclk); 5936 } 5937 5938 void ci_dpm_print_power_state(struct radeon_device *rdev, 5939 struct radeon_ps *rps) 5940 { 5941 struct ci_ps *ps = ci_get_ps(rps); 5942 struct ci_pl *pl; 5943 int i; 5944 5945 r600_dpm_print_class_info(rps->class, rps->class2); 5946 r600_dpm_print_cap_info(rps->caps); 5947 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 5948 for (i = 0; i < ps->performance_level_count; i++) { 5949 pl = &ps->performance_levels[i]; 5950 printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n", 5951 i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane); 5952 } 5953 r600_dpm_print_ps_status(rdev, rps); 5954 } 5955 5956 u32 ci_dpm_get_current_sclk(struct radeon_device *rdev) 5957 { 5958 u32 sclk = ci_get_average_sclk_freq(rdev); 5959 5960 return sclk; 5961 } 5962 5963 u32 ci_dpm_get_current_mclk(struct radeon_device *rdev) 5964 { 5965 u32 mclk = ci_get_average_mclk_freq(rdev); 5966 5967 return mclk; 5968 } 5969 5970 u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low) 5971 { 5972 struct ci_power_info *pi = ci_get_pi(rdev); 5973 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps); 5974 5975 if (low) 5976 return requested_state->performance_levels[0].sclk; 5977 else 5978 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; 5979 } 5980 5981 u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low) 5982 { 5983 struct ci_power_info *pi = ci_get_pi(rdev); 5984 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps); 5985 5986 if (low) 5987 return requested_state->performance_levels[0].mclk; 5988 else 5989 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk; 5990 } 5991