/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/seq_file.h>

#include <drm/drm_pci.h>

#include "atom.h"
#include "ci_dpm.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_ucode.h"

#define MC_CG_ARB_FREQ_F0	0x0a
#define MC_CG_ARB_FREQ_F1	0x0b
#define MC_CG_ARB_FREQ_F2	0x0c
#define MC_CG_ARB_FREQ_F3	0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE			4
#define VOLTAGE_VID_OFFSET_SCALE1	625
#define VOLTAGE_VID_OFFSET_SCALE2	100

static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
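/*
 * DIDT (di/dt throttling) register programming table.  Each entry is
 * { offset, mask, shift, value, type }: the masked field of the DIDT
 * indirect register at 'offset' is set to 'value' << 'shift'.  The
 * offsets appear to cover the weight/control registers of the SQ
 * (0x00-0x12), DB (0x20-0x32), TD (0x40-0x52) and TCP (0x60-0x72)
 * blocks; the list is terminated by an offset of 0xFFFFFFFF.
 */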
static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				       u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table);
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern int ci_mc_load_microcode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);

static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);

static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter);

static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev);
static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev);

static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
	struct ci_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}
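/*
 * Select the powertune defaults for this chip by PCI device ID:
 * Bonaire, Saturn and Hawaii XT/Pro variants each get their own
 * table; unrecognized IDs fall back to the Bonaire XT values.
 */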
static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	switch (rdev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		if (rdev->family == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}
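/*
 * Convert a VDDC value in mV to an SVI2 VID code: 6.25 mV per step
 * down from 1.55 V.  The constants are pre-scaled by VOLTAGE_SCALE
 * (6200 = 1550 mV * 4, 25 = 6.25 mV * 4).
 */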
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}
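/*
 * TdcWaterfallCtl ("dw8") lives in the PmFuses table in SMC RAM.
 * The dword read only verifies that the fuse table is accessible;
 * on success the fetched value is immediately overwritten with the
 * per-chip default.
 */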
static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
	    (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
		rdev->pm.dpm.fan.fan_output_sensitivity =
			rdev->pm.dpm.fan.default_fan_output_sensitivity;

	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
		cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}
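/*
 * Fill in the BAPM/DTE parameters of the SMC DPM table.  TDP values
 * are scaled by 256 into the SMC's fixed-point format, and the
 * BAPMTI_R/BAPMTI_RC thermal-coupling tables are copied from the
 * per-chip defaults, one entry per (DTE iteration, source, sink)
 * triple.
 */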
static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

static int ci_populate_pm_base(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = ci_read_smc_sram_dword(rdev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(rdev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(rdev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(rdev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(rdev);
		if (ret)
			return ret;
		ret = ci_populate_fuzzy_fan(rdev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}
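/*
 * Toggle the DIDT_CTRL_EN bit of each DIDT block (SQ/DB/TD/TCP)
 * whose ramping cap is set.
 */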
static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		ci_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}
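/*
 * Enable or disable the SMC power-containment features: BAPM (via
 * the digital temperature estimator), the TDC limit, and package
 * power tracking.  When package power tracking comes up, the power
 * limit is initialized to the platform's maximum power delivery
 * limit.
 */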
static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled) {
		if (enable)
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
		else
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
	}

	if (smc_result == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}

static int ci_power_control_set_level(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
	}

	return ret;
}

void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(rdev, gate);
}
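/*
 * Memory clock switching can glitch the display unless it completes
 * inside the blanking period.  GDDR5 needs a longer blanking
 * interval (450 us) than other memory types (300 us).
 */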
bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;

	/* disable mclk switching if the refresh is >120Hz, even if the
	 * blanking period would allow it
	 */
	if (r600_dpm_get_vrefresh(rdev) > 120)
		return true;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}

static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}
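/*
 * Clamp the requested range to the 0-255 C the hardware supports and
 * program the high/low thermal interrupt thresholds (the registers
 * take whole degrees C, the dpm code tracks millidegrees).
 */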
static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT);
	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
		CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(CG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

static int ci_thermal_enable_alert(struct radeon_device *rdev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
	PPSMC_Result result;

	if (enable) {
		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
		WREG32_SMC(CG_THERMAL_INT, thermal_int);
		rdev->irq.dpm_thermal = false;
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
		WREG32_SMC(CG_THERMAL_INT, thermal_int);
		rdev->irq.dpm_thermal = true;
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
	tmp |= TMIN(0);
	WREG32_SMC(CG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
	tmp |= FDO_PWM_MODE(mode);
	WREG32_SMC(CG_FDO_CTRL2, tmp);
}
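/*
 * Build the SMC fan table from the platform fan profile.
 * Temperatures come in hundredths of a degree (hence the +50
 * rounding before the divide by 100) and the slopes relate PWM duty
 * to temperature in the SMC's fixed-point format.  If the profile or
 * the table upload is unusable, fall back to non-SMC fan control.
 */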
static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0) {
		rdev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
	t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;

	pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
	pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = radeon_get_xclk(rdev);

	fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = ci_copy_bytes_to_smc(rdev,
				   pi->fan_table_start,
				   (u8 *)(&fan_table),
				   sizeof(fan_table),
				   pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		rdev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result ret;

	if (pi->caps_od_fuzzy_fan_control_support) {
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_StartFanControl,
							FAN_CONTROL_FUZZY);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_MSG_SetFanPwmMax,
							rdev->pm.dpm.fan.default_max_fan_pwm);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	} else {
		ret = ci_send_msg_to_smc_with_parameter(rdev,
							PPSMC_StartFanControl,
							FAN_CONTROL_TABLE);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->fan_is_controlled_by_smc = true;
	return 0;
}

static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
{
	PPSMC_Result ret;
	struct ci_power_info *pi = ci_get_pi(rdev);

	ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
	if (ret == PPSMC_Result_OK) {
		pi->fan_is_controlled_by_smc = false;
		return 0;
	} else
		return -EINVAL;
}

int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
				      u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;

	if (rdev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
	duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}
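/*
 * Set a static fan speed.  'speed' is a percentage that is rescaled
 * against the 100% duty-cycle value from CG_FDO_CTRL1; refused while
 * the SMC owns the fan.
 */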
int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
				      u32 speed)
{
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
	tmp |= FDO_STATIC_DUTY(duty);
	WREG32_SMC(CG_FDO_CTRL0, tmp);

	return 0;
}

void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
{
	if (mode) {
		/* stop auto-manage */
		if (rdev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(rdev);
		ci_fan_ctrl_set_static_mode(rdev, mode);
	} else {
		/* restart auto-manage */
		if (rdev->pm.dpm.fan.ucode_fan_control)
			ci_thermal_start_smc_fan_control(rdev);
		else
			ci_fan_ctrl_set_default_mode(rdev);
	}
}

u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (pi->fan_is_controlled_by_smc)
		return 0;

	tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
	return (tmp >> FDO_PWM_MODE_SHIFT);
}

#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = radeon_get_xclk(rdev);

	if (rdev->pm.no_fan)
		return -ENOENT;

	if (rdev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < rdev->pm.fan_min_rpm) ||
	    (speed > rdev->pm.fan_max_rpm))
		return -EINVAL;

	if (rdev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(rdev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
	tmp |= TARGET_PERIOD(tach_period);
	WREG32_SMC(CG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
#endif

static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (!pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
		tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
		WREG32_SMC(CG_FDO_CTRL2, tmp);

		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
		tmp |= TMIN(pi->t_min);
		WREG32_SMC(CG_FDO_CTRL2, tmp);
		pi->fan_ctrl_is_in_default_mode = true;
	}
}

static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.fan.ucode_fan_control) {
		ci_fan_ctrl_start_smc_fan_control(rdev);
		ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
	}
}

static void ci_thermal_initialize(struct radeon_device *rdev)
{
	u32 tmp;

	if (rdev->pm.fan_pulses_per_revolution) {
		tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
		tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
		WREG32_SMC(CG_TACH_CTRL, tmp);
	}

	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
	tmp |= TACH_PWM_RESP_RATE(0x28);
	WREG32_SMC(CG_FDO_CTRL2, tmp);
}

static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
{
	int ret;

	ci_thermal_initialize(rdev);
	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(rdev, true);
	if (ret)
		return ret;
	if (rdev->pm.dpm.fan.ucode_fan_control) {
		ret = ci_thermal_setup_fan_table(rdev);
		if (ret)
			return ret;
		ci_thermal_start_smc_fan_control(rdev);
	}

	return 0;
}

static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
{
	if (!rdev->pm.no_fan)
		ci_fan_ctrl_set_default_mode(rdev);
}

#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static int ci_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_write_smc_sram_dword(rdev,
				       pi->soft_regs_start + reg_offset,
				       value, pi->sram_end);
}

static void ci_init_fps_limits(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = ci_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}

	return ret;
}
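/*
 * Resolve the virtual (leakage) voltage IDs that the vbios uses into
 * real voltages, either by asking the vbios EVV interface or by
 * looking them up via the board's leakage ID.
 */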
static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}
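/*
 * Select which sources (the internal digital thermal sensor and/or
 * an external GPIO) may trigger automatic throttling, and gate
 * thermal protection as a whole via THERMAL_PROTECTION_DIS.
 */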
static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
					   enum radeon_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}
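/*
 * Enable or disable SCLK and MCLK DPM in the SMC.  Bringing MCLK DPM
 * up also enables memory CAC and arms the local CAC blocks
 * (MC0/MC1/CPL) in two stages with a short delay in between.
 */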
static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_stop_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp &= ~GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(rdev, false);
	if (ret)
		return ret;

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}

static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);

	if (enable)
		tmp &= ~SCLK_PWRMGT_OFF;
	else
		tmp |= SCLK_PWRMGT_OFF;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
}

#if 0
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif
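/*
 * SMC mailbox protocol: write the message ID to SMC_MESSAGE_0 and
 * poll SMC_RESP_0 until it goes non-zero; the register then holds
 * the PPSMC_Result code.  A parameter, when used, is passed in
 * SMC_MSG_ARG_0 before the message and may be read back from the
 * same register afterwards.
 */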
static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
{
	u32 tmp;
	int i;

	if (!ci_is_smc_running(rdev))
		return PPSMC_Result_Failed;

	WREG32(SMC_MESSAGE_0, msg);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(SMC_RESP_0);
		if (tmp != 0)
			break;
		udelay(1);
	}
	tmp = RREG32(SMC_RESP_0);

	return (PPSMC_Result)tmp;
}

static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(rdev, msg);
}

static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
							PPSMC_Msg msg, u32 *parameter)
{
	PPSMC_Result smc_result;

	smc_result = ci_send_msg_to_smc(rdev, msg);

	if ((smc_result == PPSMC_Result_OK) && parameter)
		*parameter = RREG32(SMC_MSG_ARG_0);

	return smc_result;
}

static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->sclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->mclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->pcie_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}
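/*
 * Cache the SMC RAM offsets of the DPM table, soft registers, MC
 * register table, fan table and MC arb timing table from the SMC
 * firmware header.
 */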
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp)
{
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;
	return 0;
}

#if 0
static int ci_set_boot_state(struct radeon_device *rdev)
{
	return ci_enable_sclk_mclk_dpm(rdev, false);
}
#endif

static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
{
	u32 sclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetSclkFrequency,
						    &sclk_freq);
	if (smc_result != PPSMC_Result_OK)
		sclk_freq = 0;

	return sclk_freq;
}

static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
{
	u32 mclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetMclkFrequency,
						    &mclk_freq);
	if (smc_result != PPSMC_Result_OK)
		mclk_freq = 0;

	return mclk_freq;
}

static void ci_dpm_start_smc(struct radeon_device *rdev)
{
	int i;

	ci_program_jump_on_start(rdev);
	ci_start_smc_clock(rdev);
	ci_start_smc(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
			break;
	}
}

static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
	ci_reset_smc(rdev);
	ci_stop_smc_clock(rdev);
}

static int ci_process_firmware_header(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->dpm_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->mc_reg_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, FanTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->fan_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->arb_table_start = tmp;

	return 0;
}

static void ci_read_clock_registers(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(CG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}
RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2); 1886 pi->clock_registers.dll_cntl = RREG32(DLL_CNTL); 1887 pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL); 1888 pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL); 1889 pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL); 1890 pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL); 1891 pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1); 1892 pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2); 1893 pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1); 1894 pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2); 1895 } 1896 1897 static void ci_init_sclk_t(struct radeon_device *rdev) 1898 { 1899 struct ci_power_info *pi = ci_get_pi(rdev); 1900 1901 pi->low_sclk_interrupt_t = 0; 1902 } 1903 1904 static void ci_enable_thermal_protection(struct radeon_device *rdev, 1905 bool enable) 1906 { 1907 u32 tmp = RREG32_SMC(GENERAL_PWRMGT); 1908 1909 if (enable) 1910 tmp &= ~THERMAL_PROTECTION_DIS; 1911 else 1912 tmp |= THERMAL_PROTECTION_DIS; 1913 WREG32_SMC(GENERAL_PWRMGT, tmp); 1914 } 1915 1916 static void ci_enable_acpi_power_management(struct radeon_device *rdev) 1917 { 1918 u32 tmp = RREG32_SMC(GENERAL_PWRMGT); 1919 1920 tmp |= STATIC_PM_EN; 1921 1922 WREG32_SMC(GENERAL_PWRMGT, tmp); 1923 } 1924 1925 #if 0 1926 static int ci_enter_ulp_state(struct radeon_device *rdev) 1927 { 1928 1929 WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower); 1930 1931 udelay(25000); 1932 1933 return 0; 1934 } 1935 1936 static int ci_exit_ulp_state(struct radeon_device *rdev) 1937 { 1938 int i; 1939 1940 WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower); 1941 1942 udelay(7000); 1943 1944 for (i = 0; i < rdev->usec_timeout; i++) { 1945 if (RREG32(SMC_RESP_0) == 1) 1946 break; 1947 udelay(1000); 1948 } 1949 1950 return 0; 1951 } 1952 #endif 1953 1954 static int ci_notify_smc_display_change(struct radeon_device *rdev, 1955 bool has_display) 1956 { 1957 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay; 1958 1959 return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 
0 : -EINVAL; 1960 } 1961 1962 static int ci_enable_ds_master_switch(struct radeon_device *rdev, 1963 bool enable) 1964 { 1965 struct ci_power_info *pi = ci_get_pi(rdev); 1966 1967 if (enable) { 1968 if (pi->caps_sclk_ds) { 1969 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK) 1970 return -EINVAL; 1971 } else { 1972 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) 1973 return -EINVAL; 1974 } 1975 } else { 1976 if (pi->caps_sclk_ds) { 1977 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) 1978 return -EINVAL; 1979 } 1980 } 1981 1982 return 0; 1983 } 1984 1985 static void ci_program_display_gap(struct radeon_device *rdev) 1986 { 1987 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL); 1988 u32 pre_vbi_time_in_us; 1989 u32 frame_time_in_us; 1990 u32 ref_clock = rdev->clock.spll.reference_freq; 1991 u32 refresh_rate = r600_dpm_get_vrefresh(rdev); 1992 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 1993 1994 tmp &= ~DISP_GAP_MASK; 1995 if (rdev->pm.dpm.new_active_crtc_count > 0) 1996 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); 1997 else 1998 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE); 1999 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp); 2000 2001 if (refresh_rate == 0) 2002 refresh_rate = 60; 2003 if (vblank_time == 0xffffffff) 2004 vblank_time = 500; 2005 frame_time_in_us = 1000000 / refresh_rate; 2006 pre_vbi_time_in_us = 2007 frame_time_in_us - 200 - vblank_time; 2008 tmp = pre_vbi_time_in_us * (ref_clock / 100); 2009 2010 WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp); 2011 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64); 2012 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); 2013 2014 2015 ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1)); 2016 2017 } 2018 2019 static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable) 2020 { 2021 struct ci_power_info *pi = ci_get_pi(rdev); 2022 u32 tmp; 2023 2024 if (enable) { 2025 if (pi->caps_sclk_ss_support) { 2026 tmp = RREG32_SMC(GENERAL_PWRMGT); 2027 tmp |= DYN_SPREAD_SPECTRUM_EN; 2028 WREG32_SMC(GENERAL_PWRMGT, tmp); 2029 } 2030 } else { 2031 tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM); 2032 tmp &= ~SSEN; 2033 WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp); 2034 2035 tmp = RREG32_SMC(GENERAL_PWRMGT); 2036 tmp &= ~DYN_SPREAD_SPECTRUM_EN; 2037 WREG32_SMC(GENERAL_PWRMGT, tmp); 2038 } 2039 } 2040 2041 static void ci_program_sstp(struct radeon_device *rdev) 2042 { 2043 WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT))); 2044 } 2045 2046 static void ci_enable_display_gap(struct radeon_device *rdev) 2047 { 2048 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL); 2049 2050 tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK); 2051 tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) | 2052 DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK)); 2053 2054 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp); 2055 } 2056 2057 static void ci_program_vc(struct radeon_device *rdev) 2058 { 2059 u32 tmp; 2060 2061 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 2062 tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT); 2063 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 2064 2065 WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0); 2066 WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1); 2067 WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2); 2068 WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3); 2069 WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4); 2070 WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5); 2071 WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6); 2072 
WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7); 2073 } 2074 2075 static void ci_clear_vc(struct radeon_device *rdev) 2076 { 2077 u32 tmp; 2078 2079 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 2080 tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT); 2081 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 2082 2083 WREG32_SMC(CG_FTV_0, 0); 2084 WREG32_SMC(CG_FTV_1, 0); 2085 WREG32_SMC(CG_FTV_2, 0); 2086 WREG32_SMC(CG_FTV_3, 0); 2087 WREG32_SMC(CG_FTV_4, 0); 2088 WREG32_SMC(CG_FTV_5, 0); 2089 WREG32_SMC(CG_FTV_6, 0); 2090 WREG32_SMC(CG_FTV_7, 0); 2091 } 2092 2093 static int ci_upload_firmware(struct radeon_device *rdev) 2094 { 2095 struct ci_power_info *pi = ci_get_pi(rdev); 2096 int i, ret; 2097 2098 for (i = 0; i < rdev->usec_timeout; i++) { 2099 if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE) 2100 break; 2101 } 2102 WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1); 2103 2104 ci_stop_smc_clock(rdev); 2105 ci_reset_smc(rdev); 2106 2107 ret = ci_load_smc_ucode(rdev, pi->sram_end); 2108 2109 return ret; 2110 2111 } 2112 2113 static int ci_get_svi2_voltage_table(struct radeon_device *rdev, 2114 struct radeon_clock_voltage_dependency_table *voltage_dependency_table, 2115 struct atom_voltage_table *voltage_table) 2116 { 2117 u32 i; 2118 2119 if (voltage_dependency_table == NULL) 2120 return -EINVAL; 2121 2122 voltage_table->mask_low = 0; 2123 voltage_table->phase_delay = 0; 2124 2125 voltage_table->count = voltage_dependency_table->count; 2126 for (i = 0; i < voltage_table->count; i++) { 2127 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v; 2128 voltage_table->entries[i].smio_low = 0; 2129 } 2130 2131 return 0; 2132 } 2133 2134 static int ci_construct_voltage_tables(struct radeon_device *rdev) 2135 { 2136 struct ci_power_info *pi = ci_get_pi(rdev); 2137 int ret; 2138 2139 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { 2140 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC, 2141 VOLTAGE_OBJ_GPIO_LUT, 2142 &pi->vddc_voltage_table); 2143 if (ret) 2144 return ret; 2145 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 2146 ret = ci_get_svi2_voltage_table(rdev, 2147 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, 2148 &pi->vddc_voltage_table); 2149 if (ret) 2150 return ret; 2151 } 2152 2153 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC) 2154 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC, 2155 &pi->vddc_voltage_table); 2156 2157 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { 2158 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI, 2159 VOLTAGE_OBJ_GPIO_LUT, 2160 &pi->vddci_voltage_table); 2161 if (ret) 2162 return ret; 2163 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 2164 ret = ci_get_svi2_voltage_table(rdev, 2165 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, 2166 &pi->vddci_voltage_table); 2167 if (ret) 2168 return ret; 2169 } 2170 2171 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI) 2172 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI, 2173 &pi->vddci_voltage_table); 2174 2175 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { 2176 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC, 2177 VOLTAGE_OBJ_GPIO_LUT, 2178 &pi->mvdd_voltage_table); 2179 if (ret) 2180 return ret; 2181 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 2182 ret = ci_get_svi2_voltage_table(rdev, 2183 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, 2184 &pi->mvdd_voltage_table); 2185 if (ret) 2186 return ret; 2187 } 2188 2189 if (pi->mvdd_voltage_table.count > 
SMU7_MAX_LEVELS_MVDD) 2190 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD, 2191 &pi->mvdd_voltage_table); 2192 2193 return 0; 2194 } 2195 2196 static void ci_populate_smc_voltage_table(struct radeon_device *rdev, 2197 struct atom_voltage_table_entry *voltage_table, 2198 SMU7_Discrete_VoltageLevel *smc_voltage_table) 2199 { 2200 int ret; 2201 2202 ret = ci_get_std_voltage_value_sidd(rdev, voltage_table, 2203 &smc_voltage_table->StdVoltageHiSidd, 2204 &smc_voltage_table->StdVoltageLoSidd); 2205 2206 if (ret) { 2207 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE; 2208 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE; 2209 } 2210 2211 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE); 2212 smc_voltage_table->StdVoltageHiSidd = 2213 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd); 2214 smc_voltage_table->StdVoltageLoSidd = 2215 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd); 2216 } 2217 2218 static int ci_populate_smc_vddc_table(struct radeon_device *rdev, 2219 SMU7_Discrete_DpmTable *table) 2220 { 2221 struct ci_power_info *pi = ci_get_pi(rdev); 2222 unsigned int count; 2223 2224 table->VddcLevelCount = pi->vddc_voltage_table.count; 2225 for (count = 0; count < table->VddcLevelCount; count++) { 2226 ci_populate_smc_voltage_table(rdev, 2227 &pi->vddc_voltage_table.entries[count], 2228 &table->VddcLevel[count]); 2229 2230 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) 2231 table->VddcLevel[count].Smio |= 2232 pi->vddc_voltage_table.entries[count].smio_low; 2233 else 2234 table->VddcLevel[count].Smio = 0; 2235 } 2236 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount); 2237 2238 return 0; 2239 } 2240 2241 static int ci_populate_smc_vddci_table(struct radeon_device *rdev, 2242 SMU7_Discrete_DpmTable *table) 2243 { 2244 unsigned int count; 2245 struct ci_power_info *pi = ci_get_pi(rdev); 2246 2247 table->VddciLevelCount = pi->vddci_voltage_table.count; 2248 for (count = 0; count < table->VddciLevelCount; count++) { 2249 ci_populate_smc_voltage_table(rdev, 2250 &pi->vddci_voltage_table.entries[count], 2251 &table->VddciLevel[count]); 2252 2253 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) 2254 table->VddciLevel[count].Smio |= 2255 pi->vddci_voltage_table.entries[count].smio_low; 2256 else 2257 table->VddciLevel[count].Smio = 0; 2258 } 2259 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount); 2260 2261 return 0; 2262 } 2263 2264 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev, 2265 SMU7_Discrete_DpmTable *table) 2266 { 2267 struct ci_power_info *pi = ci_get_pi(rdev); 2268 unsigned int count; 2269 2270 table->MvddLevelCount = pi->mvdd_voltage_table.count; 2271 for (count = 0; count < table->MvddLevelCount; count++) { 2272 ci_populate_smc_voltage_table(rdev, 2273 &pi->mvdd_voltage_table.entries[count], 2274 &table->MvddLevel[count]); 2275 2276 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) 2277 table->MvddLevel[count].Smio |= 2278 pi->mvdd_voltage_table.entries[count].smio_low; 2279 else 2280 table->MvddLevel[count].Smio = 0; 2281 } 2282 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount); 2283 2284 return 0; 2285 } 2286 2287 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev, 2288 SMU7_Discrete_DpmTable *table) 2289 { 2290 int ret; 2291 2292 ret = ci_populate_smc_vddc_table(rdev, table); 2293 if (ret) 2294 return ret; 2295 2296 ret = ci_populate_smc_vddci_table(rdev, table); 2297 if (ret) 2298 return 
ret; 2299 2300 ret = ci_populate_smc_mvdd_table(rdev, table); 2301 if (ret) 2302 return ret; 2303 2304 return 0; 2305 } 2306 2307 static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk, 2308 SMU7_Discrete_VoltageLevel *voltage) 2309 { 2310 struct ci_power_info *pi = ci_get_pi(rdev); 2311 u32 i = 0; 2312 2313 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) { 2314 for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) { 2315 if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) { 2316 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value; 2317 return 0; 2318 } 2319 } 2320 2321 if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count) 2322 return -EINVAL; 2323 } 2324 2325 return -EINVAL; 2326 } 2327 2328 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, 2329 struct atom_voltage_table_entry *voltage_table, 2330 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd) 2331 { 2332 u16 v_index, idx; 2333 bool voltage_found = false; 2334 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE; 2335 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE; 2336 2337 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL) 2338 return -EINVAL; 2339 2340 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) { 2341 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { 2342 if (voltage_table->value == 2343 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { 2344 voltage_found = true; 2345 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count) 2346 idx = v_index; 2347 else 2348 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1; 2349 *std_voltage_lo_sidd = 2350 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; 2351 *std_voltage_hi_sidd = 2352 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; 2353 break; 2354 } 2355 } 2356 2357 if (!voltage_found) { 2358 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { 2359 if (voltage_table->value <= 2360 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { 2361 voltage_found = true; 2362 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count) 2363 idx = v_index; 2364 else 2365 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1; 2366 *std_voltage_lo_sidd = 2367 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; 2368 *std_voltage_hi_sidd = 2369 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; 2370 break; 2371 } 2372 } 2373 } 2374 } 2375 2376 return 0; 2377 } 2378 2379 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev, 2380 const struct radeon_phase_shedding_limits_table *limits, 2381 u32 sclk, 2382 u32 *phase_shedding) 2383 { 2384 unsigned int i; 2385 2386 *phase_shedding = 1; 2387 2388 for (i = 0; i < limits->count; i++) { 2389 if (sclk < limits->entries[i].sclk) { 2390 *phase_shedding = i; 2391 break; 2392 } 2393 } 2394 } 2395 2396 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev, 2397 const struct radeon_phase_shedding_limits_table *limits, 2398 u32 mclk, 2399 u32 *phase_shedding) 2400 { 2401 unsigned int i; 2402 2403 *phase_shedding = 1; 2404 2405 for (i = 0; i < limits->count; i++) { 2406 if (mclk < limits->entries[i].mclk) { 2407 *phase_shedding = i; 2408 break; 2409 } 2410 } 2411 } 2412
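/*
 * Stash MC_CG_ARB_FREQ_F1 in the top byte of the dword at arb_table_start;
 * this presumably tells the SMC firmware which of the four MC_CG_ARB_FREQ_F0-F3
 * MC arbitration register sets to treat as the active one.
 */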
2413 static int ci_init_arb_table_index(struct radeon_device *rdev) 2414 { 2415 struct ci_power_info *pi = ci_get_pi(rdev); 2416 u32 tmp; 2417 int ret; 2418 2419 ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start, 2420 &tmp, pi->sram_end); 2421 if (ret) 2422 return ret; 2423 2424 tmp &= 0x00FFFFFF; 2425 tmp |= MC_CG_ARB_FREQ_F1 << 24; 2426 2427 return ci_write_smc_sram_dword(rdev, pi->arb_table_start, 2428 tmp, pi->sram_end); 2429 } 2430 2431 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev, 2432 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table, 2433 u32 clock, u32 *voltage) 2434 { 2435 u32 i = 0; 2436 2437 if (allowed_clock_voltage_table->count == 0) 2438 return -EINVAL; 2439 2440 for (i = 0; i < allowed_clock_voltage_table->count; i++) { 2441 if (allowed_clock_voltage_table->entries[i].clk >= clock) { 2442 *voltage = allowed_clock_voltage_table->entries[i].v; 2443 return 0; 2444 } 2445 } 2446 2447 *voltage = allowed_clock_voltage_table->entries[i-1].v; 2448 2449 return 0; 2450 } 2451 2452 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev, 2453 u32 sclk, u32 min_sclk_in_sr) 2454 { 2455 u32 i; 2456 u32 tmp; 2457 u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ? 2458 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK; 2459 2460 if (sclk < min) 2461 return 0; 2462 2463 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { 2464 tmp = sclk / (1 << i); 2465 if (tmp >= min || i == 0) 2466 break; 2467 } 2468 2469 return (u8)i; 2470 } 2471 2472 static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev) 2473 { 2474 return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); 2475 } 2476 2477 static int ci_reset_to_default(struct radeon_device *rdev) 2478 { 2479 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ? 2480 0 : -EINVAL; 2481 } 2482 2483 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev) 2484 { 2485 u32 tmp; 2486 2487 tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8; 2488 2489 if (tmp == MC_CG_ARB_FREQ_F0) 2490 return 0; 2491 2492 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0); 2493 } 2494 2495 static void ci_register_patching_mc_arb(struct radeon_device *rdev, 2496 const u32 engine_clock, 2497 const u32 memory_clock, 2498 u32 *dram_timing2) 2499 { 2500 bool patch; 2501 u32 tmp, tmp2; 2502 2503 tmp = RREG32(MC_SEQ_MISC0); 2504 patch = ((tmp & 0x0000f00) == 0x300) ? true : false; 2505
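/*
 * Apparently a board-specific MC ARB fixup: on 0x3xx MC_SEQ_MISC0 revisions
 * of device ids 0x67B0/0x67B1, bits 23:16 of dram_timing2 are recomputed from
 * the engine clock for two memory-clock bands (the clocks here appear to be
 * in the driver's usual 10 kHz units, i.e. roughly 1.0-1.25 GHz and
 * 1.25-1.375 GHz).
 */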
2506 if (patch && 2507 ((rdev->pdev->device == 0x67B0) || 2508 (rdev->pdev->device == 0x67B1))) { 2509 if ((memory_clock > 100000) && (memory_clock <= 125000)) { 2510 tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff; 2511 *dram_timing2 &= ~0x00ff0000; 2512 *dram_timing2 |= tmp2 << 16; 2513 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) { 2514 tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff; 2515 *dram_timing2 &= ~0x00ff0000; 2516 *dram_timing2 |= tmp2 << 16; 2517 } 2518 } 2519 } 2520 2521 2522 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev, 2523 u32 sclk, 2524 u32 mclk, 2525 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs) 2526 { 2527 u32 dram_timing; 2528 u32 dram_timing2; 2529 u32 burst_time; 2530 2531 radeon_atom_set_engine_dram_timings(rdev, sclk, mclk); 2532 2533 dram_timing = RREG32(MC_ARB_DRAM_TIMING); 2534 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); 2535 burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK; 2536 2537 ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2); 2538 2539 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing); 2540 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2); 2541 arb_regs->McArbBurstTime = (u8)burst_time; 2542 2543 return 0; 2544 } 2545 2546 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev) 2547 { 2548 struct ci_power_info *pi = ci_get_pi(rdev); 2549 SMU7_Discrete_MCArbDramTimingTable arb_regs; 2550 u32 i, j; 2551 int ret = 0; 2552 2553 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable)); 2554 2555 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) { 2556 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) { 2557 ret = ci_populate_memory_timing_parameters(rdev, 2558 pi->dpm_table.sclk_table.dpm_levels[i].value, 2559 pi->dpm_table.mclk_table.dpm_levels[j].value, 2560 &arb_regs.entries[i][j]); 2561 if (ret) 2562 break; 2563 } 2564 } 2565 2566 if (ret == 0) 2567 ret = ci_copy_bytes_to_smc(rdev, 2568 pi->arb_table_start, 2569 (u8 *)&arb_regs, 2570 sizeof(SMU7_Discrete_MCArbDramTimingTable), 2571 pi->sram_end); 2572 2573 return ret; 2574 } 2575 2576 static int ci_program_memory_timing_parameters(struct radeon_device *rdev) 2577 { 2578 struct ci_power_info *pi = ci_get_pi(rdev); 2579 2580 if (pi->need_update_smu7_dpm_table == 0) 2581 return 0; 2582 2583 return ci_do_program_memory_timing_parameters(rdev); 2584 } 2585 2586 static void ci_populate_smc_initial_state(struct radeon_device *rdev, 2587 struct radeon_ps *radeon_boot_state) 2588 { 2589 struct ci_ps *boot_state = ci_get_ps(radeon_boot_state); 2590 struct ci_power_info *pi = ci_get_pi(rdev); 2591 u32 level = 0; 2592 2593 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) { 2594 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >= 2595 boot_state->performance_levels[0].sclk) { 2596 pi->smc_state_table.GraphicsBootLevel = level; 2597 break; 2598 } 2599 } 2600 2601 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) { 2602 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >= 2603 boot_state->performance_levels[0].mclk) { 2604 pi->smc_state_table.MemoryBootLevel = level; 2605 break; 2606 } 2607 } 2608 } 2609 2610 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table) 2611 { 2612 u32 i; 2613 u32 mask_value = 0; 2614 2615 for (i = dpm_table->count; i > 0; i--) { 2616 mask_value = mask_value << 1; 2617 if (dpm_table->dpm_levels[i-1].enabled) 2618 mask_value |= 0x1; 2619 else 2620 mask_value &= 0xFFFFFFFE; 2621 } 2622 2623 return mask_value; 2624 } 2625
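/*
 * Translate the PCIe speed table into SMC link levels. DownT/UpT look like
 * the down/up residency thresholds the SMC applies before moving between
 * link levels; the driver programs fixed values of 5 and 30 for every level.
 */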
2626 static void ci_populate_smc_link_level(struct radeon_device *rdev, 2627 SMU7_Discrete_DpmTable *table) 2628 { 2629 struct ci_power_info *pi = ci_get_pi(rdev); 2630 struct ci_dpm_table *dpm_table = &pi->dpm_table; 2631 u32 i; 2632 2633 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) { 2634 table->LinkLevel[i].PcieGenSpeed = 2635 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value; 2636 table->LinkLevel[i].PcieLaneCount = 2637 r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); 2638 table->LinkLevel[i].EnabledForActivity = 1; 2639 table->LinkLevel[i].DownT = cpu_to_be32(5); 2640 table->LinkLevel[i].UpT = cpu_to_be32(30); 2641 } 2642 2643 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count; 2644 pi->dpm_level_enable_mask.pcie_dpm_enable_mask = 2645 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); 2646 } 2647 2648 static int ci_populate_smc_uvd_level(struct radeon_device *rdev, 2649 SMU7_Discrete_DpmTable *table) 2650 { 2651 u32 count; 2652 struct atom_clock_dividers dividers; 2653 int ret = -EINVAL; 2654 2655 table->UvdLevelCount = 2656 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count; 2657 2658 for (count = 0; count < table->UvdLevelCount; count++) { 2659 table->UvdLevel[count].VclkFrequency = 2660 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk; 2661 table->UvdLevel[count].DclkFrequency = 2662 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk; 2663 table->UvdLevel[count].MinVddc = 2664 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; 2665 table->UvdLevel[count].MinVddcPhases = 1; 2666 2667 ret = radeon_atom_get_clock_dividers(rdev, 2668 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2669 table->UvdLevel[count].VclkFrequency, false, &dividers); 2670 if (ret) 2671 return ret; 2672 2673 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider; 2674 2675 ret = radeon_atom_get_clock_dividers(rdev, 2676 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2677 table->UvdLevel[count].DclkFrequency, false, &dividers); 2678 if (ret) 2679 return ret; 2680 2681 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider; 2682 2683 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency); 2684 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency); 2685 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc); 2686 } 2687 2688 return ret; 2689 } 2690 2691 static int ci_populate_smc_vce_level(struct radeon_device *rdev, 2692 SMU7_Discrete_DpmTable *table) 2693 { 2694 u32 count; 2695 struct atom_clock_dividers dividers; 2696 int ret = -EINVAL; 2697 2698 table->VceLevelCount = 2699 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count; 2700 2701 for (count = 0; count < table->VceLevelCount; count++) { 2702 table->VceLevel[count].Frequency = 2703 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk; 2704 table->VceLevel[count].MinVoltage = 2705 (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; 2706 table->VceLevel[count].MinPhases = 1; 2707 2708 ret = radeon_atom_get_clock_dividers(rdev, 2709 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2710 table->VceLevel[count].Frequency, false, &dividers); 2711 if (ret) 2712 return ret; 2713 2714 table->VceLevel[count].Divider = (u8)dividers.post_divider; 2715 2716 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency); 2717 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage); 2718 } 2719 2720 return ret; 2721 2722 } 2723
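/*
 * The ACP and SAMU levels below are built the same way as the UVD/VCE levels
 * above: copy each clock/voltage pair from the power-play dependency tables,
 * look up a post divider via the ATOM tables, then byte-swap the fields for
 * the big-endian SMC.
 */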
2724 static int ci_populate_smc_acp_level(struct radeon_device *rdev, 2725 SMU7_Discrete_DpmTable *table) 2726 { 2727 u32 count; 2728 struct atom_clock_dividers dividers; 2729 int ret = -EINVAL; 2730 2731 table->AcpLevelCount = (u8) 2732 (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count); 2733 2734 for (count = 0; count < table->AcpLevelCount; count++) { 2735 table->AcpLevel[count].Frequency = 2736 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk; 2737 table->AcpLevel[count].MinVoltage = 2738 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v; 2739 table->AcpLevel[count].MinPhases = 1; 2740 2741 ret = radeon_atom_get_clock_dividers(rdev, 2742 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2743 table->AcpLevel[count].Frequency, false, &dividers); 2744 if (ret) 2745 return ret; 2746 2747 table->AcpLevel[count].Divider = (u8)dividers.post_divider; 2748 2749 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency); 2750 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage); 2751 } 2752 2753 return ret; 2754 } 2755 2756 static int ci_populate_smc_samu_level(struct radeon_device *rdev, 2757 SMU7_Discrete_DpmTable *table) 2758 { 2759 u32 count; 2760 struct atom_clock_dividers dividers; 2761 int ret = -EINVAL; 2762 2763 table->SamuLevelCount = 2764 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count; 2765 2766 for (count = 0; count < table->SamuLevelCount; count++) { 2767 table->SamuLevel[count].Frequency = 2768 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk; 2769 table->SamuLevel[count].MinVoltage = 2770 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; 2771 table->SamuLevel[count].MinPhases = 1; 2772 2773 ret = radeon_atom_get_clock_dividers(rdev, 2774 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2775 table->SamuLevel[count].Frequency, false, &dividers); 2776 if (ret) 2777 return ret; 2778 2779 table->SamuLevel[count].Divider = (u8)dividers.post_divider; 2780 2781 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency); 2782 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage); 2783 } 2784 2785 return ret; 2786 } 2787 2788 static int ci_calculate_mclk_params(struct radeon_device *rdev, 2789 u32 memory_clock, 2790 SMU7_Discrete_MemoryLevel *mclk, 2791 bool strobe_mode, 2792 bool dll_state_on) 2793 { 2794 struct ci_power_info *pi = ci_get_pi(rdev); 2795 u32 dll_cntl = pi->clock_registers.dll_cntl; 2796 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl; 2797 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl; 2798 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl; 2799 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl; 2800 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1; 2801 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2; 2802 u32 mpll_ss1 = pi->clock_registers.mpll_ss1; 2803 u32 mpll_ss2 = pi->clock_registers.mpll_ss2; 2804 struct atom_mpll_param mpll_param; 2805 int ret; 2806 2807 ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param); 2808 if (ret) 2809 return ret; 2810 2811 mpll_func_cntl &= ~BWCTRL_MASK;
2812 mpll_func_cntl |= BWCTRL(mpll_param.bwcntl); 2813 2814 mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK); 2815 mpll_func_cntl_1 |= CLKF(mpll_param.clkf) | 2816 CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode); 2817 2818 mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK; 2819 mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div); 2820 2821 if (pi->mem_gddr5) { 2822 mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK); 2823 mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) | 2824 YCLK_POST_DIV(mpll_param.post_div); 2825 } 2826 2827 if (pi->caps_mclk_ss_support) { 2828 struct radeon_atom_ss ss; 2829 u32 freq_nom; 2830 u32 tmp; 2831 u32 reference_clock = rdev->clock.mpll.reference_freq; 2832 2833 if (mpll_param.qdr == 1) 2834 freq_nom = memory_clock * 4 * (1 << mpll_param.post_div); 2835 else 2836 freq_nom = memory_clock * 2 * (1 << mpll_param.post_div); 2837 2838 tmp = (freq_nom / reference_clock); 2839 tmp = tmp * tmp; 2840 if (radeon_atombios_get_asic_ss_info(rdev, &ss, 2841 ASIC_INTERNAL_MEMORY_SS, freq_nom)) { 2842 u32 clks = reference_clock * 5 / ss.rate; 2843 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom); 2844 2845 mpll_ss1 &= ~CLKV_MASK; 2846 mpll_ss1 |= CLKV(clkv); 2847 2848 mpll_ss2 &= ~CLKS_MASK; 2849 mpll_ss2 |= CLKS(clks); 2850 } 2851 } 2852 2853 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK; 2854 mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed); 2855 2856 if (dll_state_on) 2857 mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB; 2858 else 2859 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); 2860 2861 mclk->MclkFrequency = memory_clock; 2862 mclk->MpllFuncCntl = mpll_func_cntl; 2863 mclk->MpllFuncCntl_1 = mpll_func_cntl_1; 2864 mclk->MpllFuncCntl_2 = mpll_func_cntl_2; 2865 mclk->MpllAdFuncCntl = mpll_ad_func_cntl; 2866 mclk->MpllDqFuncCntl = mpll_dq_func_cntl; 2867 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; 2868 mclk->DllCntl = dll_cntl; 2869 mclk->MpllSs1 = mpll_ss1; 2870 mclk->MpllSs2 = mpll_ss2; 2871 2872 return 0; 2873 } 2874 2875 static int ci_populate_single_memory_level(struct radeon_device *rdev, 2876 u32 memory_clock, 2877 SMU7_Discrete_MemoryLevel *memory_level) 2878 { 2879 struct ci_power_info *pi = ci_get_pi(rdev); 2880 int ret; 2881 bool dll_state_on; 2882 2883 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) { 2884 ret = ci_get_dependency_volt_by_clk(rdev, 2885 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, 2886 memory_clock, &memory_level->MinVddc); 2887 if (ret) 2888 return ret; 2889 } 2890 2891 if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) { 2892 ret = ci_get_dependency_volt_by_clk(rdev, 2893 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, 2894 memory_clock, &memory_level->MinVddci); 2895 if (ret) 2896 return ret; 2897 } 2898 2899 if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) { 2900 ret = ci_get_dependency_volt_by_clk(rdev, 2901 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, 2902 memory_clock, &memory_level->MinMvdd); 2903 if (ret) 2904 return ret; 2905 } 2906 2907 memory_level->MinVddcPhases = 1; 2908 2909 if (pi->vddc_phase_shed_control) 2910 ci_populate_phase_value_based_on_mclk(rdev, 2911 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table, 2912 memory_clock, 2913 &memory_level->MinVddcPhases); 2914 2915 memory_level->EnabledForThrottle = 1; 2916 memory_level->UpH = 0; 2917 memory_level->DownH = 100; 2918 memory_level->VoltageDownH = 0; 2919 memory_level->ActivityLevel = (u16)pi->mclk_activity_target; 2920 2921 memory_level->StutterEnable = false; 2922 
memory_level->StrobeEnable = false; 2923 memory_level->EdcReadEnable = false; 2924 memory_level->EdcWriteEnable = false; 2925 memory_level->RttEnable = false; 2926 2927 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 2928 2929 if (pi->mclk_stutter_mode_threshold && 2930 (memory_clock <= pi->mclk_stutter_mode_threshold) && 2931 (pi->uvd_enabled == false) && 2932 (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) && 2933 (rdev->pm.dpm.new_active_crtc_count <= 2)) 2934 memory_level->StutterEnable = true; 2935 2936 if (pi->mclk_strobe_mode_threshold && 2937 (memory_clock <= pi->mclk_strobe_mode_threshold)) 2938 memory_level->StrobeEnable = 1; 2939 2940 if (pi->mem_gddr5) { 2941 memory_level->StrobeRatio = 2942 si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable); 2943 if (pi->mclk_edc_enable_threshold && 2944 (memory_clock > pi->mclk_edc_enable_threshold)) 2945 memory_level->EdcReadEnable = true; 2946 2947 if (pi->mclk_edc_wr_enable_threshold && 2948 (memory_clock > pi->mclk_edc_wr_enable_threshold)) 2949 memory_level->EdcWriteEnable = true; 2950 2951 if (memory_level->StrobeEnable) { 2952 if (si_get_mclk_frequency_ratio(memory_clock, true) >= 2953 ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf)) 2954 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false; 2955 else 2956 dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false; 2957 } else { 2958 dll_state_on = pi->dll_default_on; 2959 } 2960 } else { 2961 memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock); 2962 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false; 2963 } 2964 2965 ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on); 2966 if (ret) 2967 return ret; 2968 2969 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE); 2970 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases); 2971 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE); 2972 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE); 2973 2974 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency); 2975 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel); 2976 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl); 2977 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1); 2978 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2); 2979 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl); 2980 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl); 2981 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl); 2982 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl); 2983 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1); 2984 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2); 2985 2986 return 0; 2987 } 2988 2989 static int ci_populate_smc_acpi_level(struct radeon_device *rdev, 2990 SMU7_Discrete_DpmTable *table) 2991 { 2992 struct ci_power_info *pi = ci_get_pi(rdev); 2993 struct atom_clock_dividers dividers; 2994 SMU7_Discrete_VoltageLevel voltage_level; 2995 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl; 2996 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2; 2997 u32 dll_cntl = pi->clock_registers.dll_cntl; 2998 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl; 2999 int ret; 3000 3001 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; 3002 3003 if 
(pi->acpi_vddc) 3004 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE); 3005 else 3006 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE); 3007 3008 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1; 3009 3010 table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq; 3011 3012 ret = radeon_atom_get_clock_dividers(rdev, 3013 COMPUTE_GPUCLK_INPUT_FLAG_SCLK, 3014 table->ACPILevel.SclkFrequency, false, &dividers); 3015 if (ret) 3016 return ret; 3017 3018 table->ACPILevel.SclkDid = (u8)dividers.post_divider; 3019 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 3020 table->ACPILevel.DeepSleepDivId = 0; 3021 3022 spll_func_cntl &= ~SPLL_PWRON; 3023 spll_func_cntl |= SPLL_RESET; 3024 3025 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; 3026 spll_func_cntl_2 |= SCLK_MUX_SEL(4); 3027 3028 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; 3029 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; 3030 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3; 3031 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4; 3032 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum; 3033 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2; 3034 table->ACPILevel.CcPwrDynRm = 0; 3035 table->ACPILevel.CcPwrDynRm1 = 0; 3036 3037 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags); 3038 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases); 3039 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency); 3040 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl); 3041 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2); 3042 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3); 3043 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4); 3044 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum); 3045 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2); 3046 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm); 3047 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1); 3048 3049 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; 3050 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; 3051 3052 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) { 3053 if (pi->acpi_vddci) 3054 table->MemoryACPILevel.MinVddci = 3055 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE); 3056 else 3057 table->MemoryACPILevel.MinVddci = 3058 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE); 3059 } 3060 3061 if (ci_populate_mvdd_value(rdev, 0, &voltage_level)) 3062 table->MemoryACPILevel.MinMvdd = 0; 3063 else 3064 table->MemoryACPILevel.MinMvdd = 3065 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE); 3066 3067 mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET; 3068 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); 3069 3070 dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS); 3071 3072 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl); 3073 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl); 3074 table->MemoryACPILevel.MpllAdFuncCntl = 3075 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl); 3076 table->MemoryACPILevel.MpllDqFuncCntl = 3077 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl); 3078 table->MemoryACPILevel.MpllFuncCntl = 3079
cpu_to_be32(pi->clock_registers.mpll_func_cntl); 3080 table->MemoryACPILevel.MpllFuncCntl_1 = 3081 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1); 3082 table->MemoryACPILevel.MpllFuncCntl_2 = 3083 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2); 3084 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1); 3085 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2); 3086 3087 table->MemoryACPILevel.EnabledForThrottle = 0; 3088 table->MemoryACPILevel.EnabledForActivity = 0; 3089 table->MemoryACPILevel.UpH = 0; 3090 table->MemoryACPILevel.DownH = 100; 3091 table->MemoryACPILevel.VoltageDownH = 0; 3092 table->MemoryACPILevel.ActivityLevel = 3093 cpu_to_be16((u16)pi->mclk_activity_target); 3094 3095 table->MemoryACPILevel.StutterEnable = false; 3096 table->MemoryACPILevel.StrobeEnable = false; 3097 table->MemoryACPILevel.EdcReadEnable = false; 3098 table->MemoryACPILevel.EdcWriteEnable = false; 3099 table->MemoryACPILevel.RttEnable = false; 3100 3101 return 0; 3102 } 3103 3104 3105 static int ci_enable_ulv(struct radeon_device *rdev, bool enable) 3106 { 3107 struct ci_power_info *pi = ci_get_pi(rdev); 3108 struct ci_ulv_parm *ulv = &pi->ulv; 3109 3110 if (ulv->supported) { 3111 if (enable) 3112 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ? 3113 0 : -EINVAL; 3114 else 3115 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ? 3116 0 : -EINVAL; 3117 } 3118 3119 return 0; 3120 } 3121 3122 static int ci_populate_ulv_level(struct radeon_device *rdev, 3123 SMU7_Discrete_Ulv *state) 3124 { 3125 struct ci_power_info *pi = ci_get_pi(rdev); 3126 u16 ulv_voltage = rdev->pm.dpm.backbias_response_time; 3127 3128 state->CcPwrDynRm = 0; 3129 state->CcPwrDynRm1 = 0; 3130 3131 if (ulv_voltage == 0) { 3132 pi->ulv.supported = false; 3133 return 0; 3134 } 3135 3136 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 3137 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) 3138 state->VddcOffset = 0; 3139 else 3140 state->VddcOffset = 3141 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage; 3142 } else { 3143 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) 3144 state->VddcOffsetVid = 0; 3145 else 3146 state->VddcOffsetVid = (u8) 3147 ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) * 3148 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); 3149 } 3150 state->VddcPhase = pi->vddc_phase_shed_control ? 
0 : 1; 3151 3152 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm); 3153 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1); 3154 state->VddcOffset = cpu_to_be16(state->VddcOffset); 3155 3156 return 0; 3157 } 3158 3159 static int ci_calculate_sclk_params(struct radeon_device *rdev, 3160 u32 engine_clock, 3161 SMU7_Discrete_GraphicsLevel *sclk) 3162 { 3163 struct ci_power_info *pi = ci_get_pi(rdev); 3164 struct atom_clock_dividers dividers; 3165 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3; 3166 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4; 3167 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum; 3168 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2; 3169 u32 reference_clock = rdev->clock.spll.reference_freq; 3170 u32 reference_divider; 3171 u32 fbdiv; 3172 int ret; 3173 3174 ret = radeon_atom_get_clock_dividers(rdev, 3175 COMPUTE_GPUCLK_INPUT_FLAG_SCLK, 3176 engine_clock, false, &dividers); 3177 if (ret) 3178 return ret; 3179 3180 reference_divider = 1 + dividers.ref_div; 3181 fbdiv = dividers.fb_div & 0x3FFFFFF; 3182 3183 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; 3184 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); 3185 spll_func_cntl_3 |= SPLL_DITHEN; 3186 3187 if (pi->caps_sclk_ss_support) { 3188 struct radeon_atom_ss ss; 3189 u32 vco_freq = engine_clock * dividers.post_div; 3190 3191 if (radeon_atombios_get_asic_ss_info(rdev, &ss, 3192 ASIC_INTERNAL_ENGINE_SS, vco_freq)) { 3193 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); 3194 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000); 3195 3196 cg_spll_spread_spectrum &= ~CLK_S_MASK; 3197 cg_spll_spread_spectrum |= CLK_S(clk_s); 3198 cg_spll_spread_spectrum |= SSEN; 3199 3200 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK; 3201 cg_spll_spread_spectrum_2 |= CLK_V(clk_v); 3202 } 3203 } 3204 3205 sclk->SclkFrequency = engine_clock; 3206 sclk->CgSpllFuncCntl3 = spll_func_cntl_3; 3207 sclk->CgSpllFuncCntl4 = spll_func_cntl_4; 3208 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; 3209 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; 3210 sclk->SclkDid = (u8)dividers.post_divider; 3211 3212 return 0; 3213 } 3214 3215 static int ci_populate_single_graphic_level(struct radeon_device *rdev, 3216 u32 engine_clock, 3217 u16 sclk_activity_level_t, 3218 SMU7_Discrete_GraphicsLevel *graphic_level) 3219 { 3220 struct ci_power_info *pi = ci_get_pi(rdev); 3221 int ret; 3222 3223 ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level); 3224 if (ret) 3225 return ret; 3226 3227 ret = ci_get_dependency_volt_by_clk(rdev, 3228 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, 3229 engine_clock, &graphic_level->MinVddc); 3230 if (ret) 3231 return ret; 3232 3233 graphic_level->SclkFrequency = engine_clock; 3234 3235 graphic_level->Flags = 0; 3236 graphic_level->MinVddcPhases = 1; 3237 3238 if (pi->vddc_phase_shed_control) 3239 ci_populate_phase_value_based_on_sclk(rdev, 3240 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table, 3241 engine_clock, 3242 &graphic_level->MinVddcPhases); 3243 3244 graphic_level->ActivityLevel = sclk_activity_level_t; 3245 3246 graphic_level->CcPwrDynRm = 0; 3247 graphic_level->CcPwrDynRm1 = 0; 3248 graphic_level->EnabledForThrottle = 1; 3249 graphic_level->UpH = 0; 3250 graphic_level->DownH = 0; 3251 graphic_level->VoltageDownH = 0; 3252 graphic_level->PowerThrottle = 0; 3253 3254 if (pi->caps_sclk_ds) 3255 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev, 3256 engine_clock, 3257
CISLAND_MINIMUM_ENGINE_CLOCK); 3258 3259 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 3260 3261 graphic_level->Flags = cpu_to_be32(graphic_level->Flags); 3262 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE); 3263 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases); 3264 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency); 3265 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel); 3266 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3); 3267 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4); 3268 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum); 3269 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2); 3270 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm); 3271 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1); 3272 3273 return 0; 3274 } 3275 3276 static int ci_populate_all_graphic_levels(struct radeon_device *rdev) 3277 { 3278 struct ci_power_info *pi = ci_get_pi(rdev); 3279 struct ci_dpm_table *dpm_table = &pi->dpm_table; 3280 u32 level_array_address = pi->dpm_table_start + 3281 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel); 3282 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) * 3283 SMU7_MAX_LEVELS_GRAPHICS; 3284 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel; 3285 u32 i, ret; 3286 3287 memset(levels, 0, level_array_size); 3288 3289 for (i = 0; i < dpm_table->sclk_table.count; i++) { 3290 ret = ci_populate_single_graphic_level(rdev, 3291 dpm_table->sclk_table.dpm_levels[i].value, 3292 (u16)pi->activity_target[i], 3293 &pi->smc_state_table.GraphicsLevel[i]); 3294 if (ret) 3295 return ret; 3296 if (i > 1) 3297 pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; 3298 if (i == (dpm_table->sclk_table.count - 1)) 3299 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark = 3300 PPSMC_DISPLAY_WATERMARK_HIGH; 3301 } 3302 pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; 3303 3304 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count; 3305 pi->dpm_level_enable_mask.sclk_dpm_enable_mask = 3306 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); 3307 3308 ret = ci_copy_bytes_to_smc(rdev, level_array_address, 3309 (u8 *)levels, level_array_size, 3310 pi->sram_end); 3311 if (ret) 3312 return ret; 3313 3314 return 0; 3315 } 3316 3317 static int ci_populate_ulv_state(struct radeon_device *rdev, 3318 SMU7_Discrete_Ulv *ulv_level) 3319 { 3320 return ci_populate_ulv_level(rdev, ulv_level); 3321 } 3322 3323 static int ci_populate_all_memory_levels(struct radeon_device *rdev) 3324 { 3325 struct ci_power_info *pi = ci_get_pi(rdev); 3326 struct ci_dpm_table *dpm_table = &pi->dpm_table; 3327 u32 level_array_address = pi->dpm_table_start + 3328 offsetof(SMU7_Discrete_DpmTable, MemoryLevel); 3329 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * 3330 SMU7_MAX_LEVELS_MEMORY; 3331 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel; 3332 u32 i, ret; 3333 3334 memset(levels, 0, level_array_size); 3335 3336 for (i = 0; i < dpm_table->mclk_table.count; i++) { 3337 if (dpm_table->mclk_table.dpm_levels[i].value == 0) 3338 return -EINVAL; 3339 ret = ci_populate_single_memory_level(rdev, 3340 dpm_table->mclk_table.dpm_levels[i].value, 3341 &pi->smc_state_table.MemoryLevel[i]); 3342 if (ret) 3343 return ret; 3344 } 3345 3346 
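/*
 * The lowest memory level is always flagged active so the SMC keeps a valid
 * fallback state; on device ids 0x67B0/0x67B1, level 1 additionally inherits
 * level 0's minimum VDDC settings below.
 */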
pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; 3347 3348 if ((dpm_table->mclk_table.count >= 2) && 3349 ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) { 3350 pi->smc_state_table.MemoryLevel[1].MinVddc = 3351 pi->smc_state_table.MemoryLevel[0].MinVddc; 3352 pi->smc_state_table.MemoryLevel[1].MinVddcPhases = 3353 pi->smc_state_table.MemoryLevel[0].MinVddcPhases; 3354 } 3355 3356 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F); 3357 3358 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count; 3359 pi->dpm_level_enable_mask.mclk_dpm_enable_mask = 3360 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); 3361 3362 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark = 3363 PPSMC_DISPLAY_WATERMARK_HIGH; 3364 3365 ret = ci_copy_bytes_to_smc(rdev, level_array_address, 3366 (u8 *)levels, level_array_size, 3367 pi->sram_end); 3368 if (ret) 3369 return ret; 3370 3371 return 0; 3372 } 3373 3374 static void ci_reset_single_dpm_table(struct radeon_device *rdev, 3375 struct ci_single_dpm_table* dpm_table, 3376 u32 count) 3377 { 3378 u32 i; 3379 3380 dpm_table->count = count; 3381 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) 3382 dpm_table->dpm_levels[i].enabled = false; 3383 } 3384 3385 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table, 3386 u32 index, u32 pcie_gen, u32 pcie_lanes) 3387 { 3388 dpm_table->dpm_levels[index].value = pcie_gen; 3389 dpm_table->dpm_levels[index].param1 = pcie_lanes; 3390 dpm_table->dpm_levels[index].enabled = true; 3391 } 3392 3393 static int ci_setup_default_pcie_tables(struct radeon_device *rdev) 3394 { 3395 struct ci_power_info *pi = ci_get_pi(rdev); 3396 3397 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) 3398 return -EINVAL; 3399 3400 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) { 3401 pi->pcie_gen_powersaving = pi->pcie_gen_performance; 3402 pi->pcie_lane_powersaving = pi->pcie_lane_performance; 3403 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) { 3404 pi->pcie_gen_performance = pi->pcie_gen_powersaving; 3405 pi->pcie_lane_performance = pi->pcie_lane_powersaving; 3406 } 3407 3408 ci_reset_single_dpm_table(rdev, 3409 &pi->dpm_table.pcie_speed_table, 3410 SMU7_MAX_LEVELS_LINK); 3411 3412 if (rdev->family == CHIP_BONAIRE) 3413 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, 3414 pi->pcie_gen_powersaving.min, 3415 pi->pcie_lane_powersaving.max); 3416 else 3417 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, 3418 pi->pcie_gen_powersaving.min, 3419 pi->pcie_lane_powersaving.min); 3420 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1, 3421 pi->pcie_gen_performance.min, 3422 pi->pcie_lane_performance.min); 3423 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2, 3424 pi->pcie_gen_powersaving.min, 3425 pi->pcie_lane_powersaving.max); 3426 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3, 3427 pi->pcie_gen_performance.min, 3428 pi->pcie_lane_performance.max); 3429 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4, 3430 pi->pcie_gen_powersaving.max, 3431 pi->pcie_lane_powersaving.max); 3432 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5, 3433 pi->pcie_gen_performance.max, 3434 pi->pcie_lane_performance.max); 3435 3436 pi->dpm_table.pcie_speed_table.count = 6; 3437 3438 return 0; 3439 } 3440 3441 static int ci_setup_default_dpm_tables(struct radeon_device *rdev) 3442 { 3443 
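/*
 * Build the driver-side DPM tables from the power-play dependency tables:
 * adjacent duplicate clock entries are collapsed, only the first sclk/mclk
 * level starts out enabled, and the voltage tables are copied over verbatim.
 */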
struct ci_power_info *pi = ci_get_pi(rdev); 3444 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table = 3445 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 3446 struct radeon_clock_voltage_dependency_table *allowed_mclk_table = 3447 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk; 3448 struct radeon_cac_leakage_table *std_voltage_table = 3449 &rdev->pm.dpm.dyn_state.cac_leakage_table; 3450 u32 i; 3451 3452 if (allowed_sclk_vddc_table == NULL) 3453 return -EINVAL; 3454 if (allowed_sclk_vddc_table->count < 1) 3455 return -EINVAL; 3456 if (allowed_mclk_table == NULL) 3457 return -EINVAL; 3458 if (allowed_mclk_table->count < 1) 3459 return -EINVAL; 3460 3461 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table)); 3462 3463 ci_reset_single_dpm_table(rdev, 3464 &pi->dpm_table.sclk_table, 3465 SMU7_MAX_LEVELS_GRAPHICS); 3466 ci_reset_single_dpm_table(rdev, 3467 &pi->dpm_table.mclk_table, 3468 SMU7_MAX_LEVELS_MEMORY); 3469 ci_reset_single_dpm_table(rdev, 3470 &pi->dpm_table.vddc_table, 3471 SMU7_MAX_LEVELS_VDDC); 3472 ci_reset_single_dpm_table(rdev, 3473 &pi->dpm_table.vddci_table, 3474 SMU7_MAX_LEVELS_VDDCI); 3475 ci_reset_single_dpm_table(rdev, 3476 &pi->dpm_table.mvdd_table, 3477 SMU7_MAX_LEVELS_MVDD); 3478 3479 pi->dpm_table.sclk_table.count = 0; 3480 for (i = 0; i < allowed_sclk_vddc_table->count; i++) { 3481 if ((i == 0) || 3482 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value != 3483 allowed_sclk_vddc_table->entries[i].clk)) { 3484 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value = 3485 allowed_sclk_vddc_table->entries[i].clk; 3486 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = 3487 (i == 0) ? true : false; 3488 pi->dpm_table.sclk_table.count++; 3489 } 3490 } 3491 3492 pi->dpm_table.mclk_table.count = 0; 3493 for (i = 0; i < allowed_mclk_table->count; i++) { 3494 if ((i == 0) || 3495 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value != 3496 allowed_mclk_table->entries[i].clk)) { 3497 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value = 3498 allowed_mclk_table->entries[i].clk; 3499 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = 3500 (i == 0) ? 
true : false; 3501 pi->dpm_table.mclk_table.count++; 3502 } 3503 } 3504 3505 for (i = 0; i < allowed_sclk_vddc_table->count; i++) { 3506 pi->dpm_table.vddc_table.dpm_levels[i].value = 3507 allowed_sclk_vddc_table->entries[i].v; 3508 pi->dpm_table.vddc_table.dpm_levels[i].param1 = 3509 std_voltage_table->entries[i].leakage; 3510 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true; 3511 } 3512 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count; 3513 3514 allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk; 3515 if (allowed_mclk_table) { 3516 for (i = 0; i < allowed_mclk_table->count; i++) { 3517 pi->dpm_table.vddci_table.dpm_levels[i].value = 3518 allowed_mclk_table->entries[i].v; 3519 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true; 3520 } 3521 pi->dpm_table.vddci_table.count = allowed_mclk_table->count; 3522 } 3523 3524 allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk; 3525 if (allowed_mclk_table) { 3526 for (i = 0; i < allowed_mclk_table->count; i++) { 3527 pi->dpm_table.mvdd_table.dpm_levels[i].value = 3528 allowed_mclk_table->entries[i].v; 3529 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true; 3530 } 3531 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count; 3532 } 3533 3534 ci_setup_default_pcie_tables(rdev); 3535 3536 return 0; 3537 } 3538 3539 static int ci_find_boot_level(struct ci_single_dpm_table *table, 3540 u32 value, u32 *boot_level) 3541 { 3542 u32 i; 3543 int ret = -EINVAL; 3544 3545 for(i = 0; i < table->count; i++) { 3546 if (value == table->dpm_levels[i].value) { 3547 *boot_level = i; 3548 ret = 0; 3549 } 3550 } 3551 3552 return ret; 3553 } 3554 3555 static int ci_init_smc_table(struct radeon_device *rdev) 3556 { 3557 struct ci_power_info *pi = ci_get_pi(rdev); 3558 struct ci_ulv_parm *ulv = &pi->ulv; 3559 struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps; 3560 SMU7_Discrete_DpmTable *table = &pi->smc_state_table; 3561 int ret; 3562 3563 ret = ci_setup_default_dpm_tables(rdev); 3564 if (ret) 3565 return ret; 3566 3567 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) 3568 ci_populate_smc_voltage_tables(rdev, table); 3569 3570 ci_init_fps_limits(rdev); 3571 3572 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) 3573 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; 3574 3575 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) 3576 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; 3577 3578 if (pi->mem_gddr5) 3579 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; 3580 3581 if (ulv->supported) { 3582 ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv); 3583 if (ret) 3584 return ret; 3585 WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter); 3586 } 3587 3588 ret = ci_populate_all_graphic_levels(rdev); 3589 if (ret) 3590 return ret; 3591 3592 ret = ci_populate_all_memory_levels(rdev); 3593 if (ret) 3594 return ret; 3595 3596 ci_populate_smc_link_level(rdev, table); 3597 3598 ret = ci_populate_smc_acpi_level(rdev, table); 3599 if (ret) 3600 return ret; 3601 3602 ret = ci_populate_smc_vce_level(rdev, table); 3603 if (ret) 3604 return ret; 3605 3606 ret = ci_populate_smc_acp_level(rdev, table); 3607 if (ret) 3608 return ret; 3609 3610 ret = ci_populate_smc_samu_level(rdev, table); 3611 if (ret) 3612 return ret; 3613 3614 ret = ci_do_program_memory_timing_parameters(rdev); 3615 if (ret) 3616 return ret; 3617 3618 ret = ci_populate_smc_uvd_level(rdev, table); 3619 if (ret) 3620 return ret; 3621 3622 table->UvdBootLevel = 0; 3623 table->VceBootLevel = 0; 3624 
static int ci_init_smc_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ulv_parm *ulv = &pi->ulv;
	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
	int ret;

	ret = ci_setup_default_dpm_tables(rdev);
	if (ret)
		return ret;

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
		ci_populate_smc_voltage_tables(rdev, table);

	ci_init_fps_limits(rdev);

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (pi->mem_gddr5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (ulv->supported) {
		ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
		if (ret)
			return ret;
		WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
	}

	ret = ci_populate_all_graphic_levels(rdev);
	if (ret)
		return ret;

	ret = ci_populate_all_memory_levels(rdev);
	if (ret)
		return ret;

	ci_populate_smc_link_level(rdev, table);

	ret = ci_populate_smc_acpi_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_vce_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_acp_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_samu_level(rdev, table);
	if (ret)
		return ret;

	ret = ci_do_program_memory_timing_parameters(rdev);
	if (ret)
		return ret;

	ret = ci_populate_smc_uvd_level(rdev, table);
	if (ret)
		return ret;

	table->UvdBootLevel = 0;
	table->VceBootLevel = 0;
	table->AcpBootLevel = 0;
	table->SamuBootLevel = 0;
	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
				 pi->vbios_boot_state.sclk_bootup_value,
				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
				 pi->vbios_boot_state.mclk_bootup_value,
				 (u32 *)&pi->smc_state_table.MemoryBootLevel);

	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;

	ci_populate_smc_initial_state(rdev, radeon_boot_state);

	ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
	if (ret)
		return ret;

	table->UVDInterval = 1;
	table->VCEInterval = 1;
	table->ACPInterval = 1;
	table->SAMUInterval = 1;
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;
	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->VddcVddciDelta = 4000;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
	table->PCIeGenInterval = 1;
	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
		table->SVI2Enable = 1;
	else
		table->SVI2Enable = 0;

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	table->SystemFlags = cpu_to_be32(table->SystemFlags);
	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);

	ret = ci_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
				   (u8 *)&table->SystemFlags,
				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
				   pi->sram_end);
	if (ret)
		return ret;

	return 0;
}
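/* State trimming: a radeon power state carries only its low and high
 * performance levels, so before uploading the enable masks we mark
 * every DPM level outside the [low, high] window as disabled.  For
 * PCIe, duplicate speed/lane pairs are additionally collapsed so the
 * SMC never switches between identical link configurations.
 */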
static void ci_trim_single_dpm_states(struct radeon_device *rdev,
				      struct ci_single_dpm_table *dpm_table,
				      u32 low_limit, u32 high_limit)
{
	u32 i;

	for (i = 0; i < dpm_table->count; i++) {
		if ((dpm_table->dpm_levels[i].value < low_limit) ||
		    (dpm_table->dpm_levels[i].value > high_limit))
			dpm_table->dpm_levels[i].enabled = false;
		else
			dpm_table->dpm_levels[i].enabled = true;
	}
}

static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
				    u32 speed_low, u32 lanes_low,
				    u32 speed_high, u32 lanes_high)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
	u32 i, j;

	for (i = 0; i < pcie_table->count; i++) {
		if ((pcie_table->dpm_levels[i].value < speed_low) ||
		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
		    (pcie_table->dpm_levels[i].value > speed_high) ||
		    (pcie_table->dpm_levels[i].param1 > lanes_high))
			pcie_table->dpm_levels[i].enabled = false;
		else
			pcie_table->dpm_levels[i].enabled = true;
	}

	for (i = 0; i < pcie_table->count; i++) {
		if (pcie_table->dpm_levels[i].enabled) {
			for (j = i + 1; j < pcie_table->count; j++) {
				if (pcie_table->dpm_levels[j].enabled) {
					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
						pcie_table->dpm_levels[j].enabled = false;
				}
			}
		}
	}
}

static int ci_trim_dpm_states(struct radeon_device *rdev,
			      struct radeon_ps *radeon_state)
{
	struct ci_ps *state = ci_get_ps(radeon_state);
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 high_limit_count;

	if (state->performance_level_count < 1)
		return -EINVAL;

	if (state->performance_level_count == 1)
		high_limit_count = 0;
	else
		high_limit_count = 1;

	ci_trim_single_dpm_states(rdev,
				  &pi->dpm_table.sclk_table,
				  state->performance_levels[0].sclk,
				  state->performance_levels[high_limit_count].sclk);

	ci_trim_single_dpm_states(rdev,
				  &pi->dpm_table.mclk_table,
				  state->performance_levels[0].mclk,
				  state->performance_levels[high_limit_count].mclk);

	ci_trim_pcie_dpm_states(rdev,
				state->performance_levels[0].pcie_gen,
				state->performance_levels[0].pcie_lane,
				state->performance_levels[high_limit_count].pcie_gen,
				state->performance_levels[high_limit_count].pcie_lane);

	return 0;
}
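/* Ask the SMC for the lowest VDDC that still satisfies the current
 * display clock: look up the voltage required for the active dispclk,
 * then round it up to the nearest entry of the sclk/vddc dependency
 * table before sending PPSMC_MSG_VddC_Request.
 */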
static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
{
	struct radeon_clock_voltage_dependency_table *disp_voltage_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
	struct radeon_clock_voltage_dependency_table *vddc_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 requested_voltage = 0;
	u32 i;

	if (disp_voltage_table == NULL)
		return -EINVAL;
	if (!disp_voltage_table->count)
		return -EINVAL;

	for (i = 0; i < disp_voltage_table->count; i++) {
		if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
			requested_voltage = disp_voltage_table->entries[i].v;
	}

	for (i = 0; i < vddc_table->count; i++) {
		if (requested_voltage <= vddc_table->entries[i].v) {
			requested_voltage = vddc_table->entries[i].v;
			return (ci_send_msg_to_smc_with_parameter(rdev,
								  PPSMC_MSG_VddC_Request,
								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
				0 : -EINVAL;
		}
	}

	return -EINVAL;
}

static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result result;

	ci_apply_disp_minimum_voltage_request(rdev);

	if (!pi->sclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			result = ci_send_msg_to_smc_with_parameter(rdev,
								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	if (!pi->mclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			result = ci_send_msg_to_smc_with_parameter(rdev,
								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}
#if 0
	if (!pi->pcie_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			result = ci_send_msg_to_smc_with_parameter(rdev,
								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}
#endif
	return 0;
}
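/* Compare the new state's top sclk/mclk against the current DPM
 * tables and record what must be reprogrammed in
 * need_update_smu7_dpm_table: the OD_UPDATE flags mean the requested
 * clock is not in the table at all (overdrive), while UPDATE_MCLK is
 * also forced when the number of active displays changes, since the
 * memory level programming depends on the display configuration.
 */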
static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
						   struct radeon_ps *radeon_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *state = ci_get_ps(radeon_state);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
	u32 i;

	pi->need_update_smu7_dpm_table = 0;

	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count) {
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	} else {
		/* XXX The current code always reprogrammed the sclk levels,
		 * but we don't currently handle disp sclk requirements
		 * so just skip it.
		 */
		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	if (rdev->pm.dpm.current_active_crtc_count !=
	    rdev->pm.dpm.new_active_crtc_count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
}

static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
						       struct radeon_ps *radeon_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *state = ci_get_ps(radeon_state);
	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	int ret;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;

	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;

	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
		ret = ci_populate_all_graphic_levels(rdev);
		if (ret)
			return ret;
	}

	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
		ret = ci_populate_all_memory_levels(rdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;

		for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;

				if (!pi->caps_uvd_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_UVDDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);

		if (pi->last_mclk_dpm_enable_mask & 0x1) {
			pi->uvd_enabled = true;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
			ci_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	} else {
		if (pi->last_mclk_dpm_enable_mask & 0x1) {
			pi->uvd_enabled = false;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
			ci_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	}

	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
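/* The VCE/SAMU/ACP enable helpers below follow the same pattern as
 * ci_enable_uvd_dpm() above: walk the block's clock/voltage dependency
 * table from the top and set a mask bit for every level whose voltage
 * fits under the current AC/DC limit (just the highest such level
 * unless per-level DPM is supported for that block), push the mask to
 * the SMC, then send the block's Enable/Disable message.
 */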
static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
		for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;

				if (!pi->caps_vce_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_VCEDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
	}

	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

#if 0
static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
		for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;

				if (!pi->caps_samu_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
	}
	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct radeon_clock_and_voltage_limits *max_limits;
	int i;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
		for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;

				if (!pi->caps_acp_dpm)
					break;
			}
		}

		ci_send_msg_to_smc_with_parameter(rdev,
						  PPSMC_MSG_ACPDPM_SetEnabledMask,
						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
	}

	return (ci_send_msg_to_smc(rdev, enable ?
				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
#endif
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (!gate) {
		if (pi->caps_uvd_dpm ||
		    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
			pi->smc_state_table.UvdBootLevel = 0;
		else
			pi->smc_state_table.UvdBootLevel =
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;

		tmp = RREG32_SMC(DPM_TABLE_475);
		tmp &= ~UvdBootLevel_MASK;
		tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
		WREG32_SMC(DPM_TABLE_475, tmp);
	}

	return ci_enable_uvd_dpm(rdev, !gate);
}

static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
{
	u8 i;
	u32 min_evclk = 30000; /* ??? */
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= min_evclk)
			return i;
	}

	return table->count - 1;
}

static int ci_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 tmp;

	if (radeon_current_state->evclk != radeon_new_state->evclk) {
		if (radeon_new_state->evclk) {
			/* turn the clocks on when encoding */
			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);

			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
			tmp = RREG32_SMC(DPM_TABLE_475);
			tmp &= ~VceBootLevel_MASK;
			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
			WREG32_SMC(DPM_TABLE_475, tmp);

			ret = ci_enable_vce_dpm(rdev, true);
		} else {
			/* turn the clocks off when not encoding */
			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);

			ret = ci_enable_vce_dpm(rdev, false);
		}
	}
	return ret;
}

#if 0
static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	return ci_enable_samu_dpm(rdev, gate);
}

static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (!gate) {
		pi->smc_state_table.AcpBootLevel = 0;

		tmp = RREG32_SMC(DPM_TABLE_475);
		tmp &= ~AcpBootLevel_MASK;
		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
		WREG32_SMC(DPM_TABLE_475, tmp);
	}

	return ci_enable_acp_dpm(rdev, !gate);
}
#endif
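/* Turn the per-state trim result into the enable masks the SMC
 * consumes: bit i set means DPM level i may be used.  Bit 0 of the
 * mclk mask is kept clear while UVD is active so the memory clock is
 * not dropped to its lowest level during decode.
 */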
static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
					     struct radeon_ps *radeon_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	ret = ci_trim_dpm_states(rdev, radeon_state);
	if (ret)
		return ret;

	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
	pi->last_mclk_dpm_enable_mask =
		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
	if (pi->uvd_enabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
	}
	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);

	return 0;
}

static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
				       u32 level_mask)
{
	u32 level = 0;

	while ((level_mask & (1 << level)) == 0)
		level++;

	return level;
}
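/* Forcing a performance level: HIGH forces the index of the highest
 * set bit in each enable mask, LOW forces the lowest set bit, and
 * AUTO releases the PCIe force and re-uploads the unrestricted masks.
 * After each force the code polls TARGET_AND_CURRENT_PROFILE_INDEX
 * (or the _1 variant for PCIe) until the hardware reports the
 * requested index or rdev->usec_timeout expires.
 */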
int ci_dpm_force_performance_level(struct radeon_device *rdev,
				   enum radeon_dpm_forced_level level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp, levels, i;
	int ret;

	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_pcie(rdev, levels);
				if (ret)
					return ret;
				for (i = 0; i < rdev->usec_timeout; i++) {
					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_sclk(rdev, levels);
				if (ret)
					return ret;
				for (i = 0; i < rdev->usec_timeout; i++) {
					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_mclk(rdev, levels);
				if (ret)
					return ret;
				for (i = 0; i < rdev->usec_timeout; i++) {
					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(rdev,
							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			ret = ci_dpm_force_state_sclk(rdev, levels);
			if (ret)
				return ret;
			for (i = 0; i < rdev->usec_timeout; i++) {
				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
				       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(rdev,
							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			ret = ci_dpm_force_state_mclk(rdev, levels);
			if (ret)
				return ret;
			for (i = 0; i < rdev->usec_timeout; i++) {
				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
				       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(rdev,
							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			ret = ci_dpm_force_state_pcie(rdev, levels);
			if (ret)
				return ret;
			for (i = 0; i < rdev->usec_timeout; i++) {
				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
				       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
		if (!pi->pcie_dpm_key_disabled) {
			PPSMC_Result smc_result;

			smc_result = ci_send_msg_to_smc(rdev,
							PPSMC_MSG_PCIeDPM_UnForceLevel);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
		ret = ci_upload_dpm_level_enable_mask(rdev);
		if (ret)
			return ret;
	}

	rdev->pm.dpm.forced_level = level;

	return 0;
}

static int ci_set_mc_special_registers(struct radeon_device *rdev,
				       struct ci_mc_reg_table *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 i, j, k;
	u32 temp_reg;

	for (i = 0, j = table->last; i < table->last; i++) {
		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
			return -EINVAL;
		switch (table->mc_reg_address[i].s1 << 2) {
		case MC_SEQ_MISC1:
			temp_reg = RREG32(MC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			}
			j++;
			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			temp_reg = RREG32(MC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
				if (!pi->mem_gddr5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;
			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			if (!pi->mem_gddr5) {
				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
				for (k = 0; k < table->num_entries; k++) {
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				}
				j++;
				if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
					return -EINVAL;
			}
			break;
		case MC_SEQ_RESERVE_M:
			temp_reg = RREG32(MC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			}
			j++;
			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			break;
		default:
			break;
		}
	}

	table->last = j;

	return 0;
}
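/* Map each MC sequencer register to its _LP shadow.  s1 is the dword
 * index (register offset >> 2) of the register the driver programs
 * directly; s0 receives the matching _LP copy, which the SMC
 * apparently programs when it swaps memory timings between levels.
 * Registers without a shadow keep s1 for both (see
 * ci_set_s0_mc_reg_index() below).
 */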
static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
{
	bool result = true;

	switch (in_reg) {
	case MC_SEQ_RAS_TIMING >> 2:
		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
		break;
	case MC_SEQ_DLL_STBY >> 2:
		*out_reg = MC_SEQ_DLL_STBY_LP >> 2;
		break;
	case MC_SEQ_G5PDX_CMD0 >> 2:
		*out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
		break;
	case MC_SEQ_G5PDX_CMD1 >> 2:
		*out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
		break;
	case MC_SEQ_G5PDX_CTRL >> 2:
		*out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
		break;
	case MC_SEQ_CAS_TIMING >> 2:
		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
		break;
	case MC_SEQ_MISC_TIMING >> 2:
		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
		break;
	case MC_SEQ_MISC_TIMING2 >> 2:
		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
		break;
	case MC_SEQ_PMG_DVS_CMD >> 2:
		*out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
		break;
	case MC_SEQ_PMG_DVS_CTL >> 2:
		*out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
		break;
	case MC_SEQ_RD_CTL_D0 >> 2:
		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
		break;
	case MC_SEQ_RD_CTL_D1 >> 2:
		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
		break;
	case MC_SEQ_WR_CTL_D0 >> 2:
		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
		break;
	case MC_SEQ_WR_CTL_D1 >> 2:
		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
		break;
	case MC_PMG_CMD_EMRS >> 2:
		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
		break;
	case MC_PMG_CMD_MRS >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
		break;
	case MC_PMG_CMD_MRS1 >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
		break;
	case MC_SEQ_PMG_TIMING >> 2:
		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
		break;
	case MC_PMG_CMD_MRS2 >> 2:
		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
		break;
	case MC_SEQ_WR_CTL_2 >> 2:
		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
		break;
	default:
		result = false;
		break;
	}

	return result;
}

static void ci_set_valid_flag(struct ci_mc_reg_table *table)
{
	u8 i, j;

	for (i = 0; i < table->last; i++) {
		for (j = 1; j < table->num_entries; j++) {
			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
			    table->mc_reg_table_entry[j].mc_data[i]) {
				table->valid_flag |= 1 << i;
				break;
			}
		}
	}
}

static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
{
	u32 i;
	u16 address;

	for (i = 0; i < table->last; i++) {
		table->mc_reg_address[i].s0 =
			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
			address : table->mc_reg_address[i].s1;
	}
}

static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
				      struct ci_mc_reg_table *ci_table)
{
	u8 i, j;

	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
		return -EINVAL;
	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
		return -EINVAL;

	for (i = 0; i < table->last; i++)
		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;

	ci_table->last = table->last;

	for (i = 0; i < table->num_entries; i++) {
		ci_table->mc_reg_table_entry[i].mclk_max =
			table->mc_reg_table_entry[i].mclk_max;
		for (j = 0; j < table->last; j++)
			ci_table->mc_reg_table_entry[i].mc_data[j] =
				table->mc_reg_table_entry[i].mc_data[j];
	}
	ci_table->num_entries = table->num_entries;

	return 0;
}
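/* Hawaii-specific MC sequencer fixups.  On 0x67B0/0x67B1 boards whose
 * MC_SEQ_MISC0 strap matches, the write-control, CAS and misc timing
 * entries are patched before upload; mclk_max appears to be in the
 * usual 10 kHz units, which would make 125000 and 137500 the 1250 MHz
 * and 1375 MHz memory levels.
 */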
static int ci_register_patching_mc_seq(struct radeon_device *rdev,
				       struct ci_mc_reg_table *table)
{
	u8 i, k;
	u32 tmp;
	bool patch;

	tmp = RREG32(MC_SEQ_MISC0);
	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;

	if (patch &&
	    ((rdev->pdev->device == 0x67B0) ||
	     (rdev->pdev->device == 0x67B1))) {
		for (i = 0; i < table->last; i++) {
			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			switch (table->mc_reg_address[i].s1 >> 2) {
			case MC_SEQ_MISC1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
							0x00000007;
				}
				break;
			case MC_SEQ_WR_CTL_D0:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case MC_SEQ_WR_CTL_D1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case MC_SEQ_WR_CTL_2:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] = 0;
				}
				break;
			case MC_SEQ_CAS_TIMING:
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0140;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0150;
				}
				break;
			case MC_SEQ_MISC_TIMING:
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000030;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000035;
				}
				break;
			default:
				break;
			}
		}

		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
		tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
		WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
	}

	return 0;
}
static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_mc_reg_table *table;
	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
	u8 module_index = rv770_get_memory_module_index(rdev);
	int ret;

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));

	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
	if (ret)
		goto init_mc_done;

	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_s0_mc_reg_index(ci_table);

	ret = ci_register_patching_mc_seq(rdev, ci_table);
	if (ret)
		goto init_mc_done;

	ret = ci_set_mc_special_registers(rdev, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_valid_flag(ci_table);

init_mc_done:
	kfree(table);

	return ret;
}

static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
					SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i, j;

	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
		if (pi->mc_reg_table.valid_flag & (1 << j)) {
			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
			i++;
		}
	}

	mc_reg_table->last = (u8)i;

	return 0;
}

static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
				    SMU7_Discrete_MCRegisterSet *data,
				    u32 num_entries, u32 valid_flag)
{
	u32 i, j;

	for (i = 0, j = 0; j < num_entries; j++) {
		if (valid_flag & (1 << j)) {
			data->value[i] = cpu_to_be32(entry->mc_data[j]);
			i++;
		}
	}
}

static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
						 const u32 memory_clock,
						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i = 0;

	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
			break;
	}

	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
		--i;

	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
				mc_reg_table_data, pi->mc_reg_table.last,
				pi->mc_reg_table.valid_flag);
}

static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
					   SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i;

	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
		ci_convert_mc_reg_table_entry_to_smc(rdev,
						     pi->dpm_table.mclk_table.dpm_levels[i].value,
						     &mc_reg_table->data[i]);
}
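/* The initial upload sends the whole SMU7_Discrete_MCRegisters image,
 * register addresses plus one register set per mclk level; later
 * updates (ci_update_and_upload_mc_reg_table() below) only rewrite
 * the data[] sets, and only when an overdrive mclk change has
 * invalidated them.
 */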
static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
	if (ret)
		return ret;
	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);

	return ci_copy_bytes_to_smc(rdev,
				    pi->mc_reg_table_start,
				    (u8 *)&pi->smc_mc_reg_table,
				    sizeof(SMU7_Discrete_MCRegisters),
				    pi->sram_end);
}

static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
		return 0;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);

	return ci_copy_bytes_to_smc(rdev,
				    pi->mc_reg_table_start +
				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
				    (u8 *)&pi->smc_mc_reg_table.data[0],
				    sizeof(SMU7_Discrete_MCRegisterSet) *
				    pi->dpm_table.mclk_table.count,
				    pi->sram_end);
}

static void ci_enable_voltage_control(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= VOLT_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
						      struct radeon_ps *radeon_state)
{
	struct ci_ps *state = ci_get_ps(radeon_state);
	int i;
	u16 pcie_speed, max_speed = 0;

	for (i = 0; i < state->performance_level_count; i++) {
		pcie_speed = state->performance_levels[i].pcie_gen;
		if (max_speed < pcie_speed)
			max_speed = pcie_speed;
	}

	return max_speed;
}

static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
{
	u32 speed_cntl = 0;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;

	return (u16)speed_cntl;
}

static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
{
	u32 link_width = 0;

	link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
	link_width >>= LC_LINK_WIDTH_RD_SHIFT;

	switch (link_width) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12:
		/* not actually supported */
		return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}
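/* PCIe link speed (PSPP) handling around a state change: before the
 * switch, ask the platform via ACPI for the highest gen the new state
 * wants; if the request fails, fall back to a software force of the
 * current speed.  After the switch (the _after_ variant below), the
 * platform is notified when the link speed was lowered.
 */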
static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
							     struct radeon_ps *radeon_new_state,
							     struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	enum radeon_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(rdev, radeon_new_state);
	enum radeon_pcie_gen current_link_speed;

	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
	else
		current_link_speed = pi->force_pcie_gen;

	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
	pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case RADEON_PCIE_GEN3:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			pi->force_pcie_gen = RADEON_PCIE_GEN2;
			if (current_link_speed == RADEON_PCIE_GEN2)
				break;
			/* fall through */
		case RADEON_PCIE_GEN2:
			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
#endif
			/* fall through */
		default:
			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			pi->pspp_notify_required = true;
	}
}

static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
							   struct radeon_ps *radeon_new_state,
							   struct radeon_ps *radeon_current_state)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	enum radeon_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(rdev, radeon_new_state);
	u8 request;

	if (pi->pspp_notify_required) {
		if (target_link_speed == RADEON_PCIE_GEN3)
			request = PCIE_PERF_REQ_PECI_GEN3;
		else if (target_link_speed == RADEON_PCIE_GEN2)
			request = PCIE_PERF_REQ_PECI_GEN2;
		else
			request = PCIE_PERF_REQ_PECI_GEN1;

		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
		    (ci_get_current_pcie_speed(rdev) > 0))
			return;

#ifdef CONFIG_ACPI
		radeon_acpi_pcie_performance_request(rdev, request, false);
#endif
	}
}

static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
		&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;

	if (allowed_sclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_sclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddci_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddci_table->count < 1)
		return -EINVAL;

	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
	pi->max_vddc_in_pp_table =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;

	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
	pi->max_vddci_in_pp_table =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
		allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	return 0;
}
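/* Leakage patching: the vbios encodes some voltages as virtual
 * "leakage" IDs rather than real values.  The helpers below replace
 * any such ID found in the clock/voltage dependency tables with the
 * actual voltage recorded earlier in pi->vddc_leakage and
 * pi->vddci_leakage.
 */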
static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddc) {
			*vddc = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddci) {
			*vddci = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
								      struct radeon_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
								       struct radeon_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
									  struct radeon_vce_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
									  struct radeon_uvd_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
	}
}

static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
								   struct radeon_phase_shedding_limits_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
	}
}

static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
							    struct radeon_clock_and_voltage_limits *table)
{
	if (table) {
		ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
		ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
	}
}

static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
							 struct radeon_cac_leakage_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
	}
}
static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
{
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
						     &rdev->pm.dpm.dyn_state.cac_leakage_table);
}

static void ci_get_memory_type(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	tmp = RREG32(MC_SEQ_MISC0);

	if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
	    MC_SEQ_MISC0_GDDR5_VALUE)
		pi->mem_gddr5 = true;
	else
		pi->mem_gddr5 = false;
}

static void ci_update_current_ps(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

static void ci_update_requested_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}

int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	ci_update_requested_ps(rdev, new_ps);

	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);

	return 0;
}

void ci_dpm_post_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;

	ci_update_current_ps(rdev, new_ps);
}

void ci_dpm_setup_asic(struct radeon_device *rdev)
{
	int r;

	r = ci_mc_load_microcode(rdev);
	if (r)
		DRM_ERROR("Failed to load MC firmware!\n");
	ci_read_clock_registers(rdev);
	ci_get_memory_type(rdev);
	ci_enable_acpi_power_management(rdev);
	ci_init_sclk_t(rdev);
}
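/* DPM bring-up.  Ordering matters here: voltage control and the MC
 * register table come first, then the SMC firmware is uploaded and
 * its tables (ARB, DPM, MC) initialized, and only then is the SMC
 * started and the individual features (ULV, deep sleep, DIDT, CAC,
 * power containment, thermal-based sclk DPM) switched on.  Any
 * failure aborts the sequence with an error for the caller.
 */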
int ci_dpm_enable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
	int ret;

	if (ci_is_smc_running(rdev))
		return -EINVAL;
	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		ci_enable_voltage_control(rdev);
		ret = ci_construct_voltage_tables(rdev);
		if (ret) {
			DRM_ERROR("ci_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_initialize_mc_reg_table(rdev);
		if (ret)
			pi->caps_dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		ci_enable_spread_spectrum(rdev, true);
	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, true);
	ci_program_sstp(rdev);
	ci_enable_display_gap(rdev);
	ci_program_vc(rdev);
	ret = ci_upload_firmware(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_firmware failed\n");
		return ret;
	}
	ret = ci_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("ci_process_firmware_header failed\n");
		return ret;
	}
	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
	if (ret) {
		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ci_init_smc_table(rdev);
	if (ret) {
		DRM_ERROR("ci_init_smc_table failed\n");
		return ret;
	}
	ret = ci_init_arb_table_index(rdev);
	if (ret) {
		DRM_ERROR("ci_init_arb_table_index failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_populate_initial_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_populate_pm_base(rdev);
	if (ret) {
		DRM_ERROR("ci_populate_pm_base failed\n");
		return ret;
	}
	ci_dpm_start_smc(rdev);
	ci_enable_vr_hot_gpio_interrupt(rdev);
	ret = ci_notify_smc_display_change(rdev, false);
	if (ret) {
		DRM_ERROR("ci_notify_smc_display_change failed\n");
		return ret;
	}
	ci_enable_sclk_control(rdev, true);
	ret = ci_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ulv failed\n");
		return ret;
	}
	ret = ci_enable_ds_master_switch(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ds_master_switch failed\n");
		return ret;
	}
	ret = ci_start_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_start_dpm failed\n");
		return ret;
	}
	ret = ci_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_didt failed\n");
		return ret;
	}
	ret = ci_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_smc_cac failed\n");
		return ret;
	}
	ret = ci_enable_power_containment(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_power_containment failed\n");
		return ret;
	}

	ret = ci_power_control_set_level(rdev);
	if (ret) {
		DRM_ERROR("ci_power_control_set_level failed\n");
		return ret;
	}

	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
	if (ret) {
		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
		return ret;
	}

	ci_thermal_start_thermal_controller(rdev);

	ci_update_current_ps(rdev, boot_ps);

	return 0;
}

static int ci_set_temperature_range(struct radeon_device *rdev)
{
	int ret;

	ret = ci_thermal_enable_alert(rdev, false);
	if (ret)
		return ret;
	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(rdev, true);
	if (ret)
		return ret;

	return ret;
}
int ci_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	ret = ci_set_temperature_range(rdev);
	if (ret)
		return ret;

	ci_dpm_powergate_uvd(rdev, true);

	return 0;
}

void ci_dpm_disable(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;

	ci_dpm_powergate_uvd(rdev, false);

	if (!ci_is_smc_running(rdev))
		return;

	ci_thermal_stop_thermal_controller(rdev);

	if (pi->thermal_protection)
		ci_enable_thermal_protection(rdev, false);
	ci_enable_power_containment(rdev, false);
	ci_enable_smc_cac(rdev, false);
	ci_enable_didt(rdev, false);
	ci_enable_spread_spectrum(rdev, false);
	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	ci_stop_dpm(rdev);
	ci_enable_ds_master_switch(rdev, false);
	ci_enable_ulv(rdev, false);
	ci_clear_vc(rdev);
	ci_reset_to_default(rdev);
	ci_dpm_stop_smc(rdev);
	ci_force_switch_to_arb_f0(rdev);
	ci_enable_thermal_based_sclk_dpm(rdev, false);

	ci_update_current_ps(rdev, boot_ps);
}

int ci_dpm_set_power_state(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	struct radeon_ps *old_ps = &pi->current_rps;
	int ret;

	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
	if (pi->pcie_performance_request)
		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
	ret = ci_freeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
		return ret;
	}
	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
	if (ret) {
		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
		return ret;
	}

	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
	if (ret) {
		DRM_ERROR("ci_update_vce_dpm failed\n");
		return ret;
	}

	ret = ci_update_sclk_t(rdev);
	if (ret) {
		DRM_ERROR("ci_update_sclk_t failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_update_and_upload_mc_reg_table(rdev);
		if (ret) {
			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_program_memory_timing_parameters(rdev);
	if (ret) {
		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
	if (ret) {
		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_upload_dpm_level_enable_mask(rdev);
	if (ret) {
		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
		return ret;
	}
	if (pi->pcie_performance_request)
		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);

	return 0;
}

#if 0
void ci_dpm_reset_asic(struct radeon_device *rdev)
{
	ci_set_boot_state(rdev);
}
#endif

void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
{
	ci_program_display_gap(rdev);
}
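/* Power state parsing.  The unions below overlay the different
 * revisions of the ATOM PowerPlay tables found in the vbios; the CI
 * code only reads the pplib layouts and the ci clock info variant,
 * while the remaining members mirror the layouts used by other asic
 * families so the same union pattern works everywhere.
 */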
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		rdev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}
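
/*
 * Fill in one performance level from a pplib clock-info entry.  CI
 * clocks are stored split into a 16-bit low word and an 8-bit high byte,
 * i.e. reassembled as:
 *
 *	sclk = le16_to_cpu(usEngineClockLow) | (ucEngineClockHigh << 16);
 *
 * This also records the ACPI PCIe gen, the ULV level, and the PCIe
 * gen/lane extremes across the battery and performance UI classes.
 */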
static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
						 pi->sys_pcie_mask,
						 pi->vbios_boot_state.pcie_gen_bootup_value,
						 clock_info->ci.ucPCIEGen);
	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
						   pi->vbios_boot_state.pcie_lane_bootup_value,
						   le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_pcie_gen = pl->pcie_gen;
	}

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}

	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}

static int ci_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct ci_ps *ps;
	int ret;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct radeon_ps),
				  GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info) {
			ret = -EINVAL;
			goto err_free_ps;
		}
		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
		if (ps == NULL) {
			ret = -ENOMEM;
			goto err_free_ps;
		}
		rdev->pm.dpm.ps[i].ps_priv = ps;
		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			ci_parse_pplib_clock_info(rdev,
						  &rdev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	rdev->pm.dpm.num_ps = state_array->ucNumEntries;
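
	/*
	 * Each VCE state stores clk_idx, an index into the pplib
	 * ClockInfoArray; reassemble its sclk/mclk from the same
	 * low/high split used for the regular power states above.
	 */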
	/* fill in the vce power states */
	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
		u32 sclk, mclk;
		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
		sclk |= clock_info->ci.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
		rdev->pm.dpm.vce_states[i].sclk = sclk;
		rdev->pm.dpm.vce_states[i].mclk = mclk;
	}

	return 0;

err_free_ps:
	/*
	 * Free the states allocated so far and clear the array pointer;
	 * the caller invokes ci_dpm_fini() on failure, which would
	 * otherwise kfree() the array a second time.
	 */
	while (i--)
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	kfree(rdev->pm.dpm.ps);
	rdev->pm.dpm.ps = NULL;
	return ret;
}

static int ci_get_vbios_boot_values(struct radeon_device *rdev,
				    struct ci_vbios_boot_state *boot_state)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
	u8 frev, crev;
	u16 data_offset;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		firmware_info =
			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
						    data_offset);
		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);

		return 0;
	}
	return -EINVAL;
}

void ci_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	}
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	r600_free_extended_power_table(rdev);
}
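
/*
 * One-time DPM setup at driver load: allocate the ci_power_info, derive
 * the supported PCIe speed mask from the root port, parse the vBIOS
 * firmware/power tables, and seed the defaults the enable path relies
 * on.  Any failure tears the partial state down again through
 * ci_dpm_fini().
 */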
int ci_dpm_init(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	SMU7_Discrete_DpmTable *dpm_table;
	struct radeon_gpio_rec gpio;
	u16 data_offset, size;
	u8 frev, crev;
	struct ci_power_info *pi;
	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
	struct pci_dev *root = rdev->pdev->bus->self;
	int ret;

	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	if (!pci_is_root_bus(rdev->pdev->bus))
		speed_cap = pcie_get_speed_cap(root);
	if (speed_cap == PCI_SPEED_UNKNOWN) {
		pi->sys_pcie_mask = 0;
	} else {
		if (speed_cap == PCIE_SPEED_8_0GT)
			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
				RADEON_PCIE_SPEED_50 |
				RADEON_PCIE_SPEED_80;
		else if (speed_cap == PCIE_SPEED_5_0GT)
			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
				RADEON_PCIE_SPEED_50;
		else
			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
	}
	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;

	/*
	 * Seed the gen/lane min/max inverted so that the first state
	 * parsed by ci_parse_pplib_clock_info() initializes both bounds.
	 */
	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;

	pi->pcie_lane_performance.max = 0;
	pi->pcie_lane_performance.min = 16;
	pi->pcie_lane_powersaving.max = 0;
	pi->pcie_lane_powersaving.min = 16;

	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_get_platform_caps(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_parse_extended_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = ci_parse_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	pi->dll_default_on = false;
	pi->sram_end = SMC_RAM_END;

	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;

	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;

	pi->sclk_dpm_key_disabled = 0;
	pi->mclk_dpm_key_disabled = 0;
	pi->pcie_dpm_key_disabled = 0;
	pi->thermal_sclk_dpm_enabled = 0;

	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
	if ((rdev->pdev->device == 0x6658) &&
	    (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
		pi->mclk_dpm_key_disabled = 1;
	}

	pi->caps_sclk_ds = true;

	pi->mclk_strobe_mode_threshold = 40000;
	pi->mclk_stutter_mode_threshold = 40000;
	pi->mclk_edc_enable_threshold = 40000;
	pi->mclk_edc_wr_enable_threshold = 40000;

	ci_initialize_powertune_defaults(rdev);

	pi->caps_fps = false;

	pi->caps_sclk_throttle_low_notification = false;

	pi->caps_uvd_dpm = true;
	pi->caps_vce_dpm = true;

	ci_get_leakage_voltages(rdev);
	ci_patch_dependency_tables_with_leakage(rdev);
	ci_set_private_data_variables_based_on_pptable(rdev);

	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kcalloc(4,
			sizeof(struct radeon_clock_voltage_dependency_entry),
			GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		ci_dpm_fini(rdev);
		return -ENOMEM;
	}
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;

	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;

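	/*
	 * Default thermal trip points, in millidegrees Celsius: low/high
	 * bound the target range for the thermal controller and shutdown
	 * is the hard cutoff.  Hawaii uses slightly lower targets than
	 * the other CI parts.
	 */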
	if (rdev->family == CHIP_HAWAII) {
		pi->thermal_temp_setting.temperature_low = 94500;
		pi->thermal_temp_setting.temperature_high = 95000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		pi->thermal_temp_setting.temperature_low = 99500;
		pi->thermal_temp_setting.temperature_high = 100000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	}

	pi->uvd_enabled = false;

	dpm_table = &pi->smc_state_table;

	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->VRHotGpio = gpio.shift;
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	} else {
		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	}

	gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->AcDcGpio = gpio.shift;
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	} else {
		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	}

	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
	if (gpio.valid) {
		u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);

		switch (gpio.shift) {
		case 0:
			tmp &= ~GNB_SLOW_MODE_MASK;
			tmp |= GNB_SLOW_MODE(1);
			break;
		case 1:
			tmp &= ~GNB_SLOW_MODE_MASK;
			tmp |= GNB_SLOW_MODE(2);
			break;
		case 2:
			tmp |= GNB_SLOW;
			break;
		case 3:
			tmp |= FORCE_NB_PS1;
			break;
		case 4:
			tmp |= DPM_ENABLED;
			break;
		default:
			DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
			break;
		}
		WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
	}

	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
	}

	pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
	pi->pcie_performance_request =
		radeon_acpi_is_pcie_performance_request_supported(rdev);
#else
	pi->pcie_performance_request = false;
#endif

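	/*
	 * Spread spectrum is only advertised if the vBIOS actually
	 * carries an ASIC_InternalSS_Info table (the index looked up at
	 * the top of this function); dynamic_ss is left enabled either
	 * way.
	 */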
	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		pi->caps_sclk_ss_support = true;
		pi->caps_mclk_ss_support = true;
		pi->dynamic_ss = true;
	} else {
		pi->caps_sclk_ss_support = false;
		pi->caps_mclk_ss_support = false;
		pi->dynamic_ss = true;
	}

	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->caps_dynamic_ac_timing = true;

	pi->uvd_power_gated = false;

	/* make sure dc limits are valid */
	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	pi->fan_ctrl_is_in_default_mode = true;

	return 0;
}

void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *rps = &pi->current_rps;
	u32 sclk = ci_get_average_sclk_freq(rdev);
	u32 mclk = ci_get_average_mclk_freq(rdev);

	seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
	seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
	seq_printf(m, "power level avg sclk: %u mclk: %u\n",
		   sclk, mclk);
}

void ci_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl;
	int i;

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
	}
	r600_dpm_print_ps_status(rdev, rps);
}

u32 ci_dpm_get_current_sclk(struct radeon_device *rdev)
{
	u32 sclk = ci_get_average_sclk_freq(rdev);

	return sclk;
}

u32 ci_dpm_get_current_mclk(struct radeon_device *rdev)
{
	u32 mclk = ci_get_average_mclk_freq(rdev);

	return mclk;
}

u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}