xref: /openbmc/linux/drivers/gpu/drm/radeon/ci_dpm.c (revision b34081f1)
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "radeon.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "ci_dpm.h"
#include "atom.h"
#include <linux/seq_file.h>

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};

static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				       u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table);
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);

static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);

static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
	struct ci_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	switch (rdev->pdev->device) {
	case 0x6650:
	case 0x6658:
	case 0x665C:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6651:
	case 0x665D:
		pi->powertune_defaults = &defaults_bonaire_pro;
		break;
	case 0x6640:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x6641:
		pi->powertune_defaults = &defaults_saturn_pro;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

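/*
 * Convert a VDDC value in mV to a VID code for the SMC.  The constants
 * appear to follow the SVI2 encoding: 6200 is 1.55 V and 25 is 6.25 mV,
 * both expressed in 0.25 mV units, with VOLTAGE_SCALE converting vddc
 * from mV into the same units.
 */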
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

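/*
 * Populate the BAPM VID tables in the PowerTune fuse table from the CAC
 * leakage table.  With the EVV platform cap each entry carries three
 * explicit voltages; otherwise the vddc/leakage pair is used.
 */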
static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

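/*
 * "dw8" is the TdcWaterfallCtl dword of the PmFuses table.  Note that a
 * successful SRAM read is immediately overwritten with the per-asic
 * default, so the read presumably only verifies that the fuse table is
 * reachable.
 */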
static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

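/*
 * Derive the min/max GnbLPML VIDs from the non-zero entries of the
 * BapmVddC hi/lo SIDD tables populated above.
 */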
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 hi_sidd, lo_sidd;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

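/*
 * Locate the PmFuses table in SMC SRAM via the firmware header, fill in
 * the host-side copy piece by piece, then upload it in one shot.
 */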
static int ci_populate_pm_base(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = ci_read_smc_sram_dword(rdev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(rdev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(rdev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(rdev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(rdev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

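/*
 * Walk a 0xFFFFFFFF-terminated list of config registers.  CACHE entries
 * accumulate field values into 'cache', which is OR'ed into the next
 * real register write; SMC_IND/DIDT_IND entries use the indirect
 * accessors, and anything else is treated as a dword-indexed MMIO
 * offset.
 */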
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

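/*
 * The RLC safe-mode bracket around these writes suggests the DIDT
 * registers are only safely accessible while the RLC is idled.
 */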
static int ci_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		ci_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

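/*
 * Apply the user TDP adjustment (a percentage) to the configurable TDP
 * and hand the result to the SMC as the new OverDrive target, scaled by
 * 256 like the other power limits in this file.
 */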
static int ci_power_control_set_level(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment &&
	    (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
		adjust_percent = adjust_polarity ?
			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;
		target_tdp *= 256;

		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
	}

	return ret;
}

void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(rdev, gate);
}

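/*
 * An mclk switch must complete within the vblank period; GDDR5 needs a
 * larger window (450) than DDR3 (300), in the same units as
 * r600_dpm_get_vblank_time().
 */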
bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;

	return vblank_time < switch_limit;
}

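/*
 * Clamp the requested power state against the AC/DC limits and force a
 * single mclk level when memory-clock switching is unsafe (more than
 * one active crtc, or a vblank period too short to hide the switch).
 */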
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (!rdev->pm.dpm.ac_power) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

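/*
 * Program the thermal interrupt thresholds after clamping them to the
 * requested range; callers pass millidegrees, the registers take
 * degrees C.
 */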
static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT);
	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
		CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(CG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

	return 0;
}

#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static int ci_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_write_smc_sram_dword(rdev,
				       pi->soft_regs_start + reg_offset,
				       value, pi->sram_end);
}

static void ci_init_fps_limits(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = ci_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}

	return ret;
}

static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}

static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
					   enum radeon_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}

static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_stop_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp &= ~GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	/* SCLK_PWRMGT_CNTL is accessed through the SMC indirect space
	 * everywhere else in this file; use the matching read accessor
	 * here as well (the original read used plain RREG32).
	 */
	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(rdev, false);
	if (ret)
		return ret;

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}

static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);

	if (enable)
		tmp &= ~SCLK_PWRMGT_OFF;
	else
		tmp |= SCLK_PWRMGT_OFF;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
}

#if 0
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif

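/*
 * SMC mailbox helpers: the argument register is written before a
 * message is posted, and read back after a successful reply.
 */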
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(rdev, msg);
}

static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
							PPSMC_Msg msg, u32 *parameter)
{
	PPSMC_Result smc_result;

	smc_result = ci_send_msg_to_smc(rdev, msg);

	if ((smc_result == PPSMC_Result_OK) && parameter)
		*parameter = RREG32(SMC_MSG_ARG_0);

	return smc_result;
}

static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->sclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->mclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->pcie_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp)
{
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;
	return 0;
}

static int ci_set_boot_state(struct radeon_device *rdev)
{
	return ci_enable_sclk_mclk_dpm(rdev, false);
}

static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
{
	u32 sclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetSclkFrequency,
						    &sclk_freq);
	if (smc_result != PPSMC_Result_OK)
		sclk_freq = 0;

	return sclk_freq;
}

static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
{
	u32 mclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetMclkFrequency,
						    &mclk_freq);
	if (smc_result != PPSMC_Result_OK)
		mclk_freq = 0;

	return mclk_freq;
}

static void ci_dpm_start_smc(struct radeon_device *rdev)
{
	int i;

	ci_program_jump_on_start(rdev);
	ci_start_smc_clock(rdev);
	ci_start_smc(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
			break;
	}
}

static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
	ci_reset_smc(rdev);
	ci_stop_smc_clock(rdev);
}

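/*
 * Cache the SMC SRAM offsets of the tables updated at runtime (DPM
 * table, soft registers, MC register/arb tables, fan table) from the
 * firmware header.
 */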
static int ci_process_firmware_header(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->dpm_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->mc_reg_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, FanTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->fan_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->arb_table_start = tmp;

	return 0;
}

static void ci_read_clock_registers(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(CG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}

static void ci_init_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

static void ci_enable_thermal_protection(struct radeon_device *rdev,
					 bool enable)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	if (enable)
		tmp &= ~THERMAL_PROTECTION_DIS;
	else
		tmp |= THERMAL_PROTECTION_DIS;
	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

static void ci_enable_acpi_power_management(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= STATIC_PM_EN;

	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

#if 0
static int ci_enter_ulp_state(struct radeon_device *rdev)
{
	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

static int ci_exit_ulp_state(struct radeon_device *rdev)
{
	int i;

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif

static int ci_notify_smc_display_change(struct radeon_device *rdev,
					bool has_display)
{
	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;

	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
}

static int ci_enable_ds_master_switch(struct radeon_device *rdev,
				      bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
				return -EINVAL;
		} else {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	} else {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

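/*
 * Program the display gap so reclocking happens during vblank while any
 * crtc is active.  The pre-vblank count is derived from the refresh
 * rate and measured vblank time (with 60 Hz / 500 us fallbacks) and,
 * since reference_freq is in 10 kHz units, ref_clock / 100 converts it
 * to reference-clock ticks.
 */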
static void ci_program_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = rdev->clock.spll.reference_freq;
	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);

	tmp &= ~DISP_GAP_MASK;
	if (rdev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);

	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));

	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
}

static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (enable) {
		if (pi->caps_sclk_ss_support) {
			tmp = RREG32_SMC(GENERAL_PWRMGT);
			tmp |= DYN_SPREAD_SPECTRUM_EN;
			WREG32_SMC(GENERAL_PWRMGT, tmp);
		}
	} else {
		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
		tmp &= ~SSEN;
		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_program_sstp(struct radeon_device *rdev)
{
	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}

static void ci_enable_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);

	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));

	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
}

static void ci_program_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
}

static void ci_clear_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, 0);
	WREG32_SMC(CG_FTV_1, 0);
	WREG32_SMC(CG_FTV_2, 0);
	WREG32_SMC(CG_FTV_3, 0);
	WREG32_SMC(CG_FTV_4, 0);
	WREG32_SMC(CG_FTV_5, 0);
	WREG32_SMC(CG_FTV_6, 0);
	WREG32_SMC(CG_FTV_7, 0);
}

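/*
 * Wait for the boot ROM to finish, then halt the SMC and load the SMU
 * ucode into its SRAM.
 */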
static int ci_upload_firmware(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int i, ret;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
			break;
	}
	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);

	ci_stop_smc_clock(rdev);
	ci_reset_smc(rdev);

	ret = ci_load_smc_ucode(rdev, pi->sram_end);

	return ret;
}

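/*
 * SVI2 has no GPIO lookup table, so synthesize a voltage table directly
 * from a clock/voltage dependency table.
 */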
static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
				     struct atom_voltage_table *voltage_table)
{
	u32 i;

	if (voltage_dependency_table == NULL)
		return -EINVAL;

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;

	voltage_table->count = voltage_dependency_table->count;
	for (i = 0; i < voltage_table->count; i++) {
		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}

static int ci_construct_voltage_tables(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->vddc_voltage_table);
		if (ret)
			return ret;
	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						&pi->vddc_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
							 &pi->vddc_voltage_table);

	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->vddci_voltage_table);
		if (ret)
			return ret;
	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						&pi->vddci_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
							 &pi->vddci_voltage_table);

	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->mvdd_voltage_table);
		if (ret)
			return ret;
	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						&pi->mvdd_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
							 &pi->mvdd_voltage_table);

	return 0;
}

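/*
 * Convert one voltage table entry to the SMC representation, falling
 * back to the raw voltage when no leakage-derived SIDD values are
 * available.  All fields are stored big-endian for the SMC.
 */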
1682 static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
1683 					  struct atom_voltage_table_entry *voltage_table,
1684 					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
1685 {
1686 	int ret;
1687 
1688 	ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
1689 					    &smc_voltage_table->StdVoltageHiSidd,
1690 					    &smc_voltage_table->StdVoltageLoSidd);
1691 
1692 	if (ret) {
1693 		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
1694 		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
1695 	}
1696 
1697 	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
1698 	smc_voltage_table->StdVoltageHiSidd =
1699 		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
1700 	smc_voltage_table->StdVoltageLoSidd =
1701 		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
1702 }
1703 
1704 static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
1705 				      SMU7_Discrete_DpmTable *table)
1706 {
1707 	struct ci_power_info *pi = ci_get_pi(rdev);
1708 	unsigned int count;
1709 
1710 	table->VddcLevelCount = pi->vddc_voltage_table.count;
1711 	for (count = 0; count < table->VddcLevelCount; count++) {
1712 		ci_populate_smc_voltage_table(rdev,
1713 					      &pi->vddc_voltage_table.entries[count],
1714 					      &table->VddcLevel[count]);
1715 
1716 		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1717 			table->VddcLevel[count].Smio |=
1718 				pi->vddc_voltage_table.entries[count].smio_low;
1719 		else
1720 			table->VddcLevel[count].Smio = 0;
1721 	}
1722 	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
1723 
1724 	return 0;
1725 }
1726 
1727 static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
1728 				       SMU7_Discrete_DpmTable *table)
1729 {
1730 	unsigned int count;
1731 	struct ci_power_info *pi = ci_get_pi(rdev);
1732 
1733 	table->VddciLevelCount = pi->vddci_voltage_table.count;
1734 	for (count = 0; count < table->VddciLevelCount; count++) {
1735 		ci_populate_smc_voltage_table(rdev,
1736 					      &pi->vddci_voltage_table.entries[count],
1737 					      &table->VddciLevel[count]);
1738 
1739 		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1740 			table->VddciLevel[count].Smio |=
1741 				pi->vddci_voltage_table.entries[count].smio_low;
1742 		else
1743 			table->VddciLevel[count].Smio = 0;
1744 	}
1745 	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
1746 
1747 	return 0;
1748 }
1749 
1750 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
1751 				      SMU7_Discrete_DpmTable *table)
1752 {
1753 	struct ci_power_info *pi = ci_get_pi(rdev);
1754 	unsigned int count;
1755 
1756 	table->MvddLevelCount = pi->mvdd_voltage_table.count;
1757 	for (count = 0; count < table->MvddLevelCount; count++) {
1758 		ci_populate_smc_voltage_table(rdev,
1759 					      &pi->mvdd_voltage_table.entries[count],
1760 					      &table->MvddLevel[count]);
1761 
1762 		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1763 			table->MvddLevel[count].Smio |=
1764 				pi->mvdd_voltage_table.entries[count].smio_low;
1765 		else
1766 			table->MvddLevel[count].Smio = 0;
1767 	}
1768 	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
1769 
1770 	return 0;
1771 }
1772 
1773 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
1774 					  SMU7_Discrete_DpmTable *table)
1775 {
1776 	int ret;
1777 
1778 	ret = ci_populate_smc_vddc_table(rdev, table);
1779 	if (ret)
1780 		return ret;
1781 
1782 	ret = ci_populate_smc_vddci_table(rdev, table);
1783 	if (ret)
1784 		return ret;
1785 
1786 	ret = ci_populate_smc_mvdd_table(rdev, table);
1787 	if (ret)
1788 		return ret;
1789 
1790 	return 0;
1791 }
1792 
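/*
 * Look up the MVDD level for the given mclk in the dependency table;
 * fails when MVDD control is disabled or the clock is above every entry.
 */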
1793 static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
1794 				  SMU7_Discrete_VoltageLevel *voltage)
1795 {
1796 	struct ci_power_info *pi = ci_get_pi(rdev);
1797 	u32 i = 0;
1798 
1799 	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
1800 		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
1801 			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
1802 				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
1803 				break;
1804 			}
1805 		}
1806 
1807 		if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
1808 			return -EINVAL;
1809 		return 0;
1810 	}
1811 	return -EINVAL;
1812 }
1813 
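/*
 * Derive the standard high/low SIDD voltages for an entry by matching it
 * (exactly, then by lower bound) against the vddc-vs-sclk dependency
 * table and reading the paired CAC leakage entry.
 */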
1814 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
1815 					 struct atom_voltage_table_entry *voltage_table,
1816 					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
1817 {
1818 	u16 v_index, idx;
1819 	bool voltage_found = false;
1820 	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
1821 	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
1822 
1823 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
1824 		return -EINVAL;
1825 
1826 	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
1827 		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1828 			if (voltage_table->value ==
1829 			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1830 				voltage_found = true;
1831 				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1832 					idx = v_index;
1833 				else
1834 					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1835 				*std_voltage_lo_sidd =
1836 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1837 				*std_voltage_hi_sidd =
1838 					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1839 				break;
1840 			}
1841 		}
1842 
1843 		if (!voltage_found) {
1844 			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1845 				if (voltage_table->value <=
1846 				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1847 					voltage_found = true;
1848 					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1849 						idx = v_index;
1850 					else
1851 						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1852 					*std_voltage_lo_sidd =
1853 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1854 					*std_voltage_hi_sidd =
1855 						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1856 					break;
1857 				}
1858 			}
1859 		}
1860 	}
1861 
1862 	return 0;
1863 }
1864 
1865 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
1866 						  const struct radeon_phase_shedding_limits_table *limits,
1867 						  u32 sclk,
1868 						  u32 *phase_shedding)
1869 {
1870 	unsigned int i;
1871 
1872 	*phase_shedding = 1;
1873 
1874 	for (i = 0; i < limits->count; i++) {
1875 		if (sclk < limits->entries[i].sclk) {
1876 			*phase_shedding = i;
1877 			break;
1878 		}
1879 	}
1880 }
1881 
1882 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
1883 						  const struct radeon_phase_shedding_limits_table *limits,
1884 						  u32 mclk,
1885 						  u32 *phase_shedding)
1886 {
1887 	unsigned int i;
1888 
1889 	*phase_shedding = 1;
1890 
1891 	for (i = 0; i < limits->count; i++) {
1892 		if (mclk < limits->entries[i].mclk) {
1893 			*phase_shedding = i;
1894 			break;
1895 		}
1896 	}
1897 }
1898 
1899 static int ci_init_arb_table_index(struct radeon_device *rdev)
1900 {
1901 	struct ci_power_info *pi = ci_get_pi(rdev);
1902 	u32 tmp;
1903 	int ret;
1904 
1905 	ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
1906 				     &tmp, pi->sram_end);
1907 	if (ret)
1908 		return ret;
1909 
1910 	tmp &= 0x00FFFFFF;
1911 	tmp |= MC_CG_ARB_FREQ_F1 << 24;
1912 
1913 	return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
1914 				       tmp, pi->sram_end);
1915 }
1916 
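/* Return the lowest voltage in the table that supports the given clock. */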
1917 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
1918 					 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
1919 					 u32 clock, u32 *voltage)
1920 {
1921 	u32 i = 0;
1922 
1923 	if (allowed_clock_voltage_table->count == 0)
1924 		return -EINVAL;
1925 
1926 	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1927 		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1928 			*voltage = allowed_clock_voltage_table->entries[i].v;
1929 			return 0;
1930 		}
1931 	}
1932 
1933 	*voltage = allowed_clock_voltage_table->entries[i-1].v;
1934 
1935 	return 0;
1936 }
1937 
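/*
 * Pick the largest deep-sleep divider id such that sclk / (1 << id)
 * stays at or above the minimum engine clock.
 */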
1938 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1939 					     u32 sclk, u32 min_sclk_in_sr)
1940 {
1941 	u32 i;
1942 	u32 tmp;
1943 	u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
1944 		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
1945 
1946 	if (sclk < min)
1947 		return 0;
1948 
1949 	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
1950 		tmp = sclk / (1 << i);
1951 		if (tmp >= min || i == 0)
1952 			break;
1953 	}
1954 
1955 	return (u8)i;
1956 }
1957 
1958 static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
1959 {
1960 	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
1961 }
1962 
1963 static int ci_reset_to_default(struct radeon_device *rdev)
1964 {
1965 	return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
1966 		0 : -EINVAL;
1967 }
1968 
1969 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
1970 {
1971 	u32 tmp;
1972 
1973 	tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
1974 
1975 	if (tmp == MC_CG_ARB_FREQ_F0)
1976 		return 0;
1977 
1978 	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
1979 }
1980 
1981 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
1982 						u32 sclk,
1983 						u32 mclk,
1984 						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
1985 {
1986 	u32 dram_timing;
1987 	u32 dram_timing2;
1988 	u32 burst_time;
1989 
1990 	radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
1991 
1992 	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
1993 	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
1994 	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
1995 
1996 	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
1997 	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
1998 	arb_regs->McArbBurstTime = (u8)burst_time;
1999 
2000 	return 0;
2001 }
2002 
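/*
 * Fill an MC arbiter DRAM timing entry for every sclk/mclk pairing and
 * upload the whole table to SMC SRAM.
 */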
2003 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2004 {
2005 	struct ci_power_info *pi = ci_get_pi(rdev);
2006 	SMU7_Discrete_MCArbDramTimingTable arb_regs;
2007 	u32 i, j;
2008 	int ret = 0;
2009 
2010 	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2011 
2012 	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2013 		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2014 			ret = ci_populate_memory_timing_parameters(rdev,
2015 								   pi->dpm_table.sclk_table.dpm_levels[i].value,
2016 								   pi->dpm_table.mclk_table.dpm_levels[j].value,
2017 								   &arb_regs.entries[i][j]);
2018 			if (ret)
2019 				break;
2020 		}
2021 	}
2022 
2023 	if (ret == 0)
2024 		ret = ci_copy_bytes_to_smc(rdev,
2025 					   pi->arb_table_start,
2026 					   (u8 *)&arb_regs,
2027 					   sizeof(SMU7_Discrete_MCArbDramTimingTable),
2028 					   pi->sram_end);
2029 
2030 	return ret;
2031 }
2032 
2033 static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2034 {
2035 	struct ci_power_info *pi = ci_get_pi(rdev);
2036 
2037 	if (pi->need_update_smu7_dpm_table == 0)
2038 		return 0;
2039 
2040 	return ci_do_program_memory_timing_parameters(rdev);
2041 }
2042 
2043 static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2044 					  struct radeon_ps *radeon_boot_state)
2045 {
2046 	struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2047 	struct ci_power_info *pi = ci_get_pi(rdev);
2048 	u32 level = 0;
2049 
2050 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2051 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2052 		    boot_state->performance_levels[0].sclk) {
2053 			pi->smc_state_table.GraphicsBootLevel = level;
2054 			break;
2055 		}
2056 	}
2057 
2058 	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2059 		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2060 		    boot_state->performance_levels[0].mclk) {
2061 			pi->smc_state_table.MemoryBootLevel = level;
2062 			break;
2063 		}
2064 	}
2065 }
2066 
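/* Build a bitmask of the enabled DPM levels, bit 0 = level 0. */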
2067 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2068 {
2069 	u32 i;
2070 	u32 mask_value = 0;
2071 
2072 	for (i = dpm_table->count; i > 0; i--) {
2073 		mask_value <<= 1;
2074 		if (dpm_table->dpm_levels[i-1].enabled)
2075 			mask_value |= 0x1;
2076 	}
2079 
2080 	return mask_value;
2081 }
2082 
2083 static void ci_populate_smc_link_level(struct radeon_device *rdev,
2084 				       SMU7_Discrete_DpmTable *table)
2085 {
2086 	struct ci_power_info *pi = ci_get_pi(rdev);
2087 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2088 	u32 i;
2089 
2090 	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2091 		table->LinkLevel[i].PcieGenSpeed =
2092 			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2093 		table->LinkLevel[i].PcieLaneCount =
2094 			r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2095 		table->LinkLevel[i].EnabledForActivity = 1;
2096 		table->LinkLevel[i].DownT = cpu_to_be32(5);
2097 		table->LinkLevel[i].UpT = cpu_to_be32(30);
2098 	}
2099 
2100 	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2101 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2102 		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2103 }
2104 
2105 static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2106 				     SMU7_Discrete_DpmTable *table)
2107 {
2108 	u32 count;
2109 	struct atom_clock_dividers dividers;
2110 	int ret = -EINVAL;
2111 
2112 	table->UvdLevelCount =
2113 		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2114 
2115 	for (count = 0; count < table->UvdLevelCount; count++) {
2116 		table->UvdLevel[count].VclkFrequency =
2117 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2118 		table->UvdLevel[count].DclkFrequency =
2119 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2120 		table->UvdLevel[count].MinVddc =
2121 			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2122 		table->UvdLevel[count].MinVddcPhases = 1;
2123 
2124 		ret = radeon_atom_get_clock_dividers(rdev,
2125 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2126 						     table->UvdLevel[count].VclkFrequency, false, &dividers);
2127 		if (ret)
2128 			return ret;
2129 
2130 		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2131 
2132 		ret = radeon_atom_get_clock_dividers(rdev,
2133 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2134 						     table->UvdLevel[count].DclkFrequency, false, &dividers);
2135 		if (ret)
2136 			return ret;
2137 
2138 		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2139 
2140 		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2141 		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2142 		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2143 	}
2144 
2145 	return ret;
2146 }
2147 
2148 static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2149 				     SMU7_Discrete_DpmTable *table)
2150 {
2151 	u32 count;
2152 	struct atom_clock_dividers dividers;
2153 	int ret = -EINVAL;
2154 
2155 	table->VceLevelCount =
2156 		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2157 
2158 	for (count = 0; count < table->VceLevelCount; count++) {
2159 		table->VceLevel[count].Frequency =
2160 			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2161 		table->VceLevel[count].MinVoltage =
2162 			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2163 		table->VceLevel[count].MinPhases = 1;
2164 
2165 		ret = radeon_atom_get_clock_dividers(rdev,
2166 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2167 						     table->VceLevel[count].Frequency, false, &dividers);
2168 		if (ret)
2169 			return ret;
2170 
2171 		table->VceLevel[count].Divider = (u8)dividers.post_divider;
2172 
2173 		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2174 		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2175 	}
2176 
2177 	return ret;
2179 }
2180 
2181 static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2182 				     SMU7_Discrete_DpmTable *table)
2183 {
2184 	u32 count;
2185 	struct atom_clock_dividers dividers;
2186 	int ret = -EINVAL;
2187 
2188 	table->AcpLevelCount = (u8)
2189 		(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2190 
2191 	for (count = 0; count < table->AcpLevelCount; count++) {
2192 		table->AcpLevel[count].Frequency =
2193 			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2194 		table->AcpLevel[count].MinVoltage =
2195 			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2196 		table->AcpLevel[count].MinPhases = 1;
2197 
2198 		ret = radeon_atom_get_clock_dividers(rdev,
2199 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2200 						     table->AcpLevel[count].Frequency, false, &dividers);
2201 		if (ret)
2202 			return ret;
2203 
2204 		table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2205 
2206 		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2207 		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2208 	}
2209 
2210 	return ret;
2211 }
2212 
2213 static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2214 				      SMU7_Discrete_DpmTable *table)
2215 {
2216 	u32 count;
2217 	struct atom_clock_dividers dividers;
2218 	int ret = -EINVAL;
2219 
2220 	table->SamuLevelCount =
2221 		rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2222 
2223 	for (count = 0; count < table->SamuLevelCount; count++) {
2224 		table->SamuLevel[count].Frequency =
2225 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2226 		table->SamuLevel[count].MinVoltage =
2227 			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2228 		table->SamuLevel[count].MinPhases = 1;
2229 
2230 		ret = radeon_atom_get_clock_dividers(rdev,
2231 						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2232 						     table->SamuLevel[count].Frequency, false, &dividers);
2233 		if (ret)
2234 			return ret;
2235 
2236 		table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2237 
2238 		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2239 		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2240 	}
2241 
2242 	return ret;
2243 }
2244 
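/*
 * Program the MPLL dividers and optional memory spread spectrum for the
 * requested memory clock.  For spread spectrum, the nominal frequency is
 * scaled by 4 for GDDR5 and by 2 otherwise, presumably matching the
 * effective memory interface rate.
 */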
2245 static int ci_calculate_mclk_params(struct radeon_device *rdev,
2246 				    u32 memory_clock,
2247 				    SMU7_Discrete_MemoryLevel *mclk,
2248 				    bool strobe_mode,
2249 				    bool dll_state_on)
2250 {
2251 	struct ci_power_info *pi = ci_get_pi(rdev);
2252 	u32  dll_cntl = pi->clock_registers.dll_cntl;
2253 	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2254 	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2255 	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2256 	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2257 	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2258 	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2259 	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2260 	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2261 	struct atom_mpll_param mpll_param;
2262 	int ret;
2263 
2264 	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2265 	if (ret)
2266 		return ret;
2267 
2268 	mpll_func_cntl &= ~BWCTRL_MASK;
2269 	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2270 
2271 	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2272 	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2273 		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2274 
2275 	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2276 	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2277 
2278 	if (pi->mem_gddr5) {
2279 		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2280 		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2281 			YCLK_POST_DIV(mpll_param.post_div);
2282 	}
2283 
2284 	if (pi->caps_mclk_ss_support) {
2285 		struct radeon_atom_ss ss;
2286 		u32 freq_nom;
2287 		u32 tmp;
2288 		u32 reference_clock = rdev->clock.mpll.reference_freq;
2289 
2290 		if (pi->mem_gddr5)
2291 			freq_nom = memory_clock * 4;
2292 		else
2293 			freq_nom = memory_clock * 2;
2294 
2295 		tmp = (freq_nom / reference_clock);
2296 		tmp = tmp * tmp;
2297 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2298 						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2299 			u32 clks = reference_clock * 5 / ss.rate;
2300 			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2301 
2302 			mpll_ss1 &= ~CLKV_MASK;
2303 			mpll_ss1 |= CLKV(clkv);
2304 
2305 			mpll_ss2 &= ~CLKS_MASK;
2306 			mpll_ss2 |= CLKS(clks);
2307 		}
2308 	}
2309 
2310 	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2311 	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2312 
2313 	if (dll_state_on)
2314 		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2315 	else
2316 		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2317 
2318 	mclk->MclkFrequency = memory_clock;
2319 	mclk->MpllFuncCntl = mpll_func_cntl;
2320 	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2321 	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2322 	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2323 	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2324 	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2325 	mclk->DllCntl = dll_cntl;
2326 	mclk->MpllSs1 = mpll_ss1;
2327 	mclk->MpllSs2 = mpll_ss2;
2328 
2329 	return 0;
2330 }
2331 
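/*
 * Translate one memory clock into a complete SMC memory level: minimum
 * voltages from the dependency tables, stutter/strobe/EDC policy from
 * the driver thresholds, and MPLL settings from ci_calculate_mclk_params().
 */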
2332 static int ci_populate_single_memory_level(struct radeon_device *rdev,
2333 					   u32 memory_clock,
2334 					   SMU7_Discrete_MemoryLevel *memory_level)
2335 {
2336 	struct ci_power_info *pi = ci_get_pi(rdev);
2337 	int ret;
2338 	bool dll_state_on;
2339 
2340 	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2341 		ret = ci_get_dependency_volt_by_clk(rdev,
2342 						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2343 						    memory_clock, &memory_level->MinVddc);
2344 		if (ret)
2345 			return ret;
2346 	}
2347 
2348 	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2349 		ret = ci_get_dependency_volt_by_clk(rdev,
2350 						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2351 						    memory_clock, &memory_level->MinVddci);
2352 		if (ret)
2353 			return ret;
2354 	}
2355 
2356 	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2357 		ret = ci_get_dependency_volt_by_clk(rdev,
2358 						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2359 						    memory_clock, &memory_level->MinMvdd);
2360 		if (ret)
2361 			return ret;
2362 	}
2363 
2364 	memory_level->MinVddcPhases = 1;
2365 
2366 	if (pi->vddc_phase_shed_control)
2367 		ci_populate_phase_value_based_on_mclk(rdev,
2368 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2369 						      memory_clock,
2370 						      &memory_level->MinVddcPhases);
2371 
2372 	memory_level->EnabledForThrottle = 1;
2373 	memory_level->EnabledForActivity = 1;
2374 	memory_level->UpH = 0;
2375 	memory_level->DownH = 100;
2376 	memory_level->VoltageDownH = 0;
2377 	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2378 
2379 	memory_level->StutterEnable = false;
2380 	memory_level->StrobeEnable = false;
2381 	memory_level->EdcReadEnable = false;
2382 	memory_level->EdcWriteEnable = false;
2383 	memory_level->RttEnable = false;
2384 
2385 	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2386 
2387 	if (pi->mclk_stutter_mode_threshold &&
2388 	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
2389 	    !pi->uvd_enabled &&
2390 	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
2391 	    (rdev->pm.dpm.new_active_crtc_count <= 2))
2392 		memory_level->StutterEnable = true;
2393 
2394 	if (pi->mclk_strobe_mode_threshold &&
2395 	    (memory_clock <= pi->mclk_strobe_mode_threshold))
2396 		memory_level->StrobeEnable = true;
2397 
2398 	if (pi->mem_gddr5) {
2399 		memory_level->StrobeRatio =
2400 			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2401 		if (pi->mclk_edc_enable_threshold &&
2402 		    (memory_clock > pi->mclk_edc_enable_threshold))
2403 			memory_level->EdcReadEnable = true;
2404 
2405 		if (pi->mclk_edc_wr_enable_threshold &&
2406 		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
2407 			memory_level->EdcWriteEnable = true;
2408 
2409 		if (memory_level->StrobeEnable) {
2410 			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
2411 			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2412 				dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2413 			else
2414 				dll_state_on = (RREG32(MC_SEQ_MISC6) >> 1) & 0x1;
2415 		} else {
2416 			dll_state_on = pi->dll_default_on;
2417 		}
2418 	} else {
2419 		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
2420 		dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2421 	}
2422 
2423 	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2424 	if (ret)
2425 		return ret;
2426 
2427 	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2428 	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
2429 	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
2430 	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2431 
2432 	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2433 	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2434 	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2435 	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2436 	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2437 	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2438 	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2439 	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2440 	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2441 	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2442 	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2443 
2444 	return 0;
2445 }
2446 
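/*
 * Build the ACPI (lowest power) graphics and memory levels: sclk falls
 * back to the SPLL reference clock with the PLL reset and powered down,
 * and the memory DLL and clocks are gated via the saved register values.
 */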
2447 static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2448 				      SMU7_Discrete_DpmTable *table)
2449 {
2450 	struct ci_power_info *pi = ci_get_pi(rdev);
2451 	struct atom_clock_dividers dividers;
2452 	SMU7_Discrete_VoltageLevel voltage_level;
2453 	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2454 	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2455 	u32 dll_cntl = pi->clock_registers.dll_cntl;
2456 	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2457 	int ret;
2458 
2459 	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2460 
2461 	if (pi->acpi_vddc)
2462 		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2463 	else
2464 		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2465 
2466 	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2467 
2468 	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2469 
2470 	ret = radeon_atom_get_clock_dividers(rdev,
2471 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2472 					     table->ACPILevel.SclkFrequency, false, &dividers);
2473 	if (ret)
2474 		return ret;
2475 
2476 	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
2477 	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2478 	table->ACPILevel.DeepSleepDivId = 0;
2479 
2480 	spll_func_cntl &= ~SPLL_PWRON;
2481 	spll_func_cntl |= SPLL_RESET;
2482 
2483 	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2484 	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
2485 
2486 	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2487 	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
2488 	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
2489 	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
2490 	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
2491 	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2492 	table->ACPILevel.CcPwrDynRm = 0;
2493 	table->ACPILevel.CcPwrDynRm1 = 0;
2494 
2495 	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
2496 	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
2497 	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
2498 	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
2499 	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
2500 	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
2501 	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
2502 	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
2503 	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
2504 	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
2505 	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
2506 
2507 	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
2508 	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
2509 
2510 	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2511 		if (pi->acpi_vddci)
2512 			table->MemoryACPILevel.MinVddci =
2513 				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
2514 		else
2515 			table->MemoryACPILevel.MinVddci =
2516 				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
2517 	}
2518 
2519 	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
2520 		table->MemoryACPILevel.MinMvdd = 0;
2521 	else
2522 		table->MemoryACPILevel.MinMvdd =
2523 			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
2524 
2525 	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
2526 	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2527 
2528 	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
2529 
2530 	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
2531 	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
2532 	table->MemoryACPILevel.MpllAdFuncCntl =
2533 		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
2534 	table->MemoryACPILevel.MpllDqFuncCntl =
2535 		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
2536 	table->MemoryACPILevel.MpllFuncCntl =
2537 		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
2538 	table->MemoryACPILevel.MpllFuncCntl_1 =
2539 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
2540 	table->MemoryACPILevel.MpllFuncCntl_2 =
2541 		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
2542 	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
2543 	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
2544 
2545 	table->MemoryACPILevel.EnabledForThrottle = 0;
2546 	table->MemoryACPILevel.EnabledForActivity = 0;
2547 	table->MemoryACPILevel.UpH = 0;
2548 	table->MemoryACPILevel.DownH = 100;
2549 	table->MemoryACPILevel.VoltageDownH = 0;
2550 	table->MemoryACPILevel.ActivityLevel =
2551 		cpu_to_be16((u16)pi->mclk_activity_target);
2552 
2553 	table->MemoryACPILevel.StutterEnable = false;
2554 	table->MemoryACPILevel.StrobeEnable = false;
2555 	table->MemoryACPILevel.EdcReadEnable = false;
2556 	table->MemoryACPILevel.EdcWriteEnable = false;
2557 	table->MemoryACPILevel.RttEnable = false;
2558 
2559 	return 0;
2560 }
2562 
2563 static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
2564 {
2565 	struct ci_power_info *pi = ci_get_pi(rdev);
2566 	struct ci_ulv_parm *ulv = &pi->ulv;
2567 
2568 	if (ulv->supported) {
2569 		if (enable)
2570 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
2571 				0 : -EINVAL;
2572 		else
2573 			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
2574 				0 : -EINVAL;
2575 	}
2576 
2577 	return 0;
2578 }
2579 
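/*
 * Program the ULV state.  The target ULV voltage is read from
 * backbias_response_time (apparently reused as storage for it); the VDDC
 * offset is expressed directly for GPIO-controlled rails and as a VID
 * code for SVI2.
 */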
2580 static int ci_populate_ulv_level(struct radeon_device *rdev,
2581 				 SMU7_Discrete_Ulv *state)
2582 {
2583 	struct ci_power_info *pi = ci_get_pi(rdev);
2584 	u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
2585 
2586 	state->CcPwrDynRm = 0;
2587 	state->CcPwrDynRm1 = 0;
2588 
2589 	if (ulv_voltage == 0) {
2590 		pi->ulv.supported = false;
2591 		return 0;
2592 	}
2593 
2594 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2595 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2596 			state->VddcOffset = 0;
2597 		else
2598 			state->VddcOffset =
2599 				rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
2600 	} else {
2601 		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2602 			state->VddcOffsetVid = 0;
2603 		else
2604 			state->VddcOffsetVid = (u8)
2605 				((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
2606 				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2607 	}
2608 	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
2609 
2610 	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
2611 	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
2612 	state->VddcOffset = cpu_to_be16(state->VddcOffset);
2613 
2614 	return 0;
2615 }
2616 
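/*
 * Compute the SPLL feedback divider and optional engine spread spectrum
 * settings for the requested engine clock.
 */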
2617 static int ci_calculate_sclk_params(struct radeon_device *rdev,
2618 				    u32 engine_clock,
2619 				    SMU7_Discrete_GraphicsLevel *sclk)
2620 {
2621 	struct ci_power_info *pi = ci_get_pi(rdev);
2622 	struct atom_clock_dividers dividers;
2623 	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
2624 	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
2625 	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
2626 	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2627 	u32 reference_clock = rdev->clock.spll.reference_freq;
2628 	u32 reference_divider;
2629 	u32 fbdiv;
2630 	int ret;
2631 
2632 	ret = radeon_atom_get_clock_dividers(rdev,
2633 					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2634 					     engine_clock, false, &dividers);
2635 	if (ret)
2636 		return ret;
2637 
2638 	reference_divider = 1 + dividers.ref_div;
2639 	fbdiv = dividers.fb_div & 0x3FFFFFF;
2640 
2641 	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
2642 	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
2643 	spll_func_cntl_3 |= SPLL_DITHEN;
2644 
2645 	if (pi->caps_sclk_ss_support) {
2646 		struct radeon_atom_ss ss;
2647 		u32 vco_freq = engine_clock * dividers.post_div;
2648 
2649 		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2650 						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
2651 			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
2652 			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
2653 
2654 			cg_spll_spread_spectrum &= ~CLK_S_MASK;
2655 			cg_spll_spread_spectrum |= CLK_S(clk_s);
2656 			cg_spll_spread_spectrum |= SSEN;
2657 
2658 			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
2659 			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
2660 		}
2661 	}
2662 
2663 	sclk->SclkFrequency = engine_clock;
2664 	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
2665 	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
2666 	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
2667 	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
2668 	sclk->SclkDid = (u8)dividers.post_divider;
2669 
2670 	return 0;
2671 }
2672 
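/*
 * Translate one engine clock into an SMC graphics level: SPLL parameters,
 * minimum VDDC from the sclk dependency table, and a deep-sleep divider
 * when sclk deep sleep is supported.
 */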
2673 static int ci_populate_single_graphic_level(struct radeon_device *rdev,
2674 					    u32 engine_clock,
2675 					    u16 sclk_activity_level_t,
2676 					    SMU7_Discrete_GraphicsLevel *graphic_level)
2677 {
2678 	struct ci_power_info *pi = ci_get_pi(rdev);
2679 	int ret;
2680 
2681 	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
2682 	if (ret)
2683 		return ret;
2684 
2685 	ret = ci_get_dependency_volt_by_clk(rdev,
2686 					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2687 					    engine_clock, &graphic_level->MinVddc);
2688 	if (ret)
2689 		return ret;
2690 
2691 	graphic_level->SclkFrequency = engine_clock;
2692 
2693 	graphic_level->Flags = 0;
2694 	graphic_level->MinVddcPhases = 1;
2695 
2696 	if (pi->vddc_phase_shed_control)
2697 		ci_populate_phase_value_based_on_sclk(rdev,
2698 						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2699 						      engine_clock,
2700 						      &graphic_level->MinVddcPhases);
2701 
2702 	graphic_level->ActivityLevel = sclk_activity_level_t;
2703 
2704 	graphic_level->CcPwrDynRm = 0;
2705 	graphic_level->CcPwrDynRm1 = 0;
2706 	graphic_level->EnabledForActivity = 1;
2707 	graphic_level->EnabledForThrottle = 1;
2708 	graphic_level->UpH = 0;
2709 	graphic_level->DownH = 0;
2710 	graphic_level->VoltageDownH = 0;
2711 	graphic_level->PowerThrottle = 0;
2712 
2713 	if (pi->caps_sclk_ds)
2714 		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
2715 										   engine_clock,
2716 										   CISLAND_MINIMUM_ENGINE_CLOCK);
2717 
2718 	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2719 
2720 	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
2721 	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
2722 	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
2723 	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
2724 	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
2725 	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
2726 	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
2727 	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
2728 	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
2729 	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
2730 	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
2731 
2732 	return 0;
2733 }
2734 
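/*
 * Populate every sclk DPM level and upload the level array to its offset
 * within the SMC copy of the DPM table; the top level carries the high
 * display watermark.
 */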
2735 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
2736 {
2737 	struct ci_power_info *pi = ci_get_pi(rdev);
2738 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2739 	u32 level_array_address = pi->dpm_table_start +
2740 		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2741 	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
2742 		SMU7_MAX_LEVELS_GRAPHICS;
2743 	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
2744 	u32 i, ret;
2745 
2746 	memset(levels, 0, level_array_size);
2747 
2748 	for (i = 0; i < dpm_table->sclk_table.count; i++) {
2749 		ret = ci_populate_single_graphic_level(rdev,
2750 						       dpm_table->sclk_table.dpm_levels[i].value,
2751 						       (u16)pi->activity_target[i],
2752 						       &pi->smc_state_table.GraphicsLevel[i]);
2753 		if (ret)
2754 			return ret;
2755 		if (i == (dpm_table->sclk_table.count - 1))
2756 			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
2757 				PPSMC_DISPLAY_WATERMARK_HIGH;
2758 	}
2759 
2760 	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
2761 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
2762 		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2763 
2764 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2765 				   (u8 *)levels, level_array_size,
2766 				   pi->sram_end);
2767 	if (ret)
2768 		return ret;
2769 
2770 	return 0;
2771 }
2772 
2773 static int ci_populate_ulv_state(struct radeon_device *rdev,
2774 				 SMU7_Discrete_Ulv *ulv_level)
2775 {
2776 	return ci_populate_ulv_level(rdev, ulv_level);
2777 }
2778 
2779 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
2780 {
2781 	struct ci_power_info *pi = ci_get_pi(rdev);
2782 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2783 	u32 level_array_address = pi->dpm_table_start +
2784 		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2785 	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
2786 		SMU7_MAX_LEVELS_MEMORY;
2787 	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
2788 	u32 i, ret;
2789 
2790 	memset(levels, 0, level_array_size);
2791 
2792 	for (i = 0; i < dpm_table->mclk_table.count; i++) {
2793 		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
2794 			return -EINVAL;
2795 		ret = ci_populate_single_memory_level(rdev,
2796 						      dpm_table->mclk_table.dpm_levels[i].value,
2797 						      &pi->smc_state_table.MemoryLevel[i]);
2798 		if (ret)
2799 			return ret;
2800 	}
2801 
2802 	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
2803 
2804 	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
2805 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
2806 		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2807 
2808 	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
2809 		PPSMC_DISPLAY_WATERMARK_HIGH;
2810 
2811 	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2812 				   (u8 *)levels, level_array_size,
2813 				   pi->sram_end);
2814 	if (ret)
2815 		return ret;
2816 
2817 	return 0;
2818 }
2819 
2820 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
2821 				      struct ci_single_dpm_table* dpm_table,
2822 				      u32 count)
2823 {
2824 	u32 i;
2825 
2826 	dpm_table->count = count;
2827 	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
2828 		dpm_table->dpm_levels[i].enabled = false;
2829 }
2830 
2831 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
2832 				      u32 index, u32 pcie_gen, u32 pcie_lanes)
2833 {
2834 	dpm_table->dpm_levels[index].value = pcie_gen;
2835 	dpm_table->dpm_levels[index].param1 = pcie_lanes;
2836 	dpm_table->dpm_levels[index].enabled = true;
2837 }
2838 
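/*
 * Build the six default PCIe levels from the min/max gen and lane limits
 * of the powersaving and performance ranges.
 */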
2839 static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
2840 {
2841 	struct ci_power_info *pi = ci_get_pi(rdev);
2842 
2843 	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
2844 		return -EINVAL;
2845 
2846 	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
2847 		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
2848 		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
2849 	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
2850 		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
2851 		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
2852 	}
2853 
2854 	ci_reset_single_dpm_table(rdev,
2855 				  &pi->dpm_table.pcie_speed_table,
2856 				  SMU7_MAX_LEVELS_LINK);
2857 
2858 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
2859 				  pi->pcie_gen_powersaving.min,
2860 				  pi->pcie_lane_powersaving.min);
2861 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
2862 				  pi->pcie_gen_performance.min,
2863 				  pi->pcie_lane_performance.min);
2864 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
2865 				  pi->pcie_gen_powersaving.min,
2866 				  pi->pcie_lane_powersaving.max);
2867 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
2868 				  pi->pcie_gen_performance.min,
2869 				  pi->pcie_lane_performance.max);
2870 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
2871 				  pi->pcie_gen_powersaving.max,
2872 				  pi->pcie_lane_powersaving.max);
2873 	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
2874 				  pi->pcie_gen_performance.max,
2875 				  pi->pcie_lane_performance.max);
2876 
2877 	pi->dpm_table.pcie_speed_table.count = 6;
2878 
2879 	return 0;
2880 }
2881 
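/*
 * Seed the driver DPM tables from the vbios dependency tables, collapsing
 * duplicate adjacent clock entries, then add the default PCIe table.
 */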
2882 static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
2883 {
2884 	struct ci_power_info *pi = ci_get_pi(rdev);
2885 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
2886 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2887 	struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
2888 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
2889 	struct radeon_cac_leakage_table *std_voltage_table =
2890 		&rdev->pm.dpm.dyn_state.cac_leakage_table;
2891 	u32 i;
2892 
2893 	if (allowed_sclk_vddc_table == NULL)
2894 		return -EINVAL;
2895 	if (allowed_sclk_vddc_table->count < 1)
2896 		return -EINVAL;
2897 	if (allowed_mclk_table == NULL)
2898 		return -EINVAL;
2899 	if (allowed_mclk_table->count < 1)
2900 		return -EINVAL;
2901 
2902 	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
2903 
2904 	ci_reset_single_dpm_table(rdev,
2905 				  &pi->dpm_table.sclk_table,
2906 				  SMU7_MAX_LEVELS_GRAPHICS);
2907 	ci_reset_single_dpm_table(rdev,
2908 				  &pi->dpm_table.mclk_table,
2909 				  SMU7_MAX_LEVELS_MEMORY);
2910 	ci_reset_single_dpm_table(rdev,
2911 				  &pi->dpm_table.vddc_table,
2912 				  SMU7_MAX_LEVELS_VDDC);
2913 	ci_reset_single_dpm_table(rdev,
2914 				  &pi->dpm_table.vddci_table,
2915 				  SMU7_MAX_LEVELS_VDDCI);
2916 	ci_reset_single_dpm_table(rdev,
2917 				  &pi->dpm_table.mvdd_table,
2918 				  SMU7_MAX_LEVELS_MVDD);
2919 
2920 	pi->dpm_table.sclk_table.count = 0;
2921 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
2922 		if ((i == 0) ||
2923 		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
2924 		     allowed_sclk_vddc_table->entries[i].clk)) {
2925 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
2926 				allowed_sclk_vddc_table->entries[i].clk;
2927 			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
2928 			pi->dpm_table.sclk_table.count++;
2929 		}
2930 	}
2931 
2932 	pi->dpm_table.mclk_table.count = 0;
2933 	for (i = 0; i < allowed_mclk_table->count; i++) {
2934 		if ((i == 0) ||
2935 		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
2936 		     allowed_mclk_table->entries[i].clk)) {
2937 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
2938 				allowed_mclk_table->entries[i].clk;
2939 			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
2940 			pi->dpm_table.mclk_table.count++;
2941 		}
2942 	}
2943 
2944 	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
2945 		pi->dpm_table.vddc_table.dpm_levels[i].value =
2946 			allowed_sclk_vddc_table->entries[i].v;
2947 		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
2948 			std_voltage_table->entries[i].leakage;
2949 		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
2950 	}
2951 	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
2952 
2953 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
2954 	if (allowed_mclk_table) {
2955 		for (i = 0; i < allowed_mclk_table->count; i++) {
2956 			pi->dpm_table.vddci_table.dpm_levels[i].value =
2957 				allowed_mclk_table->entries[i].v;
2958 			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
2959 		}
2960 		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
2961 	}
2962 
2963 	allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
2964 	if (allowed_mclk_table) {
2965 		for (i = 0; i < allowed_mclk_table->count; i++) {
2966 			pi->dpm_table.mvdd_table.dpm_levels[i].value =
2967 				allowed_mclk_table->entries[i].v;
2968 			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
2969 		}
2970 		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
2971 	}
2972 
2973 	ci_setup_default_pcie_tables(rdev);
2974 
2975 	return 0;
2976 }
2977 
2978 static int ci_find_boot_level(struct ci_single_dpm_table *table,
2979 			      u32 value, u32 *boot_level)
2980 {
2981 	u32 i;
2982 	int ret = -EINVAL;
2983 
2984 	for (i = 0; i < table->count; i++) {
2985 		if (value == table->dpm_levels[i].value) {
2986 			*boot_level = i;
2987 			ret = 0;
2988 		}
2989 	}
2990 
2991 	return ret;
2992 }
2993 
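/*
 * Populate the complete SMU7 discrete DPM table (voltage, graphics,
 * memory, ACPI and UVD/VCE/ACP/SAMU levels plus the global policy
 * fields) and copy it, from SystemFlags onward and minus the trailing
 * PID controllers, into SMC SRAM.
 */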
2994 static int ci_init_smc_table(struct radeon_device *rdev)
2995 {
2996 	struct ci_power_info *pi = ci_get_pi(rdev);
2997 	struct ci_ulv_parm *ulv = &pi->ulv;
2998 	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
2999 	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3000 	int ret;
3001 
3002 	ret = ci_setup_default_dpm_tables(rdev);
3003 	if (ret)
3004 		return ret;
3005 
3006 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3007 		ci_populate_smc_voltage_tables(rdev, table);
3008 
3009 	ci_init_fps_limits(rdev);
3010 
3011 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3012 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3013 
3014 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3015 		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3016 
3017 	if (pi->mem_gddr5)
3018 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3019 
3020 	if (ulv->supported) {
3021 		ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3022 		if (ret)
3023 			return ret;
3024 		WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3025 	}
3026 
3027 	ret = ci_populate_all_graphic_levels(rdev);
3028 	if (ret)
3029 		return ret;
3030 
3031 	ret = ci_populate_all_memory_levels(rdev);
3032 	if (ret)
3033 		return ret;
3034 
3035 	ci_populate_smc_link_level(rdev, table);
3036 
3037 	ret = ci_populate_smc_acpi_level(rdev, table);
3038 	if (ret)
3039 		return ret;
3040 
3041 	ret = ci_populate_smc_vce_level(rdev, table);
3042 	if (ret)
3043 		return ret;
3044 
3045 	ret = ci_populate_smc_acp_level(rdev, table);
3046 	if (ret)
3047 		return ret;
3048 
3049 	ret = ci_populate_smc_samu_level(rdev, table);
3050 	if (ret)
3051 		return ret;
3052 
3053 	ret = ci_do_program_memory_timing_parameters(rdev);
3054 	if (ret)
3055 		return ret;
3056 
3057 	ret = ci_populate_smc_uvd_level(rdev, table);
3058 	if (ret)
3059 		return ret;
3060 
3061 	table->UvdBootLevel = 0;
3062 	table->VceBootLevel = 0;
3063 	table->AcpBootLevel = 0;
3064 	table->SamuBootLevel = 0;
3065 	table->GraphicsBootLevel = 0;
3066 	table->MemoryBootLevel = 0;
3067 
3068 	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3069 				 pi->vbios_boot_state.sclk_bootup_value,
3070 				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3071 
3072 	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3073 				 pi->vbios_boot_state.mclk_bootup_value,
3074 				 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3075 
3076 	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3077 	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3078 	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3079 
3080 	ci_populate_smc_initial_state(rdev, radeon_boot_state);
3081 
3082 	ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3083 	if (ret)
3084 		return ret;
3085 
3086 	table->UVDInterval = 1;
3087 	table->VCEInterval = 1;
3088 	table->ACPInterval = 1;
3089 	table->SAMUInterval = 1;
3090 	table->GraphicsVoltageChangeEnable = 1;
3091 	table->GraphicsThermThrottleEnable = 1;
3092 	table->GraphicsInterval = 1;
3093 	table->VoltageInterval = 1;
3094 	table->ThermalInterval = 1;
3095 	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3096 					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3097 	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3098 					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3099 	table->MemoryVoltageChangeEnable = 1;
3100 	table->MemoryInterval = 1;
3101 	table->VoltageResponseTime = 0;
3102 	table->VddcVddciDelta = 4000;
3103 	table->PhaseResponseTime = 0;
3104 	table->MemoryThermThrottleEnable = 1;
3105 	table->PCIeBootLinkLevel = 0;
3106 	table->PCIeGenInterval = 1;
3107 	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3108 		table->SVI2Enable = 1;
3109 	else
3110 		table->SVI2Enable = 0;
3111 
3112 	table->ThermGpio = 17;
3113 	table->SclkStepSize = 0x4000;
3114 
3115 	table->SystemFlags = cpu_to_be32(table->SystemFlags);
3116 	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3117 	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3118 	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3119 	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3120 	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3121 	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3122 	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3123 	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3124 	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3125 	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3126 	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3127 	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3128 	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3129 
3130 	ret = ci_copy_bytes_to_smc(rdev,
3131 				   pi->dpm_table_start +
3132 				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3133 				   (u8 *)&table->SystemFlags,
3134 				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3135 				   pi->sram_end);
3136 	if (ret)
3137 		return ret;
3138 
3139 	return 0;
3140 }
3141 
3142 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3143 				      struct ci_single_dpm_table *dpm_table,
3144 				      u32 low_limit, u32 high_limit)
3145 {
3146 	u32 i;
3147 
3148 	for (i = 0; i < dpm_table->count; i++) {
3149 		if ((dpm_table->dpm_levels[i].value < low_limit) ||
3150 		    (dpm_table->dpm_levels[i].value > high_limit))
3151 			dpm_table->dpm_levels[i].enabled = false;
3152 		else
3153 			dpm_table->dpm_levels[i].enabled = true;
3154 	}
3155 }
3156 
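/*
 * Disable PCIe levels outside the requested speed/lane window, then
 * disable any duplicates left among the enabled levels.
 */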
3157 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3158 				    u32 speed_low, u32 lanes_low,
3159 				    u32 speed_high, u32 lanes_high)
3160 {
3161 	struct ci_power_info *pi = ci_get_pi(rdev);
3162 	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3163 	u32 i, j;
3164 
3165 	for (i = 0; i < pcie_table->count; i++) {
3166 		if ((pcie_table->dpm_levels[i].value < speed_low) ||
3167 		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3168 		    (pcie_table->dpm_levels[i].value > speed_high) ||
3169 		    (pcie_table->dpm_levels[i].param1 > lanes_high))
3170 			pcie_table->dpm_levels[i].enabled = false;
3171 		else
3172 			pcie_table->dpm_levels[i].enabled = true;
3173 	}
3174 
3175 	for (i = 0; i < pcie_table->count; i++) {
3176 		if (pcie_table->dpm_levels[i].enabled) {
3177 			for (j = i + 1; j < pcie_table->count; j++) {
3178 				if (pcie_table->dpm_levels[j].enabled) {
3179 					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3180 					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3181 						pcie_table->dpm_levels[j].enabled = false;
3182 				}
3183 			}
3184 		}
3185 	}
3186 }
3187 
3188 static int ci_trim_dpm_states(struct radeon_device *rdev,
3189 			      struct radeon_ps *radeon_state)
3190 {
3191 	struct ci_ps *state = ci_get_ps(radeon_state);
3192 	struct ci_power_info *pi = ci_get_pi(rdev);
3193 	u32 high_limit_count;
3194 
3195 	if (state->performance_level_count < 1)
3196 		return -EINVAL;
3197 
3198 	if (state->performance_level_count == 1)
3199 		high_limit_count = 0;
3200 	else
3201 		high_limit_count = 1;
3202 
3203 	ci_trim_single_dpm_states(rdev,
3204 				  &pi->dpm_table.sclk_table,
3205 				  state->performance_levels[0].sclk,
3206 				  state->performance_levels[high_limit_count].sclk);
3207 
3208 	ci_trim_single_dpm_states(rdev,
3209 				  &pi->dpm_table.mclk_table,
3210 				  state->performance_levels[0].mclk,
3211 				  state->performance_levels[high_limit_count].mclk);
3212 
3213 	ci_trim_pcie_dpm_states(rdev,
3214 				state->performance_levels[0].pcie_gen,
3215 				state->performance_levels[0].pcie_lane,
3216 				state->performance_levels[high_limit_count].pcie_gen,
3217 				state->performance_levels[high_limit_count].pcie_lane);
3218 
3219 	return 0;
3220 }
3221 
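/*
 * Request the lowest VDDC that satisfies the current display clock,
 * based on the dispclk and sclk voltage dependency tables.
 */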
3222 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3223 {
3224 	struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3225 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3226 	struct radeon_clock_voltage_dependency_table *vddc_table =
3227 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3228 	u32 requested_voltage = 0;
3229 	u32 i;
3230 
3231 	if (disp_voltage_table == NULL)
3232 		return -EINVAL;
3233 	if (!disp_voltage_table->count)
3234 		return -EINVAL;
3235 
3236 	for (i = 0; i < disp_voltage_table->count; i++) {
3237 		if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3238 			requested_voltage = disp_voltage_table->entries[i].v;
3239 	}
3240 
3241 	for (i = 0; i < vddc_table->count; i++) {
3242 		if (requested_voltage <= vddc_table->entries[i].v) {
3243 			requested_voltage = vddc_table->entries[i].v;
3244 			return (ci_send_msg_to_smc_with_parameter(rdev,
3245 								  PPSMC_MSG_VddC_Request,
3246 								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3247 				0 : -EINVAL;
3248 		}
3249 	}
3250 
3251 	return -EINVAL;
3252 }
3253 
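/*
 * Push the sclk/mclk/PCIe level enable masks to the SMC, skipping any
 * domain whose DPM key is disabled, then apply the display minimum
 * voltage request.
 */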
3254 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3255 {
3256 	struct ci_power_info *pi = ci_get_pi(rdev);
3257 	PPSMC_Result result;
3258 
3259 	if (!pi->sclk_dpm_key_disabled) {
3260 		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3261 			result = ci_send_msg_to_smc_with_parameter(rdev,
3262 								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
3263 								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3264 			if (result != PPSMC_Result_OK)
3265 				return -EINVAL;
3266 		}
3267 	}
3268 
3269 	if (!pi->mclk_dpm_key_disabled) {
3270 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3271 			result = ci_send_msg_to_smc_with_parameter(rdev,
3272 								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
3273 								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3274 			if (result != PPSMC_Result_OK)
3275 				return -EINVAL;
3276 		}
3277 	}
3278 
3279 	if (!pi->pcie_dpm_key_disabled) {
3280 		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3281 			result = ci_send_msg_to_smc_with_parameter(rdev,
3282 								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
3283 								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3284 			if (result != PPSMC_Result_OK)
3285 				return -EINVAL;
3286 		}
3287 	}
3288 
3289 	ci_apply_disp_minimum_voltage_request(rdev);
3290 
3291 	return 0;
3292 }
3293 
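/*
 * Check whether the requested state's top sclk/mclk already exist in
 * the DPM tables and record which tables need an overdrive (value)
 * change or a plain re-upload.
 */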
3294 static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3295 						   struct radeon_ps *radeon_state)
3296 {
3297 	struct ci_power_info *pi = ci_get_pi(rdev);
3298 	struct ci_ps *state = ci_get_ps(radeon_state);
3299 	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3300 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3301 	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3302 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3303 	u32 i;
3304 
3305 	pi->need_update_smu7_dpm_table = 0;
3306 
3307 	for (i = 0; i < sclk_table->count; i++) {
3308 		if (sclk == sclk_table->dpm_levels[i].value)
3309 			break;
3310 	}
3311 
3312 	if (i >= sclk_table->count) {
3313 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3314 	} else {
3315 		/* XXX check display min clock requirements */
3316 		if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
3317 			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3318 	}
3319 
3320 	for (i = 0; i < mclk_table->count; i++) {
3321 		if (mclk == mclk_table->dpm_levels[i].value)
3322 			break;
3323 	}
3324 
3325 	if (i >= mclk_table->count)
3326 		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3327 
3328 	if (rdev->pm.dpm.current_active_crtc_count !=
3329 	    rdev->pm.dpm.new_active_crtc_count)
3330 		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3331 }
3332 
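/*
 * Patch the top sclk/mclk table entries for overdrive requests and
 * re-upload whichever level tables were flagged as stale.
 */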
3333 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3334 						       struct radeon_ps *radeon_state)
3335 {
3336 	struct ci_power_info *pi = ci_get_pi(rdev);
3337 	struct ci_ps *state = ci_get_ps(radeon_state);
3338 	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3339 	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3340 	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3341 	int ret;
3342 
3343 	if (!pi->need_update_smu7_dpm_table)
3344 		return 0;
3345 
3346 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3347 		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3348 
3349 	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3350 		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3351 
3352 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3353 		ret = ci_populate_all_graphic_levels(rdev);
3354 		if (ret)
3355 			return ret;
3356 	}
3357 
3358 	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3359 		ret = ci_populate_all_memory_levels(rdev);
3360 		if (ret)
3361 			return ret;
3362 	}
3363 
3364 	return 0;
3365 }
3366 
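/*
 * Build the UVD level enable mask from the dependency-table entries
 * that fit the current voltage limit and, while UVD is active, keep
 * MCLK level 0 disabled (presumably to avoid memory clock switches
 * during decode).
 */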
3367 static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3368 {
3369 	struct ci_power_info *pi = ci_get_pi(rdev);
3370 	const struct radeon_clock_and_voltage_limits *max_limits;
3371 	int i;
3372 
3373 	if (rdev->pm.dpm.ac_power)
3374 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3375 	else
3376 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3377 
3378 	if (enable) {
3379 		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3380 
3381 		for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3382 			if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3383 				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3384 
3385 				if (!pi->caps_uvd_dpm)
3386 					break;
3387 			}
3388 		}
3389 
3390 		ci_send_msg_to_smc_with_parameter(rdev,
3391 						  PPSMC_MSG_UVDDPM_SetEnabledMask,
3392 						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3393 
3394 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3395 			pi->uvd_enabled = true;
3396 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3397 			ci_send_msg_to_smc_with_parameter(rdev,
3398 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3399 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3400 		}
3401 	} else {
3402 		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3403 			pi->uvd_enabled = false;
3404 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3405 			ci_send_msg_to_smc_with_parameter(rdev,
3406 							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3407 							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3408 		}
3409 	}
3410 
3411 	return (ci_send_msg_to_smc(rdev, enable ?
3412 				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3413 		0 : -EINVAL;
3414 }
3415 
3416 #if 0
3417 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3418 {
3419 	struct ci_power_info *pi = ci_get_pi(rdev);
3420 	const struct radeon_clock_and_voltage_limits *max_limits;
3421 	int i;
3422 
3423 	if (rdev->pm.dpm.ac_power)
3424 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3425 	else
3426 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3427 
3428 	if (enable) {
3429 		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3430 		for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3431 			if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3432 				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3433 
3434 				if (!pi->caps_vce_dpm)
3435 					break;
3436 			}
3437 		}
3438 
3439 		ci_send_msg_to_smc_with_parameter(rdev,
3440 						  PPSMC_MSG_VCEDPM_SetEnabledMask,
3441 						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3442 	}
3443 
3444 	return (ci_send_msg_to_smc(rdev, enable ?
3445 				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3446 		0 : -EINVAL;
3447 }
3448 
3449 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3450 {
3451 	struct ci_power_info *pi = ci_get_pi(rdev);
3452 	const struct radeon_clock_and_voltage_limits *max_limits;
3453 	int i;
3454 
3455 	if (rdev->pm.dpm.ac_power)
3456 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3457 	else
3458 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3459 
3460 	if (enable) {
3461 		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
3462 		for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3463 			if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3464 				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
3465 
3466 				if (!pi->caps_samu_dpm)
3467 					break;
3468 			}
3469 		}
3470 
3471 		ci_send_msg_to_smc_with_parameter(rdev,
3472 						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
3473 						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
3474 	}
3475 	return (ci_send_msg_to_smc(rdev, enable ?
3476 				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
3477 		0 : -EINVAL;
3478 }
3479 
3480 static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
3481 {
3482 	struct ci_power_info *pi = ci_get_pi(rdev);
3483 	const struct radeon_clock_and_voltage_limits *max_limits;
3484 	int i;
3485 
3486 	if (rdev->pm.dpm.ac_power)
3487 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3488 	else
3489 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3490 
3491 	if (enable) {
3492 		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
3493 		for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3494 			if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3495 				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
3496 
3497 				if (!pi->caps_acp_dpm)
3498 					break;
3499 			}
3500 		}
3501 
3502 		ci_send_msg_to_smc_with_parameter(rdev,
3503 						  PPSMC_MSG_ACPDPM_SetEnabledMask,
3504 						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
3505 	}
3506 
3507 	return (ci_send_msg_to_smc(rdev, enable ?
3508 				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
3509 		0 : -EINVAL;
3510 }
3511 #endif
3512 
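/*
 * On ungate, program the UVD boot level into the SMC DPM table before
 * re-enabling UVD DPM.
 */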
3513 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
3514 {
3515 	struct ci_power_info *pi = ci_get_pi(rdev);
3516 	u32 tmp;
3517 
3518 	if (!gate) {
3519 		if (pi->caps_uvd_dpm ||
3520 		    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
3521 			pi->smc_state_table.UvdBootLevel = 0;
3522 		else
3523 			pi->smc_state_table.UvdBootLevel =
3524 				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
3525 
3526 		tmp = RREG32_SMC(DPM_TABLE_475);
3527 		tmp &= ~UvdBootLevel_MASK;
3528 		tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
3529 		WREG32_SMC(DPM_TABLE_475, tmp);
3530 	}
3531 
3532 	return ci_enable_uvd_dpm(rdev, !gate);
3533 }
3534 
3535 #if 0
3536 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
3537 {
3538 	u8 i;
3539 	u32 min_evclk = 30000; /* ??? */
3540 	struct radeon_vce_clock_voltage_dependency_table *table =
3541 		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
3542 
3543 	for (i = 0; i < table->count; i++) {
3544 		if (table->entries[i].evclk >= min_evclk)
3545 			return i;
3546 	}
3547 
3548 	return table->count - 1;
3549 }
3550 
3551 static int ci_update_vce_dpm(struct radeon_device *rdev,
3552 			     struct radeon_ps *radeon_new_state,
3553 			     struct radeon_ps *radeon_current_state)
3554 {
3555 	struct ci_power_info *pi = ci_get_pi(rdev);
3556 	bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0);
3557 	bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0);
3558 	int ret = 0;
3559 	u32 tmp;
3560 
3561 	if (new_vce_clock_non_zero != old_vce_clock_non_zero) {
3562 		if (new_vce_clock_non_zero) {
3563 			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
3564 
3565 			tmp = RREG32_SMC(DPM_TABLE_475);
3566 			tmp &= ~VceBootLevel_MASK;
3567 			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
3568 			WREG32_SMC(DPM_TABLE_475, tmp);
3569 
3570 			ret = ci_enable_vce_dpm(rdev, true);
3571 		} else {
3572 			ret = ci_enable_vce_dpm(rdev, false);
3573 		}
3574 	}
3575 	return ret;
3576 }
3577 
3578 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
3579 {
3580 	return ci_enable_samu_dpm(rdev, gate);
3581 }
3582 
3583 static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
3584 {
3585 	struct ci_power_info *pi = ci_get_pi(rdev);
3586 	u32 tmp;
3587 
3588 	if (!gate) {
3589 		pi->smc_state_table.AcpBootLevel = 0;
3590 
3591 		tmp = RREG32_SMC(DPM_TABLE_475);
3592 		tmp &= ~AcpBootLevel_MASK;
3593 		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
3594 		WREG32_SMC(DPM_TABLE_475, tmp);
3595 	}
3596 
3597 	return ci_enable_acp_dpm(rdev, !gate);
3598 }
3599 #endif
3600 
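/*
 * Trim the DPM tables to the requested state and derive the sclk, mclk
 * and PCIe enable masks from the levels that remain.
 */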
3601 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
3602 					     struct radeon_ps *radeon_state)
3603 {
3604 	struct ci_power_info *pi = ci_get_pi(rdev);
3605 	int ret;
3606 
3607 	ret = ci_trim_dpm_states(rdev, radeon_state);
3608 	if (ret)
3609 		return ret;
3610 
3611 	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3612 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
3613 	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3614 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
3615 	pi->last_mclk_dpm_enable_mask =
3616 		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3617 	if (pi->uvd_enabled) {
3618 		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
3619 			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3620 	}
3621 	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
3622 		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
3623 
3624 	return 0;
3625 }
3626 
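/*
 * Return the index of the lowest set bit.  Callers must pass a
 * non-zero mask or the loop will run off the end of the levels.
 */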
3627 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
3628 				       u32 level_mask)
3629 {
3630 	u32 level = 0;
3631 
3632 	while ((level_mask & (1 << level)) == 0)
3633 		level++;
3634 
3635 	return level;
3636 }
3637 
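/*
 * Force each DPM domain to its highest or lowest enabled level, or
 * release forcing for the auto level, polling the current-profile
 * registers until the switch takes effect.
 */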
3639 int ci_dpm_force_performance_level(struct radeon_device *rdev,
3640 				   enum radeon_dpm_forced_level level)
3641 {
3642 	struct ci_power_info *pi = ci_get_pi(rdev);
3643 	PPSMC_Result smc_result;
3644 	u32 tmp, levels, i;
3645 	int ret;
3646 
3647 	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
3648 		if ((!pi->sclk_dpm_key_disabled) &&
3649 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3650 			levels = 0;
3651 			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
3652 			while (tmp >>= 1)
3653 				levels++;
3654 			if (levels) {
3655 				ret = ci_dpm_force_state_sclk(rdev, levels);
3656 				if (ret)
3657 					return ret;
3658 				for (i = 0; i < rdev->usec_timeout; i++) {
3659 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3660 					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3661 					if (tmp == levels)
3662 						break;
3663 					udelay(1);
3664 				}
3665 			}
3666 		}
3667 		if ((!pi->mclk_dpm_key_disabled) &&
3668 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3669 			levels = 0;
3670 			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3671 			while (tmp >>= 1)
3672 				levels++;
3673 			if (levels) {
3674 				ret = ci_dpm_force_state_mclk(rdev, levels);
3675 				if (ret)
3676 					return ret;
3677 				for (i = 0; i < rdev->usec_timeout; i++) {
3678 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3679 					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3680 					if (tmp == levels)
3681 						break;
3682 					udelay(1);
3683 				}
3684 			}
3685 		}
3686 		if ((!pi->pcie_dpm_key_disabled) &&
3687 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3688 			levels = 0;
3689 			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
3690 			while (tmp >>= 1)
3691 				levels++;
3692 			if (levels) {
3693 				ret = ci_dpm_force_state_pcie(rdev, levels);
3694 				if (ret)
3695 					return ret;
3696 				for (i = 0; i < rdev->usec_timeout; i++) {
3697 					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3698 					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3699 					if (tmp == levels)
3700 						break;
3701 					udelay(1);
3702 				}
3703 			}
3704 		}
3705 	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
3706 		if ((!pi->sclk_dpm_key_disabled) &&
3707 		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3708 			levels = ci_get_lowest_enabled_level(rdev,
3709 							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3710 			ret = ci_dpm_force_state_sclk(rdev, levels);
3711 			if (ret)
3712 				return ret;
3713 			for (i = 0; i < rdev->usec_timeout; i++) {
3714 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3715 				       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3716 				if (tmp == levels)
3717 					break;
3718 				udelay(1);
3719 			}
3720 		}
3721 		if ((!pi->mclk_dpm_key_disabled) &&
3722 		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3723 			levels = ci_get_lowest_enabled_level(rdev,
3724 							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3725 			ret = ci_dpm_force_state_mclk(rdev, levels);
3726 			if (ret)
3727 				return ret;
3728 			for (i = 0; i < rdev->usec_timeout; i++) {
3729 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3730 				       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3731 				if (tmp == levels)
3732 					break;
3733 				udelay(1);
3734 			}
3735 		}
3736 		if ((!pi->pcie_dpm_key_disabled) &&
3737 		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3738 			levels = ci_get_lowest_enabled_level(rdev,
3739 							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3740 			ret = ci_dpm_force_state_pcie(rdev, levels);
3741 			if (ret)
3742 				return ret;
3743 			for (i = 0; i < rdev->usec_timeout; i++) {
3744 				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3745 				       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3746 				if (tmp == levels)
3747 					break;
3748 				udelay(1);
3749 			}
3750 		}
3751 	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
3752 		if (!pi->sclk_dpm_key_disabled) {
3753 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
3754 			if (smc_result != PPSMC_Result_OK)
3755 				return -EINVAL;
3756 		}
3757 		if (!pi->mclk_dpm_key_disabled) {
3758 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
3759 			if (smc_result != PPSMC_Result_OK)
3760 				return -EINVAL;
3761 		}
3762 		if (!pi->pcie_dpm_key_disabled) {
3763 			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
3764 			if (smc_result != PPSMC_Result_OK)
3765 				return -EINVAL;
3766 		}
3767 	}
3768 
3769 	rdev->pm.dpm.forced_level = level;
3770 
3771 	return 0;
3772 }
3773 
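/*
 * Append the derived "special" MC registers (the EMRS/MRS/MRS1 command
 * shadows, plus the auto-command register on non-GDDR5 boards) to the
 * register table built from the VBIOS entries.
 */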
3774 static int ci_set_mc_special_registers(struct radeon_device *rdev,
3775 				       struct ci_mc_reg_table *table)
3776 {
3777 	struct ci_power_info *pi = ci_get_pi(rdev);
3778 	u8 i, j, k;
3779 	u32 temp_reg;
3780 
3781 	for (i = 0, j = table->last; i < table->last; i++) {
3782 		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3783 			return -EINVAL;
3784 		switch (table->mc_reg_address[i].s1 << 2) {
3785 		case MC_SEQ_MISC1:
3786 			temp_reg = RREG32(MC_PMG_CMD_EMRS);
3787 			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
3788 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3789 			for (k = 0; k < table->num_entries; k++) {
3790 				table->mc_reg_table_entry[k].mc_data[j] =
3791 					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3792 			}
3793 			j++;
3794 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3795 				return -EINVAL;
3796 
3797 			temp_reg = RREG32(MC_PMG_CMD_MRS);
3798 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
3799 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3800 			for (k = 0; k < table->num_entries; k++) {
3801 				table->mc_reg_table_entry[k].mc_data[j] =
3802 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3803 				if (!pi->mem_gddr5)
3804 					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3805 			}
3806 			j++;
3807 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3808 				return -EINVAL;
3809 
3810 			if (!pi->mem_gddr5) {
3811 				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
3812 				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
3813 				for (k = 0; k < table->num_entries; k++) {
3814 					table->mc_reg_table_entry[k].mc_data[j] =
3815 						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3816 				}
3817 				j++;
3818 				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3819 					return -EINVAL;
3820 			}
3821 			break;
3822 		case MC_SEQ_RESERVE_M:
3823 			temp_reg = RREG32(MC_PMG_CMD_MRS1);
3824 			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
3825 			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3826 			for (k = 0; k < table->num_entries; k++) {
3827 				table->mc_reg_table_entry[k].mc_data[j] =
3828 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3829 			}
3830 			j++;
3831 			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3832 				return -EINVAL;
3833 			break;
3834 		default:
3835 			break;
3836 		}
3837 
3838 	}
3839 
3840 	table->last = j;
3841 
3842 	return 0;
3843 }
3844 
3845 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
3846 {
3847 	bool result = true;
3848 
3849 	switch (in_reg) {
3850 	case MC_SEQ_RAS_TIMING >> 2:
3851 		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
3852 		break;
3853 	case MC_SEQ_DLL_STBY >> 2:
3854 		*out_reg = MC_SEQ_DLL_STBY_LP >> 2;
3855 		break;
3856 	case MC_SEQ_G5PDX_CMD0 >> 2:
3857 		*out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
3858 		break;
3859 	case MC_SEQ_G5PDX_CMD1 >> 2:
3860 		*out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
3861 		break;
3862 	case MC_SEQ_G5PDX_CTRL >> 2:
3863 		*out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
3864 		break;
3865 	case MC_SEQ_CAS_TIMING >> 2:
3866 		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
3867 		break;
3868 	case MC_SEQ_MISC_TIMING >> 2:
3869 		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
3870 		break;
3871 	case MC_SEQ_MISC_TIMING2 >> 2:
3872 		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
3873 		break;
3874 	case MC_SEQ_PMG_DVS_CMD >> 2:
3875 		*out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
3876 		break;
3877 	case MC_SEQ_PMG_DVS_CTL >> 2:
3878 		*out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
3879 		break;
3880 	case MC_SEQ_RD_CTL_D0 >> 2:
3881 		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
3882 		break;
3883 	case MC_SEQ_RD_CTL_D1 >> 2:
3884 		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
3885 		break;
3886 	case MC_SEQ_WR_CTL_D0 >> 2:
3887 		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
3888 		break;
3889 	case MC_SEQ_WR_CTL_D1 >> 2:
3890 		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
3891 		break;
3892 	case MC_PMG_CMD_EMRS >> 2:
3893 		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3894 		break;
3895 	case MC_PMG_CMD_MRS >> 2:
3896 		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3897 		break;
3898 	case MC_PMG_CMD_MRS1 >> 2:
3899 		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3900 		break;
3901 	case MC_SEQ_PMG_TIMING >> 2:
3902 		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
3903 		break;
3904 	case MC_PMG_CMD_MRS2 >> 2:
3905 		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
3906 		break;
3907 	case MC_SEQ_WR_CTL_2 >> 2:
3908 		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
3909 		break;
3910 	default:
3911 		result = false;
3912 		break;
3913 	}
3914 
3915 	return result;
3916 }
3917 
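/*
 * Mark a register column valid only if its value differs between some
 * pair of consecutive entries; constant columns need not be uploaded
 * per level.
 */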
3918 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
3919 {
3920 	u8 i, j;
3921 
3922 	for (i = 0; i < table->last; i++) {
3923 		for (j = 1; j < table->num_entries; j++) {
3924 			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3925 			    table->mc_reg_table_entry[j].mc_data[i]) {
3926 				table->valid_flag |= 1 << i;
3927 				break;
3928 			}
3929 		}
3930 	}
3931 }
3932 
3933 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
3934 {
3935 	u32 i;
3936 	u16 address;
3937 
3938 	for (i = 0; i < table->last; i++) {
3939 		table->mc_reg_address[i].s0 =
3940 			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
3941 			address : table->mc_reg_address[i].s1;
3942 	}
3943 }
3944 
3945 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
3946 				      struct ci_mc_reg_table *ci_table)
3947 {
3948 	u8 i, j;
3949 
3950 	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3951 		return -EINVAL;
3952 	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
3953 		return -EINVAL;
3954 
3955 	for (i = 0; i < table->last; i++)
3956 		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
3957 
3958 	ci_table->last = table->last;
3959 
3960 	for (i = 0; i < table->num_entries; i++) {
3961 		ci_table->mc_reg_table_entry[i].mclk_max =
3962 			table->mc_reg_table_entry[i].mclk_max;
3963 		for (j = 0; j < table->last; j++)
3964 			ci_table->mc_reg_table_entry[i].mc_data[j] =
3965 				table->mc_reg_table_entry[i].mc_data[j];
3966 	}
3967 	ci_table->num_entries = table->num_entries;
3968 
3969 	return 0;
3970 }
3971 
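/*
 * Mirror the live MC sequencer registers into their _LP shadows, fetch
 * the AC-timing table from the VBIOS and convert it into the driver's
 * MC register table.
 */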
3972 static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
3973 {
3974 	struct ci_power_info *pi = ci_get_pi(rdev);
3975 	struct atom_mc_reg_table *table;
3976 	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
3977 	u8 module_index = rv770_get_memory_module_index(rdev);
3978 	int ret;
3979 
3980 	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
3981 	if (!table)
3982 		return -ENOMEM;
3983 
3984 	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
3985 	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
3986 	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
3987 	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
3988 	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
3989 	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
3990 	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
3991 	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
3992 	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
3993 	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
3994 	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
3995 	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
3996 	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
3997 	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
3998 	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
3999 	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4000 	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4001 	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4002 	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4003 	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4004 
4005 	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4006 	if (ret)
4007 		goto init_mc_done;
4008 
4009 	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4010 	if (ret)
4011 		goto init_mc_done;
4012 
4013 	ci_set_s0_mc_reg_index(ci_table);
4014 
4015 	ret = ci_set_mc_special_registers(rdev, ci_table);
4016 	if (ret)
4017 		goto init_mc_done;
4018 
4019 	ci_set_valid_flag(ci_table);
4020 
4021 init_mc_done:
4022 	kfree(table);
4023 
4024 	return ret;
4025 }
4026 
4027 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4028 					SMU7_Discrete_MCRegisters *mc_reg_table)
4029 {
4030 	struct ci_power_info *pi = ci_get_pi(rdev);
4031 	u32 i, j;
4032 
4033 	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4034 		if (pi->mc_reg_table.valid_flag & (1 << j)) {
4035 			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4036 				return -EINVAL;
4037 			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4038 			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4039 			i++;
4040 		}
4041 	}
4042 
4043 	mc_reg_table->last = (u8)i;
4044 
4045 	return 0;
4046 }
4047 
4048 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4049 				    SMU7_Discrete_MCRegisterSet *data,
4050 				    u32 num_entries, u32 valid_flag)
4051 {
4052 	u32 i, j;
4053 
4054 	for (i = 0, j = 0; j < num_entries; j++) {
4055 		if (valid_flag & (1 << j)) {
4056 			data->value[i] = cpu_to_be32(entry->mc_data[j]);
4057 			i++;
4058 		}
4059 	}
4060 }
4061 
4062 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4063 						 const u32 memory_clock,
4064 						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4065 {
4066 	struct ci_power_info *pi = ci_get_pi(rdev);
4067 	u32 i = 0;
4068 
4069 	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4070 		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4071 			break;
4072 	}
4073 
4074 	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4075 		--i;
4076 
4077 	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4078 				mc_reg_table_data, pi->mc_reg_table.last,
4079 				pi->mc_reg_table.valid_flag);
4080 }
4081 
4082 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4083 					   SMU7_Discrete_MCRegisters *mc_reg_table)
4084 {
4085 	struct ci_power_info *pi = ci_get_pi(rdev);
4086 	u32 i;
4087 
4088 	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4089 		ci_convert_mc_reg_table_entry_to_smc(rdev,
4090 						     pi->dpm_table.mclk_table.dpm_levels[i].value,
4091 						     &mc_reg_table->data[i]);
4092 }
4093 
4094 static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4095 {
4096 	struct ci_power_info *pi = ci_get_pi(rdev);
4097 	int ret;
4098 
4099 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4100 
4101 	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4102 	if (ret)
4103 		return ret;
4104 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4105 
4106 	return ci_copy_bytes_to_smc(rdev,
4107 				    pi->mc_reg_table_start,
4108 				    (u8 *)&pi->smc_mc_reg_table,
4109 				    sizeof(SMU7_Discrete_MCRegisters),
4110 				    pi->sram_end);
4111 }
4112 
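/*
 * Re-upload only the per-level MC register data, and only when an
 * overdrive MCLK change has invalidated the values programmed at init.
 */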
4113 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4114 {
4115 	struct ci_power_info *pi = ci_get_pi(rdev);
4116 
4117 	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4118 		return 0;
4119 
4120 	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4121 
4122 	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4123 
4124 	return ci_copy_bytes_to_smc(rdev,
4125 				    pi->mc_reg_table_start +
4126 				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
4127 				    (u8 *)&pi->smc_mc_reg_table.data[0],
4128 				    sizeof(SMU7_Discrete_MCRegisterSet) *
4129 				    pi->dpm_table.mclk_table.count,
4130 				    pi->sram_end);
4131 }
4132 
4133 static void ci_enable_voltage_control(struct radeon_device *rdev)
4134 {
4135 	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4136 
4137 	tmp |= VOLT_PWRMGT_EN;
4138 	WREG32_SMC(GENERAL_PWRMGT, tmp);
4139 }
4140 
4141 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4142 						      struct radeon_ps *radeon_state)
4143 {
4144 	struct ci_ps *state = ci_get_ps(radeon_state);
4145 	int i;
4146 	u16 pcie_speed, max_speed = 0;
4147 
4148 	for (i = 0; i < state->performance_level_count; i++) {
4149 		pcie_speed = state->performance_levels[i].pcie_gen;
4150 		if (max_speed < pcie_speed)
4151 			max_speed = pcie_speed;
4152 	}
4153 
4154 	return max_speed;
4155 }
4156 
4157 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4158 {
4159 	u32 speed_cntl = 0;
4160 
4161 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4162 	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4163 
4164 	return (u16)speed_cntl;
4165 }
4166 
4167 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4168 {
4169 	u32 link_width = 0;
4170 
4171 	link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4172 	link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4173 
4174 	switch (link_width) {
4175 	case RADEON_PCIE_LC_LINK_WIDTH_X1:
4176 		return 1;
4177 	case RADEON_PCIE_LC_LINK_WIDTH_X2:
4178 		return 2;
4179 	case RADEON_PCIE_LC_LINK_WIDTH_X4:
4180 		return 4;
4181 	case RADEON_PCIE_LC_LINK_WIDTH_X8:
4182 		return 8;
4183 	case RADEON_PCIE_LC_LINK_WIDTH_X12:
4184 		/* not actually supported */
4185 		return 12;
4186 	case RADEON_PCIE_LC_LINK_WIDTH_X0:
4187 	case RADEON_PCIE_LC_LINK_WIDTH_X16:
4188 	default:
4189 		return 16;
4190 	}
4191 }
4192 
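/*
 * If the new state needs a faster link, ask the platform via ACPI for
 * the higher PCIe gen before switching states; if it needs a slower
 * one, just note that a notification is due afterwards.
 */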
4193 static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4194 							     struct radeon_ps *radeon_new_state,
4195 							     struct radeon_ps *radeon_current_state)
4196 {
4197 	struct ci_power_info *pi = ci_get_pi(rdev);
4198 	enum radeon_pcie_gen target_link_speed =
4199 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4200 	enum radeon_pcie_gen current_link_speed;
4201 
4202 	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4203 		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4204 	else
4205 		current_link_speed = pi->force_pcie_gen;
4206 
4207 	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4208 	pi->pspp_notify_required = false;
4209 	if (target_link_speed > current_link_speed) {
4210 		switch (target_link_speed) {
4211 #ifdef CONFIG_ACPI
4212 		case RADEON_PCIE_GEN3:
4213 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4214 				break;
4215 			pi->force_pcie_gen = RADEON_PCIE_GEN2;
4216 			if (current_link_speed == RADEON_PCIE_GEN2)
4217 				break;
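			/* fall through: retry with a gen2 request */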
4218 		case RADEON_PCIE_GEN2:
4219 			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4220 				break;
4221 #endif
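			/* fall through: pin the link at its current speed */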
4222 		default:
4223 			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4224 			break;
4225 		}
4226 	} else {
4227 		if (target_link_speed < current_link_speed)
4228 			pi->pspp_notify_required = true;
4229 	}
4230 }
4231 
4232 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4233 							   struct radeon_ps *radeon_new_state,
4234 							   struct radeon_ps *radeon_current_state)
4235 {
4236 	struct ci_power_info *pi = ci_get_pi(rdev);
4237 	enum radeon_pcie_gen target_link_speed =
4238 		ci_get_maximum_link_speed(rdev, radeon_new_state);
4239 	u8 request;
4240 
4241 	if (pi->pspp_notify_required) {
4242 		if (target_link_speed == RADEON_PCIE_GEN3)
4243 			request = PCIE_PERF_REQ_PECI_GEN3;
4244 		else if (target_link_speed == RADEON_PCIE_GEN2)
4245 			request = PCIE_PERF_REQ_PECI_GEN2;
4246 		else
4247 			request = PCIE_PERF_REQ_PECI_GEN1;
4248 
4249 		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4250 		    (ci_get_current_pcie_speed(rdev) > 0))
4251 			return;
4252 
4253 #ifdef CONFIG_ACPI
4254 		radeon_acpi_pcie_performance_request(rdev, request, false);
4255 #endif
4256 	}
4257 }
4258 
4259 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4260 {
4261 	struct ci_power_info *pi = ci_get_pi(rdev);
4262 	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4263 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4264 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4265 		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4266 	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4267 		&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4268 
4269 	if (allowed_sclk_vddc_table == NULL)
4270 		return -EINVAL;
4271 	if (allowed_sclk_vddc_table->count < 1)
4272 		return -EINVAL;
4273 	if (allowed_mclk_vddc_table == NULL)
4274 		return -EINVAL;
4275 	if (allowed_mclk_vddc_table->count < 1)
4276 		return -EINVAL;
4277 	if (allowed_mclk_vddci_table == NULL)
4278 		return -EINVAL;
4279 	if (allowed_mclk_vddci_table->count < 1)
4280 		return -EINVAL;
4281 
4282 	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4283 	pi->max_vddc_in_pp_table =
4284 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4285 
4286 	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4287 	pi->max_vddci_in_pp_table =
4288 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4289 
4290 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4291 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4292 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4293 		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
4294 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4295 		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4296 	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4297 		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4298 
4299 	return 0;
4300 }
4301 
4302 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4303 {
4304 	struct ci_power_info *pi = ci_get_pi(rdev);
4305 	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4306 	u32 leakage_index;
4307 
4308 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4309 		if (leakage_table->leakage_id[leakage_index] == *vddc) {
4310 			*vddc = leakage_table->actual_voltage[leakage_index];
4311 			break;
4312 		}
4313 	}
4314 }
4315 
4316 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4317 {
4318 	struct ci_power_info *pi = ci_get_pi(rdev);
4319 	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4320 	u32 leakage_index;
4321 
4322 	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4323 		if (leakage_table->leakage_id[leakage_index] == *vddci) {
4324 			*vddci = leakage_table->actual_voltage[leakage_index];
4325 			break;
4326 		}
4327 	}
4328 }
4329 
4330 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4331 								      struct radeon_clock_voltage_dependency_table *table)
4332 {
4333 	u32 i;
4334 
4335 	if (table) {
4336 		for (i = 0; i < table->count; i++)
4337 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4338 	}
4339 }
4340 
4341 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4342 								       struct radeon_clock_voltage_dependency_table *table)
4343 {
4344 	u32 i;
4345 
4346 	if (table) {
4347 		for (i = 0; i < table->count; i++)
4348 			ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4349 	}
4350 }
4351 
4352 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4353 									  struct radeon_vce_clock_voltage_dependency_table *table)
4354 {
4355 	u32 i;
4356 
4357 	if (table) {
4358 		for (i = 0; i < table->count; i++)
4359 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4360 	}
4361 }
4362 
4363 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4364 									  struct radeon_uvd_clock_voltage_dependency_table *table)
4365 {
4366 	u32 i;
4367 
4368 	if (table) {
4369 		for (i = 0; i < table->count; i++)
4370 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4371 	}
4372 }
4373 
4374 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4375 								   struct radeon_phase_shedding_limits_table *table)
4376 {
4377 	u32 i;
4378 
4379 	if (table) {
4380 		for (i = 0; i < table->count; i++)
4381 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
4382 	}
4383 }
4384 
4385 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
4386 							    struct radeon_clock_and_voltage_limits *table)
4387 {
4388 	if (table) {
4389 		ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
4390 		ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
4391 	}
4392 }
4393 
4394 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
4395 							 struct radeon_cac_leakage_table *table)
4396 {
4397 	u32 i;
4398 
4399 	if (table) {
4400 		for (i = 0; i < table->count; i++)
4401 			ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
4402 	}
4403 }
4404 
4405 static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
4406 {
4408 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4409 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
4410 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4411 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
4412 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4413 								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
4414 	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
4415 								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
4416 	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4417 								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
4418 	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4419 								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
4420 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4421 								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
4422 	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4423 								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
4424 	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
4425 							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
4426 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4427 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
4428 	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4429 							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
4430 	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
4431 						     &rdev->pm.dpm.dyn_state.cac_leakage_table);
4433 }
4434 
4435 static void ci_get_memory_type(struct radeon_device *rdev)
4436 {
4437 	struct ci_power_info *pi = ci_get_pi(rdev);
4438 	u32 tmp;
4439 
4440 	tmp = RREG32(MC_SEQ_MISC0);
4441 
4442 	if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
4443 	    MC_SEQ_MISC0_GDDR5_VALUE)
4444 		pi->mem_gddr5 = true;
4445 	else
4446 		pi->mem_gddr5 = false;
4448 }
4449 
4450 void ci_update_current_ps(struct radeon_device *rdev,
4451 			  struct radeon_ps *rps)
4452 {
4453 	struct ci_ps *new_ps = ci_get_ps(rps);
4454 	struct ci_power_info *pi = ci_get_pi(rdev);
4455 
4456 	pi->current_rps = *rps;
4457 	pi->current_ps = *new_ps;
4458 	pi->current_rps.ps_priv = &pi->current_ps;
4459 }
4460 
4461 void ci_update_requested_ps(struct radeon_device *rdev,
4462 			    struct radeon_ps *rps)
4463 {
4464 	struct ci_ps *new_ps = ci_get_ps(rps);
4465 	struct ci_power_info *pi = ci_get_pi(rdev);
4466 
4467 	pi->requested_rps = *rps;
4468 	pi->requested_ps = *new_ps;
4469 	pi->requested_rps.ps_priv = &pi->requested_ps;
4470 }
4471 
4472 int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
4473 {
4474 	struct ci_power_info *pi = ci_get_pi(rdev);
4475 	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
4476 	struct radeon_ps *new_ps = &requested_ps;
4477 
4478 	ci_update_requested_ps(rdev, new_ps);
4479 
4480 	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
4481 
4482 	return 0;
4483 }
4484 
4485 void ci_dpm_post_set_power_state(struct radeon_device *rdev)
4486 {
4487 	struct ci_power_info *pi = ci_get_pi(rdev);
4488 	struct radeon_ps *new_ps = &pi->requested_rps;
4489 
4490 	ci_update_current_ps(rdev, new_ps);
4491 }
4492 
4494 void ci_dpm_setup_asic(struct radeon_device *rdev)
4495 {
4496 	ci_read_clock_registers(rdev);
4497 	ci_get_memory_type(rdev);
4498 	ci_enable_acpi_power_management(rdev);
4499 	ci_init_sclk_t(rdev);
4500 }
4501 
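/*
 * Bring DPM up from cold: build the voltage and MC register tables,
 * upload the firmware and SMC state table, start the SMC, then enable
 * ULV, deep sleep, DIdT, CAC and power containment before settling on
 * the boot state.
 */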
4502 int ci_dpm_enable(struct radeon_device *rdev)
4503 {
4504 	struct ci_power_info *pi = ci_get_pi(rdev);
4505 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4506 	int ret;
4507 
4508 	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4509 			     RADEON_CG_BLOCK_MC |
4510 			     RADEON_CG_BLOCK_SDMA |
4511 			     RADEON_CG_BLOCK_BIF |
4512 			     RADEON_CG_BLOCK_UVD |
4513 			     RADEON_CG_BLOCK_HDP), false);
4514 
4515 	if (ci_is_smc_running(rdev))
4516 		return -EINVAL;
4517 	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
4518 		ci_enable_voltage_control(rdev);
4519 		ret = ci_construct_voltage_tables(rdev);
4520 		if (ret) {
4521 			DRM_ERROR("ci_construct_voltage_tables failed\n");
4522 			return ret;
4523 		}
4524 	}
4525 	if (pi->caps_dynamic_ac_timing) {
4526 		ret = ci_initialize_mc_reg_table(rdev);
4527 		if (ret)
4528 			pi->caps_dynamic_ac_timing = false;
4529 	}
4530 	if (pi->dynamic_ss)
4531 		ci_enable_spread_spectrum(rdev, true);
4532 	if (pi->thermal_protection)
4533 		ci_enable_thermal_protection(rdev, true);
4534 	ci_program_sstp(rdev);
4535 	ci_enable_display_gap(rdev);
4536 	ci_program_vc(rdev);
4537 	ret = ci_upload_firmware(rdev);
4538 	if (ret) {
4539 		DRM_ERROR("ci_upload_firmware failed\n");
4540 		return ret;
4541 	}
4542 	ret = ci_process_firmware_header(rdev);
4543 	if (ret) {
4544 		DRM_ERROR("ci_process_firmware_header failed\n");
4545 		return ret;
4546 	}
4547 	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
4548 	if (ret) {
4549 		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
4550 		return ret;
4551 	}
4552 	ret = ci_init_smc_table(rdev);
4553 	if (ret) {
4554 		DRM_ERROR("ci_init_smc_table failed\n");
4555 		return ret;
4556 	}
4557 	ret = ci_init_arb_table_index(rdev);
4558 	if (ret) {
4559 		DRM_ERROR("ci_init_arb_table_index failed\n");
4560 		return ret;
4561 	}
4562 	if (pi->caps_dynamic_ac_timing) {
4563 		ret = ci_populate_initial_mc_reg_table(rdev);
4564 		if (ret) {
4565 			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
4566 			return ret;
4567 		}
4568 	}
4569 	ret = ci_populate_pm_base(rdev);
4570 	if (ret) {
4571 		DRM_ERROR("ci_populate_pm_base failed\n");
4572 		return ret;
4573 	}
4574 	ci_dpm_start_smc(rdev);
4575 	ci_enable_vr_hot_gpio_interrupt(rdev);
4576 	ret = ci_notify_smc_display_change(rdev, false);
4577 	if (ret) {
4578 		DRM_ERROR("ci_notify_smc_display_change failed\n");
4579 		return ret;
4580 	}
4581 	ci_enable_sclk_control(rdev, true);
4582 	ret = ci_enable_ulv(rdev, true);
4583 	if (ret) {
4584 		DRM_ERROR("ci_enable_ulv failed\n");
4585 		return ret;
4586 	}
4587 	ret = ci_enable_ds_master_switch(rdev, true);
4588 	if (ret) {
4589 		DRM_ERROR("ci_enable_ds_master_switch failed\n");
4590 		return ret;
4591 	}
4592 	ret = ci_start_dpm(rdev);
4593 	if (ret) {
4594 		DRM_ERROR("ci_start_dpm failed\n");
4595 		return ret;
4596 	}
4597 	ret = ci_enable_didt(rdev, true);
4598 	if (ret) {
4599 		DRM_ERROR("ci_enable_didt failed\n");
4600 		return ret;
4601 	}
4602 	ret = ci_enable_smc_cac(rdev, true);
4603 	if (ret) {
4604 		DRM_ERROR("ci_enable_smc_cac failed\n");
4605 		return ret;
4606 	}
4607 	ret = ci_enable_power_containment(rdev, true);
4608 	if (ret) {
4609 		DRM_ERROR("ci_enable_power_containment failed\n");
4610 		return ret;
4611 	}
4612 	if (rdev->irq.installed &&
4613 	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
4614 #if 0
4615 		PPSMC_Result result;
4616 #endif
4617 		ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
4618 		if (ret) {
4619 			DRM_ERROR("ci_set_thermal_temperature_range failed\n");
4620 			return ret;
4621 		}
4622 		rdev->irq.dpm_thermal = true;
4623 		radeon_irq_set(rdev);
4624 #if 0
4625 		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
4626 
4627 		if (result != PPSMC_Result_OK)
4628 			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
4629 #endif
4630 	}
4631 
4632 	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
4633 
4634 	ci_dpm_powergate_uvd(rdev, true);
4635 
4636 	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4637 			     RADEON_CG_BLOCK_MC |
4638 			     RADEON_CG_BLOCK_SDMA |
4639 			     RADEON_CG_BLOCK_BIF |
4640 			     RADEON_CG_BLOCK_UVD |
4641 			     RADEON_CG_BLOCK_HDP), true);
4642 
4643 	ci_update_current_ps(rdev, boot_ps);
4644 
4645 	return 0;
4646 }
4647 
4648 void ci_dpm_disable(struct radeon_device *rdev)
4649 {
4650 	struct ci_power_info *pi = ci_get_pi(rdev);
4651 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4652 
4653 	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4654 			     RADEON_CG_BLOCK_MC |
4655 			     RADEON_CG_BLOCK_SDMA |
4656 			     RADEON_CG_BLOCK_UVD |
4657 			     RADEON_CG_BLOCK_HDP), false);
4658 
4659 	ci_dpm_powergate_uvd(rdev, false);
4660 
4661 	if (!ci_is_smc_running(rdev))
4662 		return;
4663 
4664 	if (pi->thermal_protection)
4665 		ci_enable_thermal_protection(rdev, false);
4666 	ci_enable_power_containment(rdev, false);
4667 	ci_enable_smc_cac(rdev, false);
4668 	ci_enable_didt(rdev, false);
4669 	ci_enable_spread_spectrum(rdev, false);
4670 	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
4671 	ci_stop_dpm(rdev);
4672 	ci_enable_ds_master_switch(rdev, true);
4673 	ci_enable_ulv(rdev, false);
4674 	ci_clear_vc(rdev);
4675 	ci_reset_to_default(rdev);
4676 	ci_dpm_stop_smc(rdev);
4677 	ci_force_switch_to_arb_f0(rdev);
4678 
4679 	ci_update_current_ps(rdev, boot_ps);
4680 }
4681 
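/*
 * Switch the SMC to the requested state: freeze sclk/mclk DPM, upload
 * the new levels, enable masks and MC registers, then unfreeze and
 * return level forcing to auto.
 */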
4682 int ci_dpm_set_power_state(struct radeon_device *rdev)
4683 {
4684 	struct ci_power_info *pi = ci_get_pi(rdev);
4685 	struct radeon_ps *new_ps = &pi->requested_rps;
4686 	struct radeon_ps *old_ps = &pi->current_rps;
4687 	int ret;
4688 
4689 	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4690 			     RADEON_CG_BLOCK_MC |
4691 			     RADEON_CG_BLOCK_SDMA |
4692 			     RADEON_CG_BLOCK_BIF |
4693 			     RADEON_CG_BLOCK_UVD |
4694 			     RADEON_CG_BLOCK_HDP), false);
4695 
4696 	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
4697 	if (pi->pcie_performance_request)
4698 		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
4699 	ret = ci_freeze_sclk_mclk_dpm(rdev);
4700 	if (ret) {
4701 		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
4702 		return ret;
4703 	}
4704 	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
4705 	if (ret) {
4706 		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
4707 		return ret;
4708 	}
4709 	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
4710 	if (ret) {
4711 		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
4712 		return ret;
4713 	}
4714 #if 0
4715 	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
4716 	if (ret) {
4717 		DRM_ERROR("ci_update_vce_dpm failed\n");
4718 		return ret;
4719 	}
4720 #endif
4721 	ret = ci_update_sclk_t(rdev);
4722 	if (ret) {
4723 		DRM_ERROR("ci_update_sclk_t failed\n");
4724 		return ret;
4725 	}
4726 	if (pi->caps_dynamic_ac_timing) {
4727 		ret = ci_update_and_upload_mc_reg_table(rdev);
4728 		if (ret) {
4729 			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
4730 			return ret;
4731 		}
4732 	}
4733 	ret = ci_program_memory_timing_parameters(rdev);
4734 	if (ret) {
4735 		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
4736 		return ret;
4737 	}
4738 	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
4739 	if (ret) {
4740 		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
4741 		return ret;
4742 	}
4743 	ret = ci_upload_dpm_level_enable_mask(rdev);
4744 	if (ret) {
4745 		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
4746 		return ret;
4747 	}
4748 	if (pi->pcie_performance_request)
4749 		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
4750 
4751 	ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
4752 	if (ret) {
4753 		DRM_ERROR("ci_dpm_force_performance_level failed\n");
4754 		return ret;
4755 	}
4756 
4757 	cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
4758 			     RADEON_CG_BLOCK_MC |
4759 			     RADEON_CG_BLOCK_SDMA |
4760 			     RADEON_CG_BLOCK_BIF |
4761 			     RADEON_CG_BLOCK_UVD |
4762 			     RADEON_CG_BLOCK_HDP), true);
4763 
4764 	return 0;
4765 }
4766 
4767 int ci_dpm_power_control_set_level(struct radeon_device *rdev)
4768 {
4769 	return ci_power_control_set_level(rdev);
4770 }
4771 
4772 void ci_dpm_reset_asic(struct radeon_device *rdev)
4773 {
4774 	ci_set_boot_state(rdev);
4775 }
4776 
4777 void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
4778 {
4779 	ci_program_display_gap(rdev);
4780 }
4781 
4782 union power_info {
4783 	struct _ATOM_POWERPLAY_INFO info;
4784 	struct _ATOM_POWERPLAY_INFO_V2 info_2;
4785 	struct _ATOM_POWERPLAY_INFO_V3 info_3;
4786 	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
4787 	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
4788 	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
4789 };
4790 
4791 union pplib_clock_info {
4792 	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
4793 	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
4794 	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
4795 	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
4796 	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
4797 	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
4798 };
4799 
4800 union pplib_power_state {
4801 	struct _ATOM_PPLIB_STATE v1;
4802 	struct _ATOM_PPLIB_STATE_V2 v2;
4803 };
4804 
4805 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
4806 					  struct radeon_ps *rps,
4807 					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
4808 					  u8 table_rev)
4809 {
4810 	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
4811 	rps->class = le16_to_cpu(non_clock_info->usClassification);
4812 	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
4813 
4814 	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
4815 		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
4816 		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
4817 	} else {
4818 		rps->vclk = 0;
4819 		rps->dclk = 0;
4820 	}
4821 
4822 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
4823 		rdev->pm.dpm.boot_ps = rps;
4824 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
4825 		rdev->pm.dpm.uvd_ps = rps;
4826 }
4827 
4828 static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
4829 				      struct radeon_ps *rps, int index,
4830 				      union pplib_clock_info *clock_info)
4831 {
4832 	struct ci_power_info *pi = ci_get_pi(rdev);
4833 	struct ci_ps *ps = ci_get_ps(rps);
4834 	struct ci_pl *pl = &ps->performance_levels[index];
4835 
4836 	ps->performance_level_count = index + 1;
4837 
4838 	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
4839 	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
4840 	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
4841 	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
4842 
4843 	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
4844 						 pi->sys_pcie_mask,
4845 						 pi->vbios_boot_state.pcie_gen_bootup_value,
4846 						 clock_info->ci.ucPCIEGen);
4847 	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
4848 						   pi->vbios_boot_state.pcie_lane_bootup_value,
4849 						   le16_to_cpu(clock_info->ci.usPCIELane));
4850 
4851 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
4852 		pi->acpi_pcie_gen = pl->pcie_gen;
4853 	}
4854 
4855 	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
4856 		pi->ulv.supported = true;
4857 		pi->ulv.pl = *pl;
4858 		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
4859 	}
4860 
4861 	/* patch up boot state */
4862 	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
4863 		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
4864 		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
4865 		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
4866 		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
4867 	}
4868 
4869 	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
4870 	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
4871 		pi->use_pcie_powersaving_levels = true;
4872 		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
4873 			pi->pcie_gen_powersaving.max = pl->pcie_gen;
4874 		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
4875 			pi->pcie_gen_powersaving.min = pl->pcie_gen;
4876 		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
4877 			pi->pcie_lane_powersaving.max = pl->pcie_lane;
4878 		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
4879 			pi->pcie_lane_powersaving.min = pl->pcie_lane;
4880 		break;
4881 	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
4882 		pi->use_pcie_performance_levels = true;
4883 		if (pi->pcie_gen_performance.max < pl->pcie_gen)
4884 			pi->pcie_gen_performance.max = pl->pcie_gen;
4885 		if (pi->pcie_gen_performance.min > pl->pcie_gen)
4886 			pi->pcie_gen_performance.min = pl->pcie_gen;
4887 		if (pi->pcie_lane_performance.max < pl->pcie_lane)
4888 			pi->pcie_lane_performance.max = pl->pcie_lane;
4889 		if (pi->pcie_lane_performance.min > pl->pcie_lane)
4890 			pi->pcie_lane_performance.min = pl->pcie_lane;
4891 		break;
4892 	default:
4893 		break;
4894 	}
4895 }
4896 
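/*
 * Walk the ATOM PowerPlay state array and build the driver's power
 * state list, one ci_ps with up to CISLANDS_MAX_HARDWARE_POWERLEVELS
 * levels per entry.
 */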
4897 static int ci_parse_power_table(struct radeon_device *rdev)
4898 {
4899 	struct radeon_mode_info *mode_info = &rdev->mode_info;
4900 	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
4901 	union pplib_power_state *power_state;
4902 	int i, j, k, non_clock_array_index, clock_array_index;
4903 	union pplib_clock_info *clock_info;
4904 	struct _StateArray *state_array;
4905 	struct _ClockInfoArray *clock_info_array;
4906 	struct _NonClockInfoArray *non_clock_info_array;
4907 	union power_info *power_info;
4908 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
4909 	u16 data_offset;
4910 	u8 frev, crev;
4911 	u8 *power_state_offset;
4912 	struct ci_ps *ps;
4913 
4914 	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
4915 				   &frev, &crev, &data_offset))
4916 		return -EINVAL;
4917 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
4918 
4919 	state_array = (struct _StateArray *)
4920 		(mode_info->atom_context->bios + data_offset +
4921 		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
4922 	clock_info_array = (struct _ClockInfoArray *)
4923 		(mode_info->atom_context->bios + data_offset +
4924 		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
4925 	non_clock_info_array = (struct _NonClockInfoArray *)
4926 		(mode_info->atom_context->bios + data_offset +
4927 		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
4928 
4929 	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
4930 				  sizeof(struct radeon_ps), GFP_KERNEL);
4931 	if (!rdev->pm.dpm.ps)
4932 		return -ENOMEM;
4933 	power_state_offset = (u8 *)state_array->states;
4934 	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
4935 	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
4936 	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
4937 	for (i = 0; i < state_array->ucNumEntries; i++) {
4938 		u8 *idx;
4939 		power_state = (union pplib_power_state *)power_state_offset;
4940 		non_clock_array_index = power_state->v2.nonClockInfoIndex;
4941 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
4942 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
4943 		/* on failure, ci_dpm_fini() in the caller unwinds everything
4944 		 * parsed so far; freeing rdev->pm.dpm.ps here as well would
4945 		 * lead to a double free, so just return the error */
4946 		if (!rdev->pm.power_state[i].clock_info)
4947 			return -EINVAL;
4948 		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
4949 		if (ps == NULL)
4950 			return -ENOMEM;
4950 		rdev->pm.dpm.ps[i].ps_priv = ps;
4951 		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
4952 					      non_clock_info,
4953 					      non_clock_info_array->ucEntrySize);
4954 		k = 0;
4955 		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
4956 		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
4957 			clock_array_index = idx[j];
4958 			if (clock_array_index >= clock_info_array->ucNumEntries)
4959 				continue;
4960 			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
4961 				break;
4962 			clock_info = (union pplib_clock_info *)
4963 				((u8 *)&clock_info_array->clockInfo[0] +
4964 				 (clock_array_index * clock_info_array->ucEntrySize));
4965 			ci_parse_pplib_clock_info(rdev,
4966 						  &rdev->pm.dpm.ps[i], k,
4967 						  clock_info);
4968 			k++;
4969 		}
4970 		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
4971 		/* track states parsed so far so that ci_dpm_fini() can free
4972 		 * each ps_priv if a later iteration bails out; on success this
4973 		 * leaves num_ps == ucNumEntries */
4974 		rdev->pm.dpm.num_ps = i + 1;
4975 	}
4973 	return 0;
4974 }
4975 
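/*
 * Illustrative sketch, not driver code: v2 PPLib state entries are
 * variable length -- two index/count bytes followed by one clock-info
 * index byte per DPM level -- which is why the parser above advances its
 * cursor by 2 + ucNumDPMLevels bytes per state.  A hypothetical walker
 * (the name is ours) over the packed array would look like this:
 */
static inline u8 *ci_example_next_state(u8 *state)
{
	union pplib_power_state *ps = (union pplib_power_state *)state;

	return state + 2 + ps->v2.ucNumDPMLevels;
}
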
4976 int ci_get_vbios_boot_values(struct radeon_device *rdev,
4977 			     struct ci_vbios_boot_state *boot_state)
4978 {
4979 	struct radeon_mode_info *mode_info = &rdev->mode_info;
4980 	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
4981 	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
4982 	u8 frev, crev;
4983 	u16 data_offset;
4984 
4985 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
4986 				   &frev, &crev, &data_offset)) {
4987 		firmware_info =
4988 			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
4989 						    data_offset);
4990 		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
4991 		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
4992 		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
4993 		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
4994 		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
4995 		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
4996 		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
4997 
4998 		return 0;
4999 	}
5000 	return -EINVAL;
5001 }
5002 
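/*
 * Note: ci_dpm_fini() doubles as the error-unwind path for ci_dpm_init();
 * it frees only what has been recorded so far (num_ps states, then the
 * arrays), so calling it on a partially initialized dpm state is safe.
 */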
5003 void ci_dpm_fini(struct radeon_device *rdev)
5004 {
5005 	int i;
5006 
5007 	for (i = 0; i < rdev->pm.dpm.num_ps; i++)
5008 		kfree(rdev->pm.dpm.ps[i].ps_priv);
5010 	kfree(rdev->pm.dpm.ps);
5011 	kfree(rdev->pm.dpm.priv);
5012 	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5013 	r600_free_extended_power_table(rdev);
5014 }
5015 
5016 int ci_dpm_init(struct radeon_device *rdev)
5017 {
5018 	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5019 	u16 data_offset, size;
5020 	u8 frev, crev;
5021 	struct ci_power_info *pi;
5022 	int ret;
5023 	u32 mask;
5024 
5025 	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5026 	if (pi == NULL)
5027 		return -ENOMEM;
5028 	rdev->pm.dpm.priv = pi;
5029 
5030 	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
5031 	if (ret)
5032 		pi->sys_pcie_mask = 0;
5033 	else
5034 		pi->sys_pcie_mask = mask;
5035 	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5036 
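	/* min/max trackers start deliberately inverted so that the first
	 * performance level parsed from the power table clamps them */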
5037 	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
5038 	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
5039 	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
5040 	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
5041 
5042 	pi->pcie_lane_performance.max = 0;
5043 	pi->pcie_lane_performance.min = 16;
5044 	pi->pcie_lane_powersaving.max = 0;
5045 	pi->pcie_lane_powersaving.min = 16;
5046 
5047 	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
5048 	if (ret) {
5049 		ci_dpm_fini(rdev);
5050 		return ret;
5051 	}
5052 	ret = ci_parse_power_table(rdev);
5053 	if (ret) {
5054 		ci_dpm_fini(rdev);
5055 		return ret;
5056 	}
5057 	ret = r600_parse_extended_power_table(rdev);
5058 	if (ret) {
5059 		ci_dpm_fini(rdev);
5060 		return ret;
5061 	}
5062 
5063 	pi->dll_default_on = false;
5064 	pi->sram_end = SMC_RAM_END;
5065 
5066 	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5067 	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5068 	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5069 	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5070 	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5071 	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5072 	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5073 	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5074 
5075 	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5076 
5077 	pi->sclk_dpm_key_disabled = 0;
5078 	pi->mclk_dpm_key_disabled = 0;
5079 	pi->pcie_dpm_key_disabled = 0;
5080 
5081 	pi->caps_sclk_ds = true;
5082 
5083 	pi->mclk_strobe_mode_threshold = 40000;
5084 	pi->mclk_stutter_mode_threshold = 40000;
5085 	pi->mclk_edc_enable_threshold = 40000;
5086 	pi->mclk_edc_wr_enable_threshold = 40000;
5087 
5088 	ci_initialize_powertune_defaults(rdev);
5089 
5090 	pi->caps_fps = false;
5091 
5092 	pi->caps_sclk_throttle_low_notification = false;
5093 
5094 	pi->caps_uvd_dpm = true;
5095 
5096 	ci_get_leakage_voltages(rdev);
5097 	ci_patch_dependency_tables_with_leakage(rdev);
5098 	ci_set_private_data_variables_based_on_pptable(rdev);
5099 
5100 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5101 		kcalloc(4, sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
5102 	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5103 		ci_dpm_fini(rdev);
5104 		return -ENOMEM;
5105 	}
5106 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
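	/* clk entries below are in 10 kHz units (36000 = 360 MHz), v in mV */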
5107 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5108 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5109 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5110 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5111 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5112 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5113 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5114 	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5115 
5116 	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5117 	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5118 	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5119 
5120 	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5121 	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5122 	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5123 	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5124 
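	/* thermal limits are in millidegrees Celsius (99500 = 99.5 C) */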
5125 	pi->thermal_temp_setting.temperature_low = 99500;
5126 	pi->thermal_temp_setting.temperature_high = 100000;
5127 	pi->thermal_temp_setting.temperature_shutdown = 104000;
5128 
5129 	pi->uvd_enabled = false;
5130 
5131 	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5132 	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5133 	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5134 	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5135 		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5136 	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5137 		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5138 
5139 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5140 		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5141 			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5142 		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5143 			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5144 		else
5145 			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5146 	}
5147 
5148 	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5149 		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5150 			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5151 		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5152 			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5153 		else
5154 			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5155 	}
5156 
5157 	pi->vddc_phase_shed_control = true;
5158 
5159 #if defined(CONFIG_ACPI)
5160 	pi->pcie_performance_request =
5161 		radeon_acpi_is_pcie_performance_request_supported(rdev);
5162 #else
5163 	pi->pcie_performance_request = false;
5164 #endif
5165 
5166 	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
5167 				   &frev, &crev, &data_offset)) {
5168 		pi->caps_sclk_ss_support = true;
5169 		pi->caps_mclk_ss_support = true;
5170 		pi->dynamic_ss = true;
5171 	} else {
5172 		pi->caps_sclk_ss_support = false;
5173 		pi->caps_mclk_ss_support = false;
5174 		pi->dynamic_ss = true;
5175 	}
5176 
5177 	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5178 		pi->thermal_protection = true;
5179 	else
5180 		pi->thermal_protection = false;
5181 
5182 	pi->caps_dynamic_ac_timing = true;
5183 
5184 	pi->uvd_power_gated = false;
5185 
5186 	/* make sure dc limits are valid */
5187 	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5188 	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5189 		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5190 			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5191 
5192 	return 0;
5193 }
5194 
5195 void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5196 						    struct seq_file *m)
5197 {
5198 	u32 sclk = ci_get_average_sclk_freq(rdev);
5199 	u32 mclk = ci_get_average_mclk_freq(rdev);
5200 
5201 	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
5202 		   sclk, mclk);
5203 }
5204 
5205 void ci_dpm_print_power_state(struct radeon_device *rdev,
5206 			      struct radeon_ps *rps)
5207 {
5208 	struct ci_ps *ps = ci_get_ps(rps);
5209 	struct ci_pl *pl;
5210 	int i;
5211 
5212 	r600_dpm_print_class_info(rps->class, rps->class2);
5213 	r600_dpm_print_cap_info(rps->caps);
5214 	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
5215 	for (i = 0; i < ps->performance_level_count; i++) {
5216 		pl = &ps->performance_levels[i];
5217 		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5218 		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5219 	}
5220 	r600_dpm_print_ps_status(rdev, rps);
5221 }
5222 
5223 u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5224 {
5225 	struct ci_power_info *pi = ci_get_pi(rdev);
5226 	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5227 
5228 	if (low)
5229 		return requested_state->performance_levels[0].sclk;
5230 	else
5231 		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5232 }
5233 
5234 u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5235 {
5236 	struct ci_power_info *pi = ci_get_pi(rdev);
5237 	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5238 
5239 	if (low)
5240 		return requested_state->performance_levels[0].mclk;
5241 	else
5242 		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
5243 }
5244