/* /openbmc/linux/drivers/gpu/drm/radeon/r600_dpm.c (revision afb46f79) */
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"

const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
	R600_UTC_DFLT_00,
	R600_UTC_DFLT_01,
	R600_UTC_DFLT_02,
	R600_UTC_DFLT_03,
	R600_UTC_DFLT_04,
	R600_UTC_DFLT_05,
	R600_UTC_DFLT_06,
	R600_UTC_DFLT_07,
	R600_UTC_DFLT_08,
	R600_UTC_DFLT_09,
	R600_UTC_DFLT_10,
	R600_UTC_DFLT_11,
	R600_UTC_DFLT_12,
	R600_UTC_DFLT_13,
	R600_UTC_DFLT_14,
};

const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
	R600_DTC_DFLT_00,
	R600_DTC_DFLT_01,
	R600_DTC_DFLT_02,
	R600_DTC_DFLT_03,
	R600_DTC_DFLT_04,
	R600_DTC_DFLT_05,
	R600_DTC_DFLT_06,
	R600_DTC_DFLT_07,
	R600_DTC_DFLT_08,
	R600_DTC_DFLT_09,
	R600_DTC_DFLT_10,
	R600_DTC_DFLT_11,
	R600_DTC_DFLT_12,
	R600_DTC_DFLT_13,
	R600_DTC_DFLT_14,
};

void r600_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("uvd_mvc ");
	}
	printk("\n");
}

void r600_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps: ");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		printk("video ");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		printk("no_dc ");
	printk("\n");
}

void r600_dpm_print_ps_status(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	printk("\tstatus: ");
	if (rps == rdev->pm.dpm.current_ps)
		printk("c ");
	if (rps == rdev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == rdev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}

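/*
 * Return the vblank interval, in microseconds, of the first enabled CRTC,
 * derived from its hw_mode timing (vblank lines, including the vertical
 * borders, times the line time).  If no display is active, 0xffffffff is
 * returned so callers treat the vblank window as effectively unlimited.
 */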
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 line_time_us, vblank_lines;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
			line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
				radeon_crtc->hw_mode.clock;
			vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
				radeon_crtc->hw_mode.crtc_vdisplay +
				(radeon_crtc->v_border * 2);
			vblank_time_us = vblank_lines * line_time_us;
			break;
		}
	}

	return vblank_time_us;
}

u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 vrefresh = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
			vrefresh = radeon_crtc->hw_mode.vrefresh;
			break;
		}
	}

	return vrefresh;
}

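/*
 * Derive the p/u pair used by several power management timing fields:
 * i_c = (i * r_c) / 100, u is roughly half the bit width of (i_c >> p_b),
 * and p = i_c / 4^u.  The meaning of the raw inputs is defined by the
 * callers in the asic specific dpm code.
 */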
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			    u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}

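/*
 * Split the activity target 't' into a high and a low threshold (*th, *tl)
 * based on the hysteresis 'h' and the low/high frequency pair fl/fh.
 * Returns -EINVAL if either frequency is zero or fl > fh.
 */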
int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

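/*
 * Toggle dynamic graphics clock gating.  When disabling, the RLC is asked
 * to acknowledge the change (CG_RLC_REQ_AND_RSP handshake, polled for up to
 * rdev->usec_timeout microseconds) before GRBM_PWR_CNTL is rewritten and
 * read back to post the write.
 */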
void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	int i;

	if (enable) {
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	} else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

		WREG32(CG_RLC_REQ_AND_RSP, 0x2);

		for (i = 0; i < rdev->usec_timeout; i++) {
			if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
				break;
			udelay(1);
		}

		WREG32(CG_RLC_REQ_AND_RSP, 0x0);

		WREG32(GRBM_PWR_CNTL, 0x1);
		RREG32(GRBM_PWR_CNTL);
	}
}

void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	else
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}

void r600_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

bool r600_dynamicpm_enabled(struct radeon_device *rdev)
{
	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
		return true;
	else
		return false;
}

void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}

void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
	else
		WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}

void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
	else
		WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
}

void r600_wait_for_spll_change(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
			break;
		udelay(1);
	}
}

void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
{
	WREG32(CG_BSP, BSP(p) | BSU(u));
}

void r600_set_at(struct radeon_device *rdev,
		 u32 l_to_m, u32 m_to_h,
		 u32 h_to_m, u32 m_to_l)
{
	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
}

void r600_set_tc(struct radeon_device *rdev,
		 u32 index, u32 u_t, u32 d_t)
{
	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
}

void r600_select_td(struct radeon_device *rdev,
		    enum r600_td td)
{
	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}

void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
	WREG32(CG_FTV, vrv);
}

void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}

void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}

void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}

void r600_set_sst(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}

void r600_set_git(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}

void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}

void r600_set_fct(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}

void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}

void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}

void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}

void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}

void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}

void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}

void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}

void r600_engine_clock_entry_enable(struct radeon_device *rdev,
				    u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_ENTRY_VALID);
}

void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
						   u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_STEP_ENABLE);
}

void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
						 u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_POST_DIV_EN);
}

void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
					      u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
}

void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
						   u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
}

void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
						  u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
}

void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
					   u32 index, u32 step_time)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
}

void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}

void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}

void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}

void r600_voltage_control_enable_pins(struct radeon_device *rdev,
				      u64 mask)
{
	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
}

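/*
 * Program the voltage GPIO pin pattern for the given power level.  The
 * level index is mirrored (ix = 3 - index), so level 0 selects the last
 * CTXSW_VID_LOWER_GPIO_CNTL slot; the low 32 pin bits go into that register
 * and the remaining bits are merged into the level's 3-bit field of
 * VID_UPPER_GPIO_CNTL.
 */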
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
{
	u32 tmp, mask;
	u32 ix = 3 - (3 & index);

	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
}

void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
						    u64 mask)
{
	u32 gpio;

	gpio = RREG32(GPIOPAD_MASK);
	gpio &= ~mask;
	WREG32(GPIOPAD_MASK, gpio);

	gpio = RREG32(GPIOPAD_EN);
	gpio &= ~mask;
	WREG32(GPIOPAD_EN, gpio);

	gpio = RREG32(GPIOPAD_A);
	gpio &= ~mask;
	WREG32(GPIOPAD_A, gpio);
}

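/*
 * The CTXSW_PROFILE_INDEX registers use the same mirrored index mapping
 * (ix = 3 - index) as above.  The helpers below program the individual
 * fields of the selected profile: enable bit, voltage/memory/engine clock
 * indices, display watermark and PCIE gen2 voltage flag.
 */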
void r600_power_level_enable(struct radeon_device *rdev,
			     enum r600_power_level index, bool enable)
{
	u32 ix = 3 - (3 & index);

	if (enable)
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
			 ~CTXSW_FREQ_STATE_ENABLE);
	else
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
			 ~CTXSW_FREQ_STATE_ENABLE);
}

void r600_power_level_set_voltage_index(struct radeon_device *rdev,
					enum r600_power_level index, u32 voltage_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 mem_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 eng_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_watermark_id(struct radeon_device *rdev,
				       enum r600_power_level index,
				       enum r600_display_watermark watermark_id)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
		tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
}

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
				    enum r600_power_level index, bool compatible)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (compatible)
		tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
}

enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
	tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
	return tmp;
}

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
	tmp >>= TARGET_PROFILE_INDEX_SHIFT;
	return tmp;
}

void r600_power_level_set_enter_index(struct radeon_device *rdev,
				      enum r600_power_level index)
{
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
		 ~DYN_PWR_ENTER_INDEX_MASK);
}

void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
				       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) != index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) != index)
			break;
		udelay(1);
	}
}

void r600_wait_for_power_level(struct radeon_device *rdev,
			       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) == index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) == index)
			break;
		udelay(1);
	}
}

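/*
 * Bring up dynamic power management: sclk/mclk control is first disabled
 * (SCLK_PWRMGT_OFF / MPLL_PWRMGT_OFF), GLOBAL_PWRMGT_EN is set, and after
 * waiting for vblank on CRTCs 0 and 1 the SPLL is cycled through bypass
 * twice before the clock controls are switched back on.
 */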
void r600_start_dpm(struct radeon_device *rdev)
{
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
}

void r600_stop_dpm(struct radeon_device *rdev)
{
	r600_dynamicpm_enable(rdev, false);
}

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	return 0;
}

void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{

}

bool r600_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

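/*
 * Intersect the requested thermal range with the 0..255 degrees C window
 * the registers can express and program the DIG_THERM interrupt and DPM
 * thresholds.  The registers take whole degrees; min_temp/max_temp are
 * tracked in millidegrees.
 */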
static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
					      int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

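/*
 * Late DPM enable: once interrupts are installed on an asic with a
 * supported internal thermal sensor, program the default temperature range
 * and turn on the thermal interrupt.
 */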
int r600_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
};

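/*
 * Copy an ATOM clock/voltage dependency table out of the vbios image into
 * a driver allocated radeon_clock_voltage_dependency_table.  On failure of
 * a later step the caller is expected to free radeon_table->entries.
 */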
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
					    ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct radeon_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	radeon_table->entries = kzalloc(size, GFP_KERNEL);
	if (!radeon_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	radeon_table->count = atom_table->ucNumEntries;

	return 0;
}

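/*
 * Cache the PowerPlay platform caps and the backbias/voltage response
 * times from the ATOM PowerPlayInfo table.
 */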
int r600_get_platform_caps(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22

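/*
 * Parse the optional parts of the PowerPlay table: the fan table, the
 * clock/voltage dependency and phase shedding tables (pplib4), the CAC
 * leakage data (pplib5) and, when an extended header is present, the VCE,
 * UVD, SAMU, PPM, ACP and PowerTune tables.  Any allocation failure unwinds
 * through r600_free_extended_power_table().
 */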
int r600_parse_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				rdev->pm.dpm.fan.t_max = 10900;
			rdev->pm.dpm.fan.cycle_delay = 100000;
			rdev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, phase shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							       dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct radeon_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
		rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (rdev->pm.dpm.tdp_od_limit)
			rdev->pm.dpm.power_control = true;
		else
			rdev->pm.dpm.power_control = false;
		rdev->pm.dpm.tdp_adjustment = 0;
		rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
			rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
			ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct radeon_vce_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= RADEON_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				rdev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
			ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
			ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			rdev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.ppm_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			rdev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
			ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
			ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			rdev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
	}

	return 0;
}

void r600_free_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
}

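/*
 * Pick the PCIE gen to use for DPM: a gen already fixed in the asic power
 * table wins; otherwise default_gen is used when the system mask reports
 * the matching link speed, falling back to gen1.
 */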
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
					       u32 sys_mask,
					       enum radeon_pcie_gen asic_gen,
					       enum radeon_pcie_gen default_gen)
{
	switch (asic_gen) {
	case RADEON_PCIE_GEN1:
		return RADEON_PCIE_GEN1;
	case RADEON_PCIE_GEN2:
		return RADEON_PCIE_GEN2;
	case RADEON_PCIE_GEN3:
		return RADEON_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
			return RADEON_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
			return RADEON_PCIE_GEN2;
		else
			return RADEON_PCIE_GEN1;
	}
	return RADEON_PCIE_GEN1;
}

u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
			       u16 asic_lanes,
			       u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

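/*
 * Translate a lane count into its encoded register value: 1/2/4/8/12/16
 * map to 1..6, anything else (including counts above 16) encodes as 0.
 */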
u8 r600_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}
1355