xref: /openbmc/linux/drivers/gpu/drm/radeon/radeon_pm.c (revision ce8f53709bf440100cb9d31b1303291551cf517f)
/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#endif
#include <linux/power_supply.h>

#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200

static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);

#define ACPI_AC_CLASS           "ac_adapter"

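/*
 * ACPI notifier callback: on AC adapter events, re-evaluate the "auto"
 * profile so the selected clocks follow the new power source.
 */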
#ifdef CONFIG_ACPI
static int radeon_acpi_event(struct notifier_block *nb,
			     unsigned long val,
			     void *data)
{
	struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;

	if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
		if (power_supply_is_system_supplied() > 0)
			DRM_INFO("pm: AC\n");
		else
			DRM_INFO("pm: DC\n");

		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			if (rdev->pm.profile == PM_PROFILE_AUTO) {
				mutex_lock(&rdev->pm.mutex);
				radeon_pm_update_profile(rdev);
				radeon_pm_set_clocks(rdev);
				mutex_unlock(&rdev->pm.mutex);
			}
		}
	}

	return NOTIFY_OK;
}
#endif

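/*
 * Map the user-selected profile (default/auto/low/high), the current power
 * source and the number of active heads to a profile table index, then pick
 * the requested power state and clock mode from that entry.
 */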
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
	switch (rdev->pm.profile) {
	case PM_PROFILE_DEFAULT:
		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
		break;
	case PM_PROFILE_AUTO:
		if (power_supply_is_system_supplied() > 0) {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		} else {
			if (rdev->pm.active_crtc_count > 1)
				rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
			else
				rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		}
		break;
	case PM_PROFILE_LOW:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
		break;
	case PM_PROFILE_HIGH:
		if (rdev->pm.active_crtc_count > 1)
			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
		else
			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
		break;
	}

	if (rdev->pm.active_crtc_count == 0) {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
	} else {
		rdev->pm.requested_power_state_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
		rdev->pm.requested_clock_mode_index =
			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
	}
}

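/*
 * Kill CPU mappings of buffer objects placed in VRAM (plus the GART table,
 * stolen VGA memory and the r600 blit shader object) ahead of a reclock.
 */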
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects))
		return;

	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			ttm_bo_unmap_virtual(&bo->tbo);
	}

	if (rdev->gart.table.vram.robj)
		ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo);

	if (rdev->stollen_vga_memory)
		ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo);

	if (rdev->r600_blit.shader_obj)
		ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo);
}

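/* Wait, with a timeout, for the next vblank on any active CRTC. */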
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
	if (rdev->pm.active_crtcs) {
		rdev->pm.vblank_sync = false;
		wait_event_timeout(
			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
	}
}

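/*
 * Program the requested engine/memory clocks if they differ from the current
 * ones.  Clocks are clamped to the default (boot) values; with dynpm the
 * actual switch is additionally gated on being inside vertical blank.
 */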
static void radeon_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;

	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->clock.default_sclk)
			sclk = rdev->clock.default_sclk;

		mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].mclk;
		if (mclk > rdev->clock.default_mclk)
			mclk = rdev->clock.default_mclk;

		/* voltage, pcie lanes, etc.*/
		radeon_pm_misc(rdev);

		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			radeon_sync_with_vblank(rdev);

			if (!radeon_pm_in_vbl(rdev))
				return;

			radeon_pm_prepare(rdev);
			/* set engine clock */
			if (sclk != rdev->pm.current_sclk) {
				radeon_pm_debug_check_in_vbl(rdev, false);
				radeon_set_engine_clock(rdev, sclk);
				radeon_pm_debug_check_in_vbl(rdev, true);
				rdev->pm.current_sclk = sclk;
				DRM_INFO("Setting: e: %d\n", sclk);
			}

			/* set memory clock */
			if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
				radeon_pm_debug_check_in_vbl(rdev, false);
				radeon_set_memory_clock(rdev, mclk);
				radeon_pm_debug_check_in_vbl(rdev, true);
				rdev->pm.current_mclk = mclk;
				DRM_INFO("Setting: m: %d\n", mclk);
			}
			radeon_pm_finish(rdev);
		} else {
			/* set engine clock */
			if (sclk != rdev->pm.current_sclk) {
				radeon_sync_with_vblank(rdev);
				radeon_pm_prepare(rdev);
				radeon_set_engine_clock(rdev, sclk);
				radeon_pm_finish(rdev);
				rdev->pm.current_sclk = sclk;
				DRM_INFO("Setting: e: %d\n", sclk);
			}
			/* set memory clock */
			if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
				radeon_sync_with_vblank(rdev);
				radeon_pm_prepare(rdev);
				radeon_set_memory_clock(rdev, mclk);
				radeon_pm_finish(rdev);
				rdev->pm.current_mclk = mclk;
				DRM_INFO("Setting: m: %d\n", mclk);
			}
		}

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_INFO("pm: GUI not idle!!!\n");
}

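/*
 * Quiesce the GPU (GUI idle interrupt on r600 and newer, a fence wait on
 * older chips), unmap VRAM buffers, hold vblank references for the active
 * CRTCs, apply the new power state and refresh the display watermarks.
 */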
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
	int i;

	mutex_lock(&rdev->ddev->struct_mutex);
	mutex_lock(&rdev->vram_mutex);
	mutex_lock(&rdev->cp.mutex);

	/* gui idle int has issues on older chips it seems */
	if (rdev->family >= CHIP_R600) {
		if (rdev->irq.installed) {
			/* wait for GPU idle */
			rdev->pm.gui_idle = false;
			rdev->irq.gui_idle = true;
			radeon_irq_set(rdev);
			wait_event_interruptible_timeout(
				rdev->irq.idle_queue, rdev->pm.gui_idle,
				msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
			rdev->irq.gui_idle = false;
			radeon_irq_set(rdev);
		}
	} else {
		if (rdev->cp.ready) {
			struct radeon_fence *fence;
			radeon_ring_alloc(rdev, 64);
			radeon_fence_create(rdev, &fence);
			radeon_fence_emit(rdev, fence);
			radeon_ring_commit(rdev);
			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);
		}
	}
	radeon_unmap_vram_bos(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.active_crtcs & (1 << i)) {
				rdev->pm.req_vblank |= (1 << i);
				drm_vblank_get(rdev->ddev, i);
			}
		}
	}

	radeon_set_power_state(rdev);

	if (rdev->irq.installed) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (rdev->pm.req_vblank & (1 << i)) {
				rdev->pm.req_vblank &= ~(1 << i);
				drm_vblank_put(rdev->ddev, i);
			}
		}
	}

	/* update display watermarks based on new power state */
	radeon_update_bandwidth_info(rdev);
	if (rdev->pm.active_crtc_count)
		radeon_bandwidth_update(rdev);

	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

	mutex_unlock(&rdev->cp.mutex);
	mutex_unlock(&rdev->vram_mutex);
	mutex_unlock(&rdev->ddev->struct_mutex);
}

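/*
 * sysfs interface: power_profile selects default/auto/low/high,
 * power_method switches between "profile" and "dynpm".
 */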
static ssize_t radeon_get_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int cp = rdev->pm.profile;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(cp == PM_PROFILE_AUTO) ? "auto" :
			(cp == PM_PROFILE_LOW) ? "low" :
			(cp == PM_PROFILE_HIGH) ? "high" : "default");
}

static ssize_t radeon_set_pm_profile(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;

	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		if (strncmp("default", buf, strlen("default")) == 0)
			rdev->pm.profile = PM_PROFILE_DEFAULT;
		else if (strncmp("auto", buf, strlen("auto")) == 0)
			rdev->pm.profile = PM_PROFILE_AUTO;
		else if (strncmp("low", buf, strlen("low")) == 0)
			rdev->pm.profile = PM_PROFILE_LOW;
		else if (strncmp("high", buf, strlen("high")) == 0)
			rdev->pm.profile = PM_PROFILE_HIGH;
		else {
			DRM_ERROR("invalid power profile!\n");
			goto fail;
		}
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	}
fail:
	mutex_unlock(&rdev->pm.mutex);

	return count;
}

static ssize_t radeon_get_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;
	int pm = rdev->pm.pm_method;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
}

static ssize_t radeon_set_pm_method(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
	struct radeon_device *rdev = ddev->dev_private;

	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.pm_method = PM_METHOD_DYNPM;
		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
		mutex_unlock(&rdev->pm.mutex);
	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
		mutex_lock(&rdev->pm.mutex);
		rdev->pm.pm_method = PM_METHOD_PROFILE;
		/* disable dynpm */
		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
		cancel_delayed_work(&rdev->pm.dynpm_idle_work);
		mutex_unlock(&rdev->pm.mutex);
	} else {
		DRM_ERROR("invalid power method!\n");
		goto fail;
	}
	radeon_pm_compute_clocks(rdev);
fail:
	return count;
}

static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);

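/*
 * Suspend/resume hooks: stop the dynpm idle worker and invalidate the cached
 * clock state on suspend; recompute clocks from the current display
 * configuration on resume.
 */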
void radeon_pm_suspend(struct radeon_device *rdev)
{
	mutex_lock(&rdev->pm.mutex);
	cancel_delayed_work(&rdev->pm.dynpm_idle_work);
	rdev->pm.current_power_state_index = -1;
	rdev->pm.current_clock_mode_index = -1;
	rdev->pm.current_sclk = 0;
	rdev->pm.current_mclk = 0;
	mutex_unlock(&rdev->pm.mutex);
}

void radeon_pm_resume(struct radeon_device *rdev)
{
	radeon_pm_compute_clocks(rdev);
}

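/*
 * Set up power management: parse the power tables from the BIOS, build the
 * default profiles, apply the default profile, create the sysfs files,
 * register the ACPI notifier and initialize the dynpm idle worker.
 */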
int radeon_pm_init(struct radeon_device *rdev)
{
	/* default to profile method */
	rdev->pm.pm_method = PM_METHOD_PROFILE;
	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;
	rdev->pm.current_sclk = 0;
	rdev->pm.current_mclk = 0;

	if (rdev->bios) {
		if (rdev->is_atom_bios)
			radeon_atombios_get_power_modes(rdev);
		else
			radeon_combios_get_power_modes(rdev);
		radeon_pm_init_profile(rdev);
		rdev->pm.current_power_state_index = -1;
		rdev->pm.current_clock_mode_index = -1;
	}

	if (rdev->pm.num_power_states > 1) {
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
			mutex_unlock(&rdev->pm.mutex);
		}

		/* where's the best place to put these? */
		device_create_file(rdev->dev, &dev_attr_power_profile);
		device_create_file(rdev->dev, &dev_attr_power_method);

#ifdef CONFIG_ACPI
		rdev->acpi_nb.notifier_call = radeon_acpi_event;
		register_acpi_notifier(&rdev->acpi_nb);
#endif
		INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);

		if (radeon_debugfs_pm_init(rdev)) {
			DRM_ERROR("Failed to register debugfs file for PM!\n");
		}

		DRM_INFO("radeon: power management initialized\n");
	}

	return 0;
}

void radeon_pm_fini(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		mutex_lock(&rdev->pm.mutex);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* cancel work */
			cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		mutex_unlock(&rdev->pm.mutex);

		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
#ifdef CONFIG_ACPI
		unregister_acpi_notifier(&rdev->acpi_nb);
#endif
	}

	if (rdev->pm.i2c_bus)
		radeon_i2c_destroy(rdev->pm.i2c_bus);
}

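/*
 * Re-evaluate the power state whenever the display configuration changes:
 * recount the active CRTCs, then either re-apply the current profile or
 * adjust the dynpm state machine (pause with multiple heads, activate with
 * one, drop to minimum with none).
 */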
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	if (rdev->pm.num_power_states < 2)
		return;

	mutex_lock(&rdev->pm.mutex);

	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	list_for_each_entry(crtc,
		&ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
			rdev->pm.active_crtc_count++;
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
					DRM_DEBUG("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	mutex_unlock(&rdev->pm.mutex);
}

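/*
 * Best-effort check whether the active CRTC(s) are currently in vertical
 * blank; the registers consulted differ between DCE4, AVIVO and pre-AVIVO
 * chips.
 */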
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
	u32 stat_crtc = 0, vbl = 0, position = 0;
	bool in_vbl = true;

	if (ASIC_IS_DCE4(rdev)) {
		if (rdev->pm.active_crtcs & (1 << 0)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC0_REGISTER_OFFSET) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 1)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC1_REGISTER_OFFSET) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 2)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC2_REGISTER_OFFSET) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 3)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC3_REGISTER_OFFSET) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 4)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC4_REGISTER_OFFSET) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 5)) {
			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
				     EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
					  EVERGREEN_CRTC5_REGISTER_OFFSET) & 0xfff;
		}
	} else if (ASIC_IS_AVIVO(rdev)) {
		if (rdev->pm.active_crtcs & (1 << 0)) {
			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END) & 0xfff;
			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION) & 0xfff;
		}
		if (rdev->pm.active_crtcs & (1 << 1)) {
			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END) & 0xfff;
			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION) & 0xfff;
		}
		if (position < vbl && position > 1)
			in_vbl = false;
	} else {
		if (rdev->pm.active_crtcs & (1 << 0)) {
			stat_crtc = RREG32(RADEON_CRTC_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;
		}
		if (rdev->pm.active_crtcs & (1 << 1)) {
			stat_crtc = RREG32(RADEON_CRTC2_STATUS);
			if (!(stat_crtc & 1))
				in_vbl = false;
		}
	}

	if (position < vbl && position > 1)
		in_vbl = false;

	return in_vbl;
}

static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
	u32 stat_crtc = 0;
	bool in_vbl = radeon_pm_in_vbl(rdev);

	if (in_vbl == false)
		DRM_INFO("not in vbl for pm change %08x at %s\n", stat_crtc,
			 finish ? "exit" : "entry");
	return in_vbl;
}

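/*
 * Periodic dynpm worker: sample how many fences are still outstanding and,
 * after a debounce delay, request an upclock when the GPU is busy or a
 * downclock when it is idle, then reschedule itself.
 */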
static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	int resched;
	rdev = container_of(work, struct radeon_device,
				pm.dynpm_idle_work.work);

	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	mutex_lock(&rdev->pm.mutex);
	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
		unsigned long irq_flags;
		int not_processed = 0;

		read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
		if (!list_empty(&rdev->fence_drv.emited)) {
			struct list_head *ptr;
			list_for_each(ptr, &rdev->fence_drv.emited) {
				/* count up to 3, that's enough info */
				if (++not_processed >= 3)
					break;
			}
		}
		read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);

		if (not_processed >= 3) { /* should upclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_upclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_UPCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_downclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_DOWNCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		/* Note, radeon_pm_set_clocks is called with static_switch set
		 * to false since we want to wait for vbl to avoid flicker.
		 */
		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
		    jiffies > rdev->pm.dynpm_action_timeout) {
			radeon_pm_get_dynpm_state(rdev);
			radeon_pm_set_clocks(rdev);
		}
	}
	mutex_unlock(&rdev->pm.mutex);
	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

	queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
	if (rdev->asic->get_memory_clock)
		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
	if (rdev->asic->get_pcie_lanes)
		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));

	return 0;
}

static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif

static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}