xref: /openbmc/linux/drivers/gpu/drm/radeon/r600.c (revision ff212f25feb44a915ce9c0144faef7fae27a6e61)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/slab.h>
29 #include <linux/seq_file.h>
30 #include <linux/firmware.h>
31 #include <linux/module.h>
32 #include <drm/drmP.h>
33 #include <drm/radeon_drm.h>
34 #include "radeon.h"
35 #include "radeon_asic.h"
36 #include "radeon_mode.h"
37 #include "r600d.h"
38 #include "atom.h"
39 #include "avivod.h"
40 #include "radeon_ucode.h"
41 
42 /* Firmware Names */
43 MODULE_FIRMWARE("radeon/R600_pfp.bin");
44 MODULE_FIRMWARE("radeon/R600_me.bin");
45 MODULE_FIRMWARE("radeon/RV610_pfp.bin");
46 MODULE_FIRMWARE("radeon/RV610_me.bin");
47 MODULE_FIRMWARE("radeon/RV630_pfp.bin");
48 MODULE_FIRMWARE("radeon/RV630_me.bin");
49 MODULE_FIRMWARE("radeon/RV620_pfp.bin");
50 MODULE_FIRMWARE("radeon/RV620_me.bin");
51 MODULE_FIRMWARE("radeon/RV635_pfp.bin");
52 MODULE_FIRMWARE("radeon/RV635_me.bin");
53 MODULE_FIRMWARE("radeon/RV670_pfp.bin");
54 MODULE_FIRMWARE("radeon/RV670_me.bin");
55 MODULE_FIRMWARE("radeon/RS780_pfp.bin");
56 MODULE_FIRMWARE("radeon/RS780_me.bin");
57 MODULE_FIRMWARE("radeon/RV770_pfp.bin");
58 MODULE_FIRMWARE("radeon/RV770_me.bin");
59 MODULE_FIRMWARE("radeon/RV770_smc.bin");
60 MODULE_FIRMWARE("radeon/RV730_pfp.bin");
61 MODULE_FIRMWARE("radeon/RV730_me.bin");
62 MODULE_FIRMWARE("radeon/RV730_smc.bin");
63 MODULE_FIRMWARE("radeon/RV740_smc.bin");
64 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
65 MODULE_FIRMWARE("radeon/RV710_me.bin");
66 MODULE_FIRMWARE("radeon/RV710_smc.bin");
67 MODULE_FIRMWARE("radeon/R600_rlc.bin");
68 MODULE_FIRMWARE("radeon/R700_rlc.bin");
69 MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
70 MODULE_FIRMWARE("radeon/CEDAR_me.bin");
71 MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
72 MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
73 MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
74 MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
75 MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
76 MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
77 MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
78 MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
79 MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
80 MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
81 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
82 MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
83 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
84 MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
85 MODULE_FIRMWARE("radeon/PALM_pfp.bin");
86 MODULE_FIRMWARE("radeon/PALM_me.bin");
87 MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
88 MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
89 MODULE_FIRMWARE("radeon/SUMO_me.bin");
90 MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
91 MODULE_FIRMWARE("radeon/SUMO2_me.bin");
92 
93 static const u32 crtc_offsets[2] =
94 {
95 	0,
96 	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
97 };
98 
99 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
100 
101 /* r600,rv610,rv630,rv620,rv635,rv670 */
102 int r600_mc_wait_for_idle(struct radeon_device *rdev);
103 static void r600_gpu_init(struct radeon_device *rdev);
104 void r600_fini(struct radeon_device *rdev);
105 void r600_irq_disable(struct radeon_device *rdev);
106 static void r600_pcie_gen2_enable(struct radeon_device *rdev);
107 extern int evergreen_rlc_resume(struct radeon_device *rdev);
108 extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
109 
110 /**
111  * r600_get_xclk - get the xclk
112  *
113  * @rdev: radeon_device pointer
114  *
115  * Returns the reference clock used by the gfx engine
116  * (r6xx, IGPs, APUs).
117  */
118 u32 r600_get_xclk(struct radeon_device *rdev)
119 {
120 	return rdev->clock.spll.reference_freq;
121 }
122 
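/**
 * r600_set_uvd_clocks - set the UVD clocks
 *
 * @rdev: radeon_device pointer
 * @vclk: requested UVD vclk
 * @dclk: requested UVD dclk
 *
 * Stub for these asics: no UVD clock programming is done and the
 * request always succeeds (returns 0).
 */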
123 int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
124 {
125 	return 0;
126 }
127 
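/**
 * dce3_program_fmt - set up the FMT (bit depth/dither) block
 *
 * @encoder: drm encoder being programmed
 *
 * Selects truncation or spatial dithering from the connector's bpc and
 * dither setting and writes FMT_BIT_DEPTH_CONTROL for the crtc driving
 * this encoder.  LVDS (set up by atom) and analog DACs are skipped.
 */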
128 void dce3_program_fmt(struct drm_encoder *encoder)
129 {
130 	struct drm_device *dev = encoder->dev;
131 	struct radeon_device *rdev = dev->dev_private;
132 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
133 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
134 	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
135 	int bpc = 0;
136 	u32 tmp = 0;
137 	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
138 
139 	if (connector) {
140 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
141 		bpc = radeon_get_monitor_bpc(connector);
142 		dither = radeon_connector->dither;
143 	}
144 
145 	/* LVDS FMT is set up by atom */
146 	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
147 		return;
148 
149 	/* not needed for analog */
150 	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
151 	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
152 		return;
153 
154 	if (bpc == 0)
155 		return;
156 
157 	switch (bpc) {
158 	case 6:
159 		if (dither == RADEON_FMT_DITHER_ENABLE)
160 			/* XXX sort out optimal dither settings */
161 			tmp |= FMT_SPATIAL_DITHER_EN;
162 		else
163 			tmp |= FMT_TRUNCATE_EN;
164 		break;
165 	case 8:
166 		if (dither == RADEON_FMT_DITHER_ENABLE)
167 			/* XXX sort out optimal dither settings */
168 			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
169 		else
170 			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
171 		break;
172 	case 10:
173 	default:
174 		/* not needed */
175 		break;
176 	}
177 
178 	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
179 }
180 
181 /* get temperature in millidegrees */
182 int rv6xx_get_temp(struct radeon_device *rdev)
183 {
184 	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
185 		ASIC_T_SHIFT;
186 	int actual_temp = temp & 0xff;
187 
188 	if (temp & 0x100)
189 		actual_temp -= 256;
190 
191 	return actual_temp * 1000;
192 }
193 
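/* Pick the power state and clock mode to request for the planned dynpm
 * action (minimum, downclock, upclock, default).  IGPs and R600 step
 * through the power state array directly; other asics keep one power
 * state and step through its clock modes instead.  States flagged
 * single-display-only or no-display are skipped while crtcs are active.
 */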
194 void r600_pm_get_dynpm_state(struct radeon_device *rdev)
195 {
196 	int i;
197 
198 	rdev->pm.dynpm_can_upclock = true;
199 	rdev->pm.dynpm_can_downclock = true;
200 
201 	/* power state array is low to high, default is first */
202 	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
203 		int min_power_state_index = 0;
204 
205 		if (rdev->pm.num_power_states > 2)
206 			min_power_state_index = 1;
207 
208 		switch (rdev->pm.dynpm_planned_action) {
209 		case DYNPM_ACTION_MINIMUM:
210 			rdev->pm.requested_power_state_index = min_power_state_index;
211 			rdev->pm.requested_clock_mode_index = 0;
212 			rdev->pm.dynpm_can_downclock = false;
213 			break;
214 		case DYNPM_ACTION_DOWNCLOCK:
215 			if (rdev->pm.current_power_state_index == min_power_state_index) {
216 				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
217 				rdev->pm.dynpm_can_downclock = false;
218 			} else {
219 				if (rdev->pm.active_crtc_count > 1) {
220 					for (i = 0; i < rdev->pm.num_power_states; i++) {
221 						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
222 							continue;
223 						else if (i >= rdev->pm.current_power_state_index) {
224 							rdev->pm.requested_power_state_index =
225 								rdev->pm.current_power_state_index;
226 							break;
227 						} else {
228 							rdev->pm.requested_power_state_index = i;
229 							break;
230 						}
231 					}
232 				} else {
233 					if (rdev->pm.current_power_state_index == 0)
234 						rdev->pm.requested_power_state_index =
235 							rdev->pm.num_power_states - 1;
236 					else
237 						rdev->pm.requested_power_state_index =
238 							rdev->pm.current_power_state_index - 1;
239 				}
240 			}
241 			rdev->pm.requested_clock_mode_index = 0;
242 			/* skip this power state if crtcs are active and its no-display flag is set */
243 			if ((rdev->pm.active_crtc_count > 0) &&
244 			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
245 			     clock_info[rdev->pm.requested_clock_mode_index].flags &
246 			     RADEON_PM_MODE_NO_DISPLAY)) {
247 				rdev->pm.requested_power_state_index++;
248 			}
249 			break;
250 		case DYNPM_ACTION_UPCLOCK:
251 			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
252 				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
253 				rdev->pm.dynpm_can_upclock = false;
254 			} else {
255 				if (rdev->pm.active_crtc_count > 1) {
256 					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
257 						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
258 							continue;
259 						else if (i <= rdev->pm.current_power_state_index) {
260 							rdev->pm.requested_power_state_index =
261 								rdev->pm.current_power_state_index;
262 							break;
263 						} else {
264 							rdev->pm.requested_power_state_index = i;
265 							break;
266 						}
267 					}
268 				} else
269 					rdev->pm.requested_power_state_index =
270 						rdev->pm.current_power_state_index + 1;
271 			}
272 			rdev->pm.requested_clock_mode_index = 0;
273 			break;
274 		case DYNPM_ACTION_DEFAULT:
275 			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
276 			rdev->pm.requested_clock_mode_index = 0;
277 			rdev->pm.dynpm_can_upclock = false;
278 			break;
279 		case DYNPM_ACTION_NONE:
280 		default:
281 			DRM_ERROR("Requested mode for undefined action\n");
282 			return;
283 		}
284 	} else {
285 		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
286 		/* for now just select the first power state and switch between clock modes */
287 		/* power state array is low to high, default is first (0) */
288 		if (rdev->pm.active_crtc_count > 1) {
289 			rdev->pm.requested_power_state_index = -1;
290 			/* start at 1 as we don't want the default mode */
291 			for (i = 1; i < rdev->pm.num_power_states; i++) {
292 				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
293 					continue;
294 				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
295 					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
296 					rdev->pm.requested_power_state_index = i;
297 					break;
298 				}
299 			}
300 			/* if nothing selected, grab the default state. */
301 			if (rdev->pm.requested_power_state_index == -1)
302 				rdev->pm.requested_power_state_index = 0;
303 		} else
304 			rdev->pm.requested_power_state_index = 1;
305 
306 		switch (rdev->pm.dynpm_planned_action) {
307 		case DYNPM_ACTION_MINIMUM:
308 			rdev->pm.requested_clock_mode_index = 0;
309 			rdev->pm.dynpm_can_downclock = false;
310 			break;
311 		case DYNPM_ACTION_DOWNCLOCK:
312 			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
313 				if (rdev->pm.current_clock_mode_index == 0) {
314 					rdev->pm.requested_clock_mode_index = 0;
315 					rdev->pm.dynpm_can_downclock = false;
316 				} else
317 					rdev->pm.requested_clock_mode_index =
318 						rdev->pm.current_clock_mode_index - 1;
319 			} else {
320 				rdev->pm.requested_clock_mode_index = 0;
321 				rdev->pm.dynpm_can_downclock = false;
322 			}
323 			/* skip this clock mode if crtcs are active and its no-display flag is set */
324 			if ((rdev->pm.active_crtc_count > 0) &&
325 			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
326 			     clock_info[rdev->pm.requested_clock_mode_index].flags &
327 			     RADEON_PM_MODE_NO_DISPLAY)) {
328 				rdev->pm.requested_clock_mode_index++;
329 			}
330 			break;
331 		case DYNPM_ACTION_UPCLOCK:
332 			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
333 				if (rdev->pm.current_clock_mode_index ==
334 				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
335 					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
336 					rdev->pm.dynpm_can_upclock = false;
337 				} else
338 					rdev->pm.requested_clock_mode_index =
339 						rdev->pm.current_clock_mode_index + 1;
340 			} else {
341 				rdev->pm.requested_clock_mode_index =
342 					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
343 				rdev->pm.dynpm_can_upclock = false;
344 			}
345 			break;
346 		case DYNPM_ACTION_DEFAULT:
347 			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
348 			rdev->pm.requested_clock_mode_index = 0;
349 			rdev->pm.dynpm_can_upclock = false;
350 			break;
351 		case DYNPM_ACTION_NONE:
352 		default:
353 			DRM_ERROR("Requested mode for undefined action\n");
354 			return;
355 		}
356 	}
357 
358 	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
359 		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
360 		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
361 		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
362 		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
363 		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
364 		  pcie_lanes);
365 }
366 
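/* Build the static PM profile table (default/low/mid/high, single and
 * multi-head) for rs780-class IGPs.  The indices chosen depend on how
 * many power states are available (2, 3, or more).
 */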
367 void rs780_pm_init_profile(struct radeon_device *rdev)
368 {
369 	if (rdev->pm.num_power_states == 2) {
370 		/* default */
371 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
372 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
373 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
374 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
375 		/* low sh */
376 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
377 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
378 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
379 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
380 		/* mid sh */
381 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
382 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
383 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
384 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
385 		/* high sh */
386 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
387 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
388 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
389 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
390 		/* low mh */
391 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
392 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
393 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
394 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
395 		/* mid mh */
396 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
397 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
398 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
399 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
400 		/* high mh */
401 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
402 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
403 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
404 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
405 	} else if (rdev->pm.num_power_states == 3) {
406 		/* default */
407 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
408 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
409 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
410 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
411 		/* low sh */
412 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
413 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
414 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
415 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
416 		/* mid sh */
417 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
418 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
419 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
420 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
421 		/* high sh */
422 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
423 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
424 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
425 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
426 		/* low mh */
427 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
428 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
429 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
430 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
431 		/* mid mh */
432 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
433 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
434 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
435 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
436 		/* high mh */
437 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
438 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
439 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
440 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
441 	} else {
442 		/* default */
443 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
444 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
445 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
446 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
447 		/* low sh */
448 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
449 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
450 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
451 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
452 		/* mid sh */
453 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
454 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
455 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
456 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
457 		/* high sh */
458 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
459 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
460 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
461 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
462 		/* low mh */
463 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
464 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
465 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
466 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
467 		/* mid mh */
468 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
469 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
470 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
471 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
472 		/* high mh */
473 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
474 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
475 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
476 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
477 	}
478 }
479 
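/* Build the static PM profile table.  CHIP_R600 mirrors the default
 * state for every profile; other asics pick battery or performance
 * states via radeon_pm_get_type_index() when at least four power
 * states are available.
 */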
480 void r600_pm_init_profile(struct radeon_device *rdev)
481 {
482 	int idx;
483 
484 	if (rdev->family == CHIP_R600) {
485 		/* XXX */
486 		/* default */
487 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
488 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
489 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
490 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
491 		/* low sh */
492 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
493 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
494 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
495 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
496 		/* mid sh */
497 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
498 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
499 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
500 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
501 		/* high sh */
502 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
503 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
504 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
505 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
506 		/* low mh */
507 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
508 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
509 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
510 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
511 		/* mid mh */
512 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
513 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
514 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
515 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
516 		/* high mh */
517 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
518 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
519 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
520 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
521 	} else {
522 		if (rdev->pm.num_power_states < 4) {
523 			/* default */
524 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
525 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
526 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
527 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
528 			/* low sh */
529 			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
530 			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
531 			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
532 			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
533 			/* mid sh */
534 			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
535 			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
536 			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
537 			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
538 			/* high sh */
539 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
540 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
541 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
542 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
543 			/* low mh */
544 			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
545 			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
546 			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
547 			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
548 			/* mid mh */
549 			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
550 			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
551 			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
552 			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
553 			/* high mh */
554 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
555 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
556 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
557 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
558 		} else {
559 			/* default */
560 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
561 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
562 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
563 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
564 			/* low sh */
565 			if (rdev->flags & RADEON_IS_MOBILITY)
566 				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
567 			else
568 				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
569 			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
570 			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
571 			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
572 			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
573 			/* mid sh */
574 			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
575 			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
576 			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
577 			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
578 			/* high sh */
579 			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
580 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
581 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
582 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
583 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
584 			/* low mh */
585 			if (rdev->flags & RADEON_IS_MOBILITY)
586 				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
587 			else
588 				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
589 			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
590 			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
591 			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
592 			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
593 			/* mid mh */
594 			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
595 			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
596 			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
597 			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
598 			/* high mh */
599 			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
600 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
601 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
602 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
603 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
604 		}
605 	}
606 }
607 
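/* Apply the software-controlled VDDC voltage for the requested power
 * state when it differs from the current one.  0xff01 is a flag value
 * rather than a real voltage and is left alone.
 */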
608 void r600_pm_misc(struct radeon_device *rdev)
609 {
610 	int req_ps_idx = rdev->pm.requested_power_state_index;
611 	int req_cm_idx = rdev->pm.requested_clock_mode_index;
612 	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
613 	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
614 
615 	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
616 		/* 0xff01 is a flag rather than an actual voltage */
617 		if (voltage->voltage == 0xff01)
618 			return;
619 		if (voltage->voltage != rdev->pm.current_vddc) {
620 			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
621 			rdev->pm.current_vddc = voltage->voltage;
622 			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
623 		}
624 	}
625 }
626 
627 bool r600_gui_idle(struct radeon_device *rdev)
628 {
629 	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
630 		return false;
631 	else
632 		return true;
633 }
634 
635 /* hpd for digital panel detect/disconnect */
636 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
637 {
638 	bool connected = false;
639 
640 	if (ASIC_IS_DCE3(rdev)) {
641 		switch (hpd) {
642 		case RADEON_HPD_1:
643 			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
644 				connected = true;
645 			break;
646 		case RADEON_HPD_2:
647 			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
648 				connected = true;
649 			break;
650 		case RADEON_HPD_3:
651 			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
652 				connected = true;
653 			break;
654 		case RADEON_HPD_4:
655 			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
656 				connected = true;
657 			break;
658 			/* DCE 3.2 */
659 		case RADEON_HPD_5:
660 			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
661 				connected = true;
662 			break;
663 		case RADEON_HPD_6:
664 			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
665 				connected = true;
666 			break;
667 		default:
668 			break;
669 		}
670 	} else {
671 		switch (hpd) {
672 		case RADEON_HPD_1:
673 			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
674 				connected = true;
675 			break;
676 		case RADEON_HPD_2:
677 			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
678 				connected = true;
679 			break;
680 		case RADEON_HPD_3:
681 			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
682 				connected = true;
683 			break;
684 		default:
685 			break;
686 		}
687 	}
688 	return connected;
689 }
690 
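/* Program the HPD interrupt polarity for the given pad based on the
 * currently sensed state, so the next interrupt fires when that state
 * changes (connect vs. disconnect).
 */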
691 void r600_hpd_set_polarity(struct radeon_device *rdev,
692 			   enum radeon_hpd_id hpd)
693 {
694 	u32 tmp;
695 	bool connected = r600_hpd_sense(rdev, hpd);
696 
697 	if (ASIC_IS_DCE3(rdev)) {
698 		switch (hpd) {
699 		case RADEON_HPD_1:
700 			tmp = RREG32(DC_HPD1_INT_CONTROL);
701 			if (connected)
702 				tmp &= ~DC_HPDx_INT_POLARITY;
703 			else
704 				tmp |= DC_HPDx_INT_POLARITY;
705 			WREG32(DC_HPD1_INT_CONTROL, tmp);
706 			break;
707 		case RADEON_HPD_2:
708 			tmp = RREG32(DC_HPD2_INT_CONTROL);
709 			if (connected)
710 				tmp &= ~DC_HPDx_INT_POLARITY;
711 			else
712 				tmp |= DC_HPDx_INT_POLARITY;
713 			WREG32(DC_HPD2_INT_CONTROL, tmp);
714 			break;
715 		case RADEON_HPD_3:
716 			tmp = RREG32(DC_HPD3_INT_CONTROL);
717 			if (connected)
718 				tmp &= ~DC_HPDx_INT_POLARITY;
719 			else
720 				tmp |= DC_HPDx_INT_POLARITY;
721 			WREG32(DC_HPD3_INT_CONTROL, tmp);
722 			break;
723 		case RADEON_HPD_4:
724 			tmp = RREG32(DC_HPD4_INT_CONTROL);
725 			if (connected)
726 				tmp &= ~DC_HPDx_INT_POLARITY;
727 			else
728 				tmp |= DC_HPDx_INT_POLARITY;
729 			WREG32(DC_HPD4_INT_CONTROL, tmp);
730 			break;
731 		case RADEON_HPD_5:
732 			tmp = RREG32(DC_HPD5_INT_CONTROL);
733 			if (connected)
734 				tmp &= ~DC_HPDx_INT_POLARITY;
735 			else
736 				tmp |= DC_HPDx_INT_POLARITY;
737 			WREG32(DC_HPD5_INT_CONTROL, tmp);
738 			break;
739 			/* DCE 3.2 */
740 		case RADEON_HPD_6:
741 			tmp = RREG32(DC_HPD6_INT_CONTROL);
742 			if (connected)
743 				tmp &= ~DC_HPDx_INT_POLARITY;
744 			else
745 				tmp |= DC_HPDx_INT_POLARITY;
746 			WREG32(DC_HPD6_INT_CONTROL, tmp);
747 			break;
748 		default:
749 			break;
750 		}
751 	} else {
752 		switch (hpd) {
753 		case RADEON_HPD_1:
754 			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
755 			if (connected)
756 				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
757 			else
758 				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
759 			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
760 			break;
761 		case RADEON_HPD_2:
762 			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
763 			if (connected)
764 				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
765 			else
766 				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
767 			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
768 			break;
769 		case RADEON_HPD_3:
770 			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
771 			if (connected)
772 				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
773 			else
774 				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
775 			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
776 			break;
777 		default:
778 			break;
779 		}
780 	}
781 }
782 
783 void r600_hpd_init(struct radeon_device *rdev)
784 {
785 	struct drm_device *dev = rdev->ddev;
786 	struct drm_connector *connector;
787 	unsigned enable = 0;
788 
789 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
790 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
791 
792 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
793 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
794 			/* don't try to enable hpd on eDP or LVDS; this avoids breaking the
795 			 * aux dp channel on iMacs and helps (but does not completely fix)
796 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
797 			 */
798 			continue;
799 		}
800 		if (ASIC_IS_DCE3(rdev)) {
801 			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
802 			if (ASIC_IS_DCE32(rdev))
803 				tmp |= DC_HPDx_EN;
804 
805 			switch (radeon_connector->hpd.hpd) {
806 			case RADEON_HPD_1:
807 				WREG32(DC_HPD1_CONTROL, tmp);
808 				break;
809 			case RADEON_HPD_2:
810 				WREG32(DC_HPD2_CONTROL, tmp);
811 				break;
812 			case RADEON_HPD_3:
813 				WREG32(DC_HPD3_CONTROL, tmp);
814 				break;
815 			case RADEON_HPD_4:
816 				WREG32(DC_HPD4_CONTROL, tmp);
817 				break;
818 				/* DCE 3.2 */
819 			case RADEON_HPD_5:
820 				WREG32(DC_HPD5_CONTROL, tmp);
821 				break;
822 			case RADEON_HPD_6:
823 				WREG32(DC_HPD6_CONTROL, tmp);
824 				break;
825 			default:
826 				break;
827 			}
828 		} else {
829 			switch (radeon_connector->hpd.hpd) {
830 			case RADEON_HPD_1:
831 				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
832 				break;
833 			case RADEON_HPD_2:
834 				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
835 				break;
836 			case RADEON_HPD_3:
837 				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
838 				break;
839 			default:
840 				break;
841 			}
842 		}
843 		enable |= 1 << radeon_connector->hpd.hpd;
844 		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
845 	}
846 	radeon_irq_kms_enable_hpd(rdev, enable);
847 }
848 
849 void r600_hpd_fini(struct radeon_device *rdev)
850 {
851 	struct drm_device *dev = rdev->ddev;
852 	struct drm_connector *connector;
853 	unsigned disable = 0;
854 
855 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
856 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
857 		if (ASIC_IS_DCE3(rdev)) {
858 			switch (radeon_connector->hpd.hpd) {
859 			case RADEON_HPD_1:
860 				WREG32(DC_HPD1_CONTROL, 0);
861 				break;
862 			case RADEON_HPD_2:
863 				WREG32(DC_HPD2_CONTROL, 0);
864 				break;
865 			case RADEON_HPD_3:
866 				WREG32(DC_HPD3_CONTROL, 0);
867 				break;
868 			case RADEON_HPD_4:
869 				WREG32(DC_HPD4_CONTROL, 0);
870 				break;
871 				/* DCE 3.2 */
872 			case RADEON_HPD_5:
873 				WREG32(DC_HPD5_CONTROL, 0);
874 				break;
875 			case RADEON_HPD_6:
876 				WREG32(DC_HPD6_CONTROL, 0);
877 				break;
878 			default:
879 				break;
880 			}
881 		} else {
882 			switch (radeon_connector->hpd.hpd) {
883 			case RADEON_HPD_1:
884 				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
885 				break;
886 			case RADEON_HPD_2:
887 				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
888 				break;
889 			case RADEON_HPD_3:
890 				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
891 				break;
892 			default:
893 				break;
894 			}
895 		}
896 		disable |= 1 << radeon_connector->hpd.hpd;
897 	}
898 	radeon_irq_kms_disable_hpd(rdev, disable);
899 }
900 
901 /*
902  * R600 PCIE GART
903  */
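/* Flush the HDP cache so CPU writes to the GART table reach vram, then
 * invalidate the VM context 0 TLB and poll VM_CONTEXT0_REQUEST_RESPONSE
 * until the invalidation completes (or the usec timeout expires).
 */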
904 void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
905 {
906 	unsigned i;
907 	u32 tmp;
908 
909 	/* flush hdp cache so updates hit vram */
910 	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
911 	    !(rdev->flags & RADEON_IS_AGP)) {
912 		void __iomem *ptr = (void *)rdev->gart.ptr;
913 		u32 tmp;
914 
915 		/* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read
916 		 * rather than writing HDP_REG_COHERENCY_FLUSH_CNTL.
917 		 * This workaround seems to cause problems on some AGP cards, so
918 		 * just use the old method for them.
919 		 */
920 		WREG32(HDP_DEBUG1, 0);
921 		tmp = readl((void __iomem *)ptr);
922 	} else
923 		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
924 
925 	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
926 	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
927 	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
928 	for (i = 0; i < rdev->usec_timeout; i++) {
929 		/* poll the TLB invalidation response */
930 		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
931 		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
932 		if (tmp == 2) {
933 			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
934 			return;
935 		}
936 		if (tmp) {
937 			return;
938 		}
939 		udelay(1);
940 	}
941 }
942 
943 int r600_pcie_gart_init(struct radeon_device *rdev)
944 {
945 	int r;
946 
947 	if (rdev->gart.robj) {
948 		WARN(1, "R600 PCIE GART already initialized\n");
949 		return 0;
950 	}
951 	/* Initialize common gart structure */
952 	r = radeon_gart_init(rdev);
953 	if (r)
954 		return r;
955 	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
956 	return radeon_gart_table_vram_alloc(rdev);
957 }
958 
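/* Enable the PCIE GART: pin the page table in VRAM, program the L2
 * cache and L1 TLBs, point VM context 0 at the GTT range and page
 * table, then flush the TLB and mark the GART ready.
 */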
959 static int r600_pcie_gart_enable(struct radeon_device *rdev)
960 {
961 	u32 tmp;
962 	int r, i;
963 
964 	if (rdev->gart.robj == NULL) {
965 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
966 		return -EINVAL;
967 	}
968 	r = radeon_gart_table_vram_pin(rdev);
969 	if (r)
970 		return r;
971 	radeon_gart_restore(rdev);
972 
973 	/* Setup L2 cache */
974 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
975 				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
976 				EFFECTIVE_L2_QUEUE_SIZE(7));
977 	WREG32(VM_L2_CNTL2, 0);
978 	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
979 	/* Setup TLB control */
980 	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
981 		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
982 		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
983 		ENABLE_WAIT_L2_QUERY;
984 	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
985 	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
986 	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
987 	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
988 	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
989 	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
990 	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
991 	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
992 	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
993 	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
994 	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
995 	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
996 	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
997 	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
998 	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
999 	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
1000 	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
1001 	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
1002 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
1003 	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
1004 			(u32)(rdev->dummy_page.addr >> 12));
1005 	for (i = 1; i < 7; i++)
1006 		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1007 
1008 	r600_pcie_gart_tlb_flush(rdev);
1009 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1010 		 (unsigned)(rdev->mc.gtt_size >> 20),
1011 		 (unsigned long long)rdev->gart.table_addr);
1012 	rdev->gart.ready = true;
1013 	return 0;
1014 }
1015 
1016 static void r600_pcie_gart_disable(struct radeon_device *rdev)
1017 {
1018 	u32 tmp;
1019 	int i;
1020 
1021 	/* Disable all tables */
1022 	for (i = 0; i < 7; i++)
1023 		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1024 
1025 	/* Disable L2 cache */
1026 	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
1027 				EFFECTIVE_L2_QUEUE_SIZE(7));
1028 	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
1029 	/* Setup L1 TLB control */
1030 	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
1031 		ENABLE_WAIT_L2_QUERY;
1032 	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1033 	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1034 	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1035 	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1036 	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1037 	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1038 	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1039 	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1040 	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
1041 	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
1042 	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
1043 	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1044 	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
1045 	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1046 	radeon_gart_table_vram_unpin(rdev);
1047 }
1048 
1049 static void r600_pcie_gart_fini(struct radeon_device *rdev)
1050 {
1051 	radeon_gart_fini(rdev);
1052 	r600_pcie_gart_disable(rdev);
1053 	radeon_gart_table_vram_free(rdev);
1054 }
1055 
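/* AGP setup: same L2/L1 TLB programming as the PCIE GART path, but all
 * VM page table contexts are left disabled (the AGP aperture provides
 * the translation instead).
 */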
1056 static void r600_agp_enable(struct radeon_device *rdev)
1057 {
1058 	u32 tmp;
1059 	int i;
1060 
1061 	/* Setup L2 cache */
1062 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1063 				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1064 				EFFECTIVE_L2_QUEUE_SIZE(7));
1065 	WREG32(VM_L2_CNTL2, 0);
1066 	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
1067 	/* Setup TLB control */
1068 	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1069 		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1070 		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
1071 		ENABLE_WAIT_L2_QUERY;
1072 	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
1073 	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1074 	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
1075 	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1076 	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1077 	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1078 	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1079 	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1080 	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1081 	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1082 	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1083 	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1084 	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1085 	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1086 	for (i = 0; i < 7; i++)
1087 		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1088 }
1089 
1090 int r600_mc_wait_for_idle(struct radeon_device *rdev)
1091 {
1092 	unsigned i;
1093 	u32 tmp;
1094 
1095 	for (i = 0; i < rdev->usec_timeout; i++) {
1096 		/* check the MC busy bits in SRBM_STATUS */
1097 		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
1098 		if (!tmp)
1099 			return 0;
1100 		udelay(1);
1101 	}
1102 	return -1;
1103 }
1104 
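/* rs780-family MC registers are accessed indirectly through the
 * MC_INDEX/MC_DATA pair; mc_idx_lock serializes the index/data sequence.
 */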
1105 uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
1106 {
1107 	unsigned long flags;
1108 	uint32_t r;
1109 
1110 	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
1111 	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
1112 	r = RREG32(R_0028FC_MC_DATA);
1113 	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
1114 	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
1115 	return r;
1116 }
1117 
1118 void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1119 {
1120 	unsigned long flags;
1121 
1122 	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
1123 	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
1124 		S_0028F8_MC_IND_WR_EN(1));
1125 	WREG32(R_0028FC_MC_DATA, v);
1126 	WREG32(R_0028F8_MC_INDEX, 0x7F);
1127 	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
1128 }
1129 
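/* Program the memory controller: initialize the HDP registers, stop the
 * MC while reprogramming, lock out the VGA aperture, set up the
 * system/VRAM/AGP aperture ranges, then resume the MC and disable the
 * VGA renderer so it cannot overwrite driver-owned VRAM.
 */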
1130 static void r600_mc_program(struct radeon_device *rdev)
1131 {
1132 	struct rv515_mc_save save;
1133 	u32 tmp;
1134 	int i, j;
1135 
1136 	/* Initialize HDP */
1137 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1138 		WREG32((0x2c14 + j), 0x00000000);
1139 		WREG32((0x2c18 + j), 0x00000000);
1140 		WREG32((0x2c1c + j), 0x00000000);
1141 		WREG32((0x2c20 + j), 0x00000000);
1142 		WREG32((0x2c24 + j), 0x00000000);
1143 	}
1144 	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1145 
1146 	rv515_mc_stop(rdev, &save);
1147 	if (r600_mc_wait_for_idle(rdev)) {
1148 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1149 	}
1150 	/* Lockout access through VGA aperture (doesn't exist before R600) */
1151 	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1152 	/* Update configuration */
1153 	if (rdev->flags & RADEON_IS_AGP) {
1154 		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1155 			/* VRAM before AGP */
1156 			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1157 				rdev->mc.vram_start >> 12);
1158 			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1159 				rdev->mc.gtt_end >> 12);
1160 		} else {
1161 			/* VRAM after AGP */
1162 			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1163 				rdev->mc.gtt_start >> 12);
1164 			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1165 				rdev->mc.vram_end >> 12);
1166 		}
1167 	} else {
1168 		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
1169 		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
1170 	}
1171 	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
1172 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1173 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1174 	WREG32(MC_VM_FB_LOCATION, tmp);
1175 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
1176 	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
1177 	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
1178 	if (rdev->flags & RADEON_IS_AGP) {
1179 		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
1180 		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
1181 		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1182 	} else {
1183 		WREG32(MC_VM_AGP_BASE, 0);
1184 		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1185 		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1186 	}
1187 	if (r600_mc_wait_for_idle(rdev)) {
1188 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1189 	}
1190 	rv515_mc_resume(rdev, &save);
1191 	/* we need to own VRAM, so turn off the VGA renderer here
1192 	 * to stop it overwriting our objects */
1193 	rv515_vga_render_disable(rdev);
1194 }
1195 
1196 /**
1197  * r600_vram_gtt_location - try to find VRAM & GTT location
1198  * @rdev: radeon device structure holding all necessary information
1199  * @mc: memory controller structure holding memory information
1200  *
1201  * This function tries to place VRAM at the same address as in the CPU (PCI)
1202  * address space, since some GPUs seem to have issues when it is reprogrammed
1203  * to a different address space.
1204  *
1205  * If there is not enough space to fit the non-CPU-visible VRAM after the
1206  * aperture, the VRAM size is limited to the aperture.
1207  *
1208  * If we are using AGP, VRAM is placed adjacent to the AGP aperture, as the
1209  * GPU needs both to be contiguous from its point of view so that it can be
1210  * programmed to catch accesses outside of them.
1211  *
1212  * This function never fails; in the worst case it limits VRAM or GTT.
1213  *
1214  * Note: GTT start, end, size should be initialized before calling this
1215  * function on AGP platform.
1216  */
1217 static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1218 {
1219 	u64 size_bf, size_af;
1220 
1221 	if (mc->mc_vram_size > 0xE0000000) {
1222 		/* leave room for at least 512M GTT */
1223 		dev_warn(rdev->dev, "limiting VRAM\n");
1224 		mc->real_vram_size = 0xE0000000;
1225 		mc->mc_vram_size = 0xE0000000;
1226 	}
1227 	if (rdev->flags & RADEON_IS_AGP) {
1228 		size_bf = mc->gtt_start;
1229 		size_af = mc->mc_mask - mc->gtt_end;
1230 		if (size_bf > size_af) {
1231 			if (mc->mc_vram_size > size_bf) {
1232 				dev_warn(rdev->dev, "limiting VRAM\n");
1233 				mc->real_vram_size = size_bf;
1234 				mc->mc_vram_size = size_bf;
1235 			}
1236 			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
1237 		} else {
1238 			if (mc->mc_vram_size > size_af) {
1239 				dev_warn(rdev->dev, "limiting VRAM\n");
1240 				mc->real_vram_size = size_af;
1241 				mc->mc_vram_size = size_af;
1242 			}
1243 			mc->vram_start = mc->gtt_end + 1;
1244 		}
1245 		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1246 		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
1247 				mc->mc_vram_size >> 20, mc->vram_start,
1248 				mc->vram_end, mc->real_vram_size >> 20);
1249 	} else {
1250 		u64 base = 0;
1251 		if (rdev->flags & RADEON_IS_IGP) {
1252 			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
1253 			base <<= 24;
1254 		}
1255 		radeon_vram_location(rdev, &rdev->mc, base);
1256 		rdev->mc.gtt_base_align = 0;
1257 		radeon_gtt_location(rdev, mc);
1258 	}
1259 }
1260 
1261 static int r600_mc_init(struct radeon_device *rdev)
1262 {
1263 	u32 tmp;
1264 	int chansize, numchan;
1265 	uint32_t h_addr, l_addr;
1266 	unsigned long long k8_addr;
1267 
1268 	/* Get VRAM information */
1269 	rdev->mc.vram_is_ddr = true;
1270 	tmp = RREG32(RAMCFG);
1271 	if (tmp & CHANSIZE_OVERRIDE) {
1272 		chansize = 16;
1273 	} else if (tmp & CHANSIZE_MASK) {
1274 		chansize = 64;
1275 	} else {
1276 		chansize = 32;
1277 	}
1278 	tmp = RREG32(CHMAP);
1279 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1280 	case 0:
1281 	default:
1282 		numchan = 1;
1283 		break;
1284 	case 1:
1285 		numchan = 2;
1286 		break;
1287 	case 2:
1288 		numchan = 4;
1289 		break;
1290 	case 3:
1291 		numchan = 8;
1292 		break;
1293 	}
1294 	rdev->mc.vram_width = numchan * chansize;
1295 	/* Could the aperture size report 0? */
1296 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
1297 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
1298 	/* Setup GPU memory space */
1299 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1300 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1301 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
1302 	r600_vram_gtt_location(rdev, &rdev->mc);
1303 
1304 	if (rdev->flags & RADEON_IS_IGP) {
1305 		rs690_pm_info(rdev);
1306 		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
1307 
1308 		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
1309 			/* Use K8 direct mapping for fast fb access. */
1310 			rdev->fastfb_working = false;
1311 			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
1312 			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
1313 			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
1314 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
1315 			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
1316 #endif
1317 			{
1318 				/* FastFB is only usable with UMA memory, so it is simply
1319 				 * disabled when sideport memory is present.
1320 				 */
1321 				if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
1322 					DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
1323 						(unsigned long long)rdev->mc.aper_base, k8_addr);
1324 					rdev->mc.aper_base = (resource_size_t)k8_addr;
1325 					rdev->fastfb_working = true;
1326 				}
1327 			}
1328 		}
1329 	}
1330 
1331 	radeon_update_bandwidth_info(rdev);
1332 	return 0;
1333 }
1334 
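/* Allocate, pin and kmap a one-page VRAM scratch buffer.  Its GPU
 * address is used as the system aperture default address in
 * r600_mc_program(), giving stray MC accesses a harmless target.
 */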
1335 int r600_vram_scratch_init(struct radeon_device *rdev)
1336 {
1337 	int r;
1338 
1339 	if (rdev->vram_scratch.robj == NULL) {
1340 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
1341 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
1342 				     NULL, &rdev->vram_scratch.robj);
1343 		if (r) {
1344 			return r;
1345 		}
1346 	}
1347 
1348 	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1349 	if (unlikely(r != 0))
1350 		return r;
1351 	r = radeon_bo_pin(rdev->vram_scratch.robj,
1352 			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
1353 	if (r) {
1354 		radeon_bo_unreserve(rdev->vram_scratch.robj);
1355 		return r;
1356 	}
1357 	r = radeon_bo_kmap(rdev->vram_scratch.robj,
1358 				(void **)&rdev->vram_scratch.ptr);
1359 	if (r)
1360 		radeon_bo_unpin(rdev->vram_scratch.robj);
1361 	radeon_bo_unreserve(rdev->vram_scratch.robj);
1362 
1363 	return r;
1364 }
1365 
1366 void r600_vram_scratch_fini(struct radeon_device *rdev)
1367 {
1368 	int r;
1369 
1370 	if (rdev->vram_scratch.robj == NULL) {
1371 		return;
1372 	}
1373 	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
1374 	if (likely(r == 0)) {
1375 		radeon_bo_kunmap(rdev->vram_scratch.robj);
1376 		radeon_bo_unpin(rdev->vram_scratch.robj);
1377 		radeon_bo_unreserve(rdev->vram_scratch.robj);
1378 	}
1379 	radeon_bo_unref(&rdev->vram_scratch.robj);
1380 }
1381 
1382 void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
1383 {
1384 	u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
1385 
1386 	if (hung)
1387 		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
1388 	else
1389 		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
1390 
1391 	WREG32(R600_BIOS_3_SCRATCH, tmp);
1392 }
1393 
1394 static void r600_print_gpu_status_regs(struct radeon_device *rdev)
1395 {
1396 	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
1397 		 RREG32(R_008010_GRBM_STATUS));
1398 	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
1399 		 RREG32(R_008014_GRBM_STATUS2));
1400 	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
1401 		 RREG32(R_000E50_SRBM_STATUS));
1402 	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1403 		 RREG32(CP_STALLED_STAT1));
1404 	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
1405 		 RREG32(CP_STALLED_STAT2));
1406 	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
1407 		 RREG32(CP_BUSY_STAT));
1408 	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
1409 		 RREG32(CP_STAT));
1410 	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
1411 		RREG32(DMA_STATUS_REG));
1412 }
1413 
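/* Check whether the enabled crtcs are still scanning out by sampling
 * their H/V counters several times; a counter that never changes marks
 * the display as hung.
 */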
1414 static bool r600_is_display_hung(struct radeon_device *rdev)
1415 {
1416 	u32 crtc_hung = 0;
1417 	u32 crtc_status[2];
1418 	u32 i, j, tmp;
1419 
1420 	for (i = 0; i < rdev->num_crtc; i++) {
1421 		if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
1422 			crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1423 			crtc_hung |= (1 << i);
1424 		}
1425 	}
1426 
1427 	for (j = 0; j < 10; j++) {
1428 		for (i = 0; i < rdev->num_crtc; i++) {
1429 			if (crtc_hung & (1 << i)) {
1430 				tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1431 				if (tmp != crtc_status[i])
1432 					crtc_hung &= ~(1 << i);
1433 			}
1434 		}
1435 		if (crtc_hung == 0)
1436 			return false;
1437 		udelay(100);
1438 	}
1439 
1440 	return true;
1441 }
1442 
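/* Build a mask of blocks (GFX, CP, DMA, RLC, IH, SEM, GRBM, VMC,
 * display) that look busy or hung according to the GRBM, DMA and SRBM
 * status registers.  A busy MC is deliberately not flagged for reset.
 */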
1443 u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
1444 {
1445 	u32 reset_mask = 0;
1446 	u32 tmp;
1447 
1448 	/* GRBM_STATUS */
1449 	tmp = RREG32(R_008010_GRBM_STATUS);
1450 	if (rdev->family >= CHIP_RV770) {
1451 		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1452 		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1453 		    G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1454 		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1455 		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1456 			reset_mask |= RADEON_RESET_GFX;
1457 	} else {
1458 		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1459 		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1460 		    G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1461 		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1462 		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1463 			reset_mask |= RADEON_RESET_GFX;
1464 	}
1465 
1466 	if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
1467 	    G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
1468 		reset_mask |= RADEON_RESET_CP;
1469 
1470 	if (G_008010_GRBM_EE_BUSY(tmp))
1471 		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
1472 
1473 	/* DMA_STATUS_REG */
1474 	tmp = RREG32(DMA_STATUS_REG);
1475 	if (!(tmp & DMA_IDLE))
1476 		reset_mask |= RADEON_RESET_DMA;
1477 
1478 	/* SRBM_STATUS */
1479 	tmp = RREG32(R_000E50_SRBM_STATUS);
1480 	if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
1481 		reset_mask |= RADEON_RESET_RLC;
1482 
1483 	if (G_000E50_IH_BUSY(tmp))
1484 		reset_mask |= RADEON_RESET_IH;
1485 
1486 	if (G_000E50_SEM_BUSY(tmp))
1487 		reset_mask |= RADEON_RESET_SEM;
1488 
1489 	if (G_000E50_GRBM_RQ_PENDING(tmp))
1490 		reset_mask |= RADEON_RESET_GRBM;
1491 
1492 	if (G_000E50_VMC_BUSY(tmp))
1493 		reset_mask |= RADEON_RESET_VMC;
1494 
1495 	if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
1496 	    G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
1497 	    G_000E50_MCDW_BUSY(tmp))
1498 		reset_mask |= RADEON_RESET_MC;
1499 
1500 	if (r600_is_display_hung(rdev))
1501 		reset_mask |= RADEON_RESET_DISPLAY;
1502 
1503 	/* Skip MC reset as it's most likely not hung, just busy */
1504 	if (reset_mask & RADEON_RESET_MC) {
1505 		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1506 		reset_mask &= ~RADEON_RESET_MC;
1507 	}
1508 
1509 	return reset_mask;
1510 }
1511 
1512 static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1513 {
1514 	struct rv515_mc_save save;
1515 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
1516 	u32 tmp;
1517 
1518 	if (reset_mask == 0)
1519 		return;
1520 
1521 	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1522 
1523 	r600_print_gpu_status_regs(rdev);
1524 
1525 	/* Disable CP parsing/prefetching */
1526 	if (rdev->family >= CHIP_RV770)
1527 		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
1528 	else
1529 		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1530 
1531 	/* disable the RLC */
1532 	WREG32(RLC_CNTL, 0);
1533 
1534 	if (reset_mask & RADEON_RESET_DMA) {
1535 		/* Disable DMA */
1536 		tmp = RREG32(DMA_RB_CNTL);
1537 		tmp &= ~DMA_RB_ENABLE;
1538 		WREG32(DMA_RB_CNTL, tmp);
1539 	}
1540 
1541 	mdelay(50);
1542 
1543 	rv515_mc_stop(rdev, &save);
1544 	if (r600_mc_wait_for_idle(rdev)) {
1545 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1546 	}
1547 
1548 	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
1549 		if (rdev->family >= CHIP_RV770)
1550 			grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
1551 				S_008020_SOFT_RESET_CB(1) |
1552 				S_008020_SOFT_RESET_PA(1) |
1553 				S_008020_SOFT_RESET_SC(1) |
1554 				S_008020_SOFT_RESET_SPI(1) |
1555 				S_008020_SOFT_RESET_SX(1) |
1556 				S_008020_SOFT_RESET_SH(1) |
1557 				S_008020_SOFT_RESET_TC(1) |
1558 				S_008020_SOFT_RESET_TA(1) |
1559 				S_008020_SOFT_RESET_VC(1) |
1560 				S_008020_SOFT_RESET_VGT(1);
1561 		else
1562 			grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
1563 				S_008020_SOFT_RESET_DB(1) |
1564 				S_008020_SOFT_RESET_CB(1) |
1565 				S_008020_SOFT_RESET_PA(1) |
1566 				S_008020_SOFT_RESET_SC(1) |
1567 				S_008020_SOFT_RESET_SMX(1) |
1568 				S_008020_SOFT_RESET_SPI(1) |
1569 				S_008020_SOFT_RESET_SX(1) |
1570 				S_008020_SOFT_RESET_SH(1) |
1571 				S_008020_SOFT_RESET_TC(1) |
1572 				S_008020_SOFT_RESET_TA(1) |
1573 				S_008020_SOFT_RESET_VC(1) |
1574 				S_008020_SOFT_RESET_VGT(1);
1575 	}
1576 
1577 	if (reset_mask & RADEON_RESET_CP) {
1578 		grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
1579 			S_008020_SOFT_RESET_VGT(1);
1580 
1581 		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1582 	}
1583 
1584 	if (reset_mask & RADEON_RESET_DMA) {
1585 		if (rdev->family >= CHIP_RV770)
1586 			srbm_soft_reset |= RV770_SOFT_RESET_DMA;
1587 		else
1588 			srbm_soft_reset |= SOFT_RESET_DMA;
1589 	}
1590 
1591 	if (reset_mask & RADEON_RESET_RLC)
1592 		srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
1593 
1594 	if (reset_mask & RADEON_RESET_SEM)
1595 		srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
1596 
1597 	if (reset_mask & RADEON_RESET_IH)
1598 		srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
1599 
1600 	if (reset_mask & RADEON_RESET_GRBM)
1601 		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1602 
1603 	if (!(rdev->flags & RADEON_IS_IGP)) {
1604 		if (reset_mask & RADEON_RESET_MC)
1605 			srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
1606 	}
1607 
1608 	if (reset_mask & RADEON_RESET_VMC)
1609 		srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
1610 
1611 	if (grbm_soft_reset) {
1612 		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1613 		tmp |= grbm_soft_reset;
1614 		dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1615 		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1616 		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1617 
1618 		udelay(50);
1619 
1620 		tmp &= ~grbm_soft_reset;
1621 		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1622 		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1623 	}
1624 
1625 	if (srbm_soft_reset) {
1626 		tmp = RREG32(SRBM_SOFT_RESET);
1627 		tmp |= srbm_soft_reset;
1628 		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1629 		WREG32(SRBM_SOFT_RESET, tmp);
1630 		tmp = RREG32(SRBM_SOFT_RESET);
1631 
1632 		udelay(50);
1633 
1634 		tmp &= ~srbm_soft_reset;
1635 		WREG32(SRBM_SOFT_RESET, tmp);
1636 		tmp = RREG32(SRBM_SOFT_RESET);
1637 	}
1638 
1639 	/* Wait a little for things to settle down */
1640 	mdelay(1);
1641 
1642 	rv515_mc_resume(rdev, &save);
1643 	udelay(50);
1644 
1645 	r600_print_gpu_status_regs(rdev);
1646 }
1647 
1648 static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
1649 {
1650 	struct rv515_mc_save save;
1651 	u32 tmp, i;
1652 
1653 	dev_info(rdev->dev, "GPU pci config reset\n");
1654 
1655 	/* disable dpm? */
1656 
1657 	/* Disable CP parsing/prefetching */
1658 	if (rdev->family >= CHIP_RV770)
1659 		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
1660 	else
1661 		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1662 
1663 	/* disable the RLC */
1664 	WREG32(RLC_CNTL, 0);
1665 
1666 	/* Disable DMA */
1667 	tmp = RREG32(DMA_RB_CNTL);
1668 	tmp &= ~DMA_RB_ENABLE;
1669 	WREG32(DMA_RB_CNTL, tmp);
1670 
1671 	mdelay(50);
1672 
1673 	/* set mclk/sclk to bypass */
1674 	if (rdev->family >= CHIP_RV770)
1675 		rv770_set_clk_bypass_mode(rdev);
1676 	/* disable BM */
1677 	pci_clear_master(rdev->pdev);
1678 	/* disable mem access */
1679 	rv515_mc_stop(rdev, &save);
1680 	if (r600_mc_wait_for_idle(rdev)) {
1681 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1682 	}
1683 
1684 	/* BIF reset workaround.  Not sure if this is needed on 6xx */
1685 	tmp = RREG32(BUS_CNTL);
1686 	tmp |= VGA_COHE_SPEC_TIMER_DIS;
1687 	WREG32(BUS_CNTL, tmp);
1688 
1689 	tmp = RREG32(BIF_SCRATCH0);
1690 
1691 	/* reset */
1692 	radeon_pci_config_reset(rdev);
1693 	mdelay(1);
1694 
1695 	/* BIF reset workaround.  Not sure if this is needed on 6xx */
1696 	tmp = SOFT_RESET_BIF;
1697 	WREG32(SRBM_SOFT_RESET, tmp);
1698 	mdelay(1);
1699 	WREG32(SRBM_SOFT_RESET, 0);
1700 
1701 	/* wait for asic to come out of reset */
1702 	for (i = 0; i < rdev->usec_timeout; i++) {
1703 		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
1704 			break;
1705 		udelay(1);
1706 	}
1707 }
1708 
1709 int r600_asic_reset(struct radeon_device *rdev)
1710 {
1711 	u32 reset_mask;
1712 
1713 	reset_mask = r600_gpu_check_soft_reset(rdev);
1714 
1715 	if (reset_mask)
1716 		r600_set_bios_scratch_engine_hung(rdev, true);
1717 
1718 	/* try soft reset */
1719 	r600_gpu_soft_reset(rdev, reset_mask);
1720 
1721 	reset_mask = r600_gpu_check_soft_reset(rdev);
1722 
1723 	/* try pci config reset */
1724 	if (reset_mask && radeon_hard_reset)
1725 		r600_gpu_pci_config_reset(rdev);
1726 
1727 	reset_mask = r600_gpu_check_soft_reset(rdev);
1728 
1729 	if (!reset_mask)
1730 		r600_set_bios_scratch_engine_hung(rdev, false);
1731 
1732 	return 0;
1733 }
1734 
1735 /**
1736  * r600_gfx_is_lockup - Check if the GFX engine is locked up
1737  *
1738  * @rdev: radeon_device pointer
1739  * @ring: radeon_ring structure holding ring information
1740  *
1741  * Check if the GFX engine is locked up.
1742  * Returns true if the engine appears to be locked up, false if not.
1743  */
1744 bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1745 {
1746 	u32 reset_mask = r600_gpu_check_soft_reset(rdev);
1747 
1748 	if (!(reset_mask & (RADEON_RESET_GFX |
1749 			    RADEON_RESET_COMPUTE |
1750 			    RADEON_RESET_CP))) {
1751 		radeon_ring_lockup_update(rdev, ring);
1752 		return false;
1753 	}
1754 	/* force CP activities */
1755 	radeon_ring_force_activity(rdev, ring);
1756 	return radeon_ring_test_lockup(rdev, ring);
1757 }
1758 
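/**
 * r6xx_remap_render_backend - build the backend map for the active RBs
 *
 * @rdev: radeon_device pointer
 * @tiling_pipe_num: pipe tiling setting (log2 of the tiling pipe count)
 * @max_rb_num: number of render backends usable on this asic
 * @total_max_rb_num: total number of render backends for the family
 * @disabled_rb_mask: bitmask of render backends that are disabled
 *
 * Spreads the rendering pipes across the render backends that actually
 * exist, skipping the disabled ones, and returns the packed backend map
 * value that is merged into the tiling config.
 */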
1759 u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1760 			      u32 tiling_pipe_num,
1761 			      u32 max_rb_num,
1762 			      u32 total_max_rb_num,
1763 			      u32 disabled_rb_mask)
1764 {
1765 	u32 rendering_pipe_num, rb_num_width, req_rb_num;
1766 	u32 pipe_rb_ratio, pipe_rb_remain, tmp;
1767 	u32 data = 0, mask = 1 << (max_rb_num - 1);
1768 	unsigned i, j;
1769 
1770 	/* mask out the RBs that don't exist on that asic */
1771 	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
1772 	/* make sure at least one RB is available */
1773 	if ((tmp & 0xff) != 0xff)
1774 		disabled_rb_mask = tmp;
1775 
1776 	rendering_pipe_num = 1 << tiling_pipe_num;
1777 	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
1778 	BUG_ON(rendering_pipe_num < req_rb_num);
1779 
1780 	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
1781 	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
1782 
1783 	if (rdev->family <= CHIP_RV740) {
1784 		/* r6xx/r7xx */
1785 		rb_num_width = 2;
1786 	} else {
1787 		/* eg+ */
1788 		rb_num_width = 4;
1789 	}
1790 
1791 	for (i = 0; i < max_rb_num; i++) {
1792 		if (!(mask & disabled_rb_mask)) {
1793 			for (j = 0; j < pipe_rb_ratio; j++) {
1794 				data <<= rb_num_width;
1795 				data |= max_rb_num - i - 1;
1796 			}
1797 			if (pipe_rb_remain) {
1798 				data <<= rb_num_width;
1799 				data |= max_rb_num - i - 1;
1800 				pipe_rb_remain--;
1801 			}
1802 		}
1803 		mask >>= 1;
1804 	}
1805 
1806 	return data;
1807 }
1808 
1809 int r600_count_pipe_bits(uint32_t val)
1810 {
1811 	return hweight32(val);
1812 }
1813 
1814 static void r600_gpu_init(struct radeon_device *rdev)
1815 {
1816 	u32 tiling_config;
1817 	u32 ramcfg;
1818 	u32 cc_rb_backend_disable;
1819 	u32 cc_gc_shader_pipe_config;
1820 	u32 tmp;
1821 	int i, j;
1822 	u32 sq_config;
1823 	u32 sq_gpr_resource_mgmt_1 = 0;
1824 	u32 sq_gpr_resource_mgmt_2 = 0;
1825 	u32 sq_thread_resource_mgmt = 0;
1826 	u32 sq_stack_resource_mgmt_1 = 0;
1827 	u32 sq_stack_resource_mgmt_2 = 0;
1828 	u32 disabled_rb_mask;
1829 
1830 	rdev->config.r600.tiling_group_size = 256;
1831 	switch (rdev->family) {
1832 	case CHIP_R600:
1833 		rdev->config.r600.max_pipes = 4;
1834 		rdev->config.r600.max_tile_pipes = 8;
1835 		rdev->config.r600.max_simds = 4;
1836 		rdev->config.r600.max_backends = 4;
1837 		rdev->config.r600.max_gprs = 256;
1838 		rdev->config.r600.max_threads = 192;
1839 		rdev->config.r600.max_stack_entries = 256;
1840 		rdev->config.r600.max_hw_contexts = 8;
1841 		rdev->config.r600.max_gs_threads = 16;
1842 		rdev->config.r600.sx_max_export_size = 128;
1843 		rdev->config.r600.sx_max_export_pos_size = 16;
1844 		rdev->config.r600.sx_max_export_smx_size = 128;
1845 		rdev->config.r600.sq_num_cf_insts = 2;
1846 		break;
1847 	case CHIP_RV630:
1848 	case CHIP_RV635:
1849 		rdev->config.r600.max_pipes = 2;
1850 		rdev->config.r600.max_tile_pipes = 2;
1851 		rdev->config.r600.max_simds = 3;
1852 		rdev->config.r600.max_backends = 1;
1853 		rdev->config.r600.max_gprs = 128;
1854 		rdev->config.r600.max_threads = 192;
1855 		rdev->config.r600.max_stack_entries = 128;
1856 		rdev->config.r600.max_hw_contexts = 8;
1857 		rdev->config.r600.max_gs_threads = 4;
1858 		rdev->config.r600.sx_max_export_size = 128;
1859 		rdev->config.r600.sx_max_export_pos_size = 16;
1860 		rdev->config.r600.sx_max_export_smx_size = 128;
1861 		rdev->config.r600.sq_num_cf_insts = 2;
1862 		break;
1863 	case CHIP_RV610:
1864 	case CHIP_RV620:
1865 	case CHIP_RS780:
1866 	case CHIP_RS880:
1867 		rdev->config.r600.max_pipes = 1;
1868 		rdev->config.r600.max_tile_pipes = 1;
1869 		rdev->config.r600.max_simds = 2;
1870 		rdev->config.r600.max_backends = 1;
1871 		rdev->config.r600.max_gprs = 128;
1872 		rdev->config.r600.max_threads = 192;
1873 		rdev->config.r600.max_stack_entries = 128;
1874 		rdev->config.r600.max_hw_contexts = 4;
1875 		rdev->config.r600.max_gs_threads = 4;
1876 		rdev->config.r600.sx_max_export_size = 128;
1877 		rdev->config.r600.sx_max_export_pos_size = 16;
1878 		rdev->config.r600.sx_max_export_smx_size = 128;
1879 		rdev->config.r600.sq_num_cf_insts = 1;
1880 		break;
1881 	case CHIP_RV670:
1882 		rdev->config.r600.max_pipes = 4;
1883 		rdev->config.r600.max_tile_pipes = 4;
1884 		rdev->config.r600.max_simds = 4;
1885 		rdev->config.r600.max_backends = 4;
1886 		rdev->config.r600.max_gprs = 192;
1887 		rdev->config.r600.max_threads = 192;
1888 		rdev->config.r600.max_stack_entries = 256;
1889 		rdev->config.r600.max_hw_contexts = 8;
1890 		rdev->config.r600.max_gs_threads = 16;
1891 		rdev->config.r600.sx_max_export_size = 128;
1892 		rdev->config.r600.sx_max_export_pos_size = 16;
1893 		rdev->config.r600.sx_max_export_smx_size = 128;
1894 		rdev->config.r600.sq_num_cf_insts = 2;
1895 		break;
1896 	default:
1897 		break;
1898 	}
1899 
1900 	/* Initialize HDP */
1901 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1902 		WREG32((0x2c14 + j), 0x00000000);
1903 		WREG32((0x2c18 + j), 0x00000000);
1904 		WREG32((0x2c1c + j), 0x00000000);
1905 		WREG32((0x2c20 + j), 0x00000000);
1906 		WREG32((0x2c24 + j), 0x00000000);
1907 	}
1908 
1909 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1910 
1911 	/* Setup tiling */
1912 	tiling_config = 0;
1913 	ramcfg = RREG32(RAMCFG);
1914 	switch (rdev->config.r600.max_tile_pipes) {
1915 	case 1:
1916 		tiling_config |= PIPE_TILING(0);
1917 		break;
1918 	case 2:
1919 		tiling_config |= PIPE_TILING(1);
1920 		break;
1921 	case 4:
1922 		tiling_config |= PIPE_TILING(2);
1923 		break;
1924 	case 8:
1925 		tiling_config |= PIPE_TILING(3);
1926 		break;
1927 	default:
1928 		break;
1929 	}
1930 	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1931 	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1932 	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1933 	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1934 
1935 	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1936 	if (tmp > 3) {
1937 		tiling_config |= ROW_TILING(3);
1938 		tiling_config |= SAMPLE_SPLIT(3);
1939 	} else {
1940 		tiling_config |= ROW_TILING(tmp);
1941 		tiling_config |= SAMPLE_SPLIT(tmp);
1942 	}
1943 	tiling_config |= BANK_SWAPS(1);
1944 
1945 	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1946 	tmp = R6XX_MAX_BACKENDS -
1947 		r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
1948 	if (tmp < rdev->config.r600.max_backends) {
1949 		rdev->config.r600.max_backends = tmp;
1950 	}
1951 
1952 	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
1953 	tmp = R6XX_MAX_PIPES -
1954 		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
1955 	if (tmp < rdev->config.r600.max_pipes) {
1956 		rdev->config.r600.max_pipes = tmp;
1957 	}
1958 	tmp = R6XX_MAX_SIMDS -
1959 		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
1960 	if (tmp < rdev->config.r600.max_simds) {
1961 		rdev->config.r600.max_simds = tmp;
1962 	}
1963 
1964 	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
1965 	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
1966 	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
1967 					R6XX_MAX_BACKENDS, disabled_rb_mask);
1968 	tiling_config |= tmp << 16;
1969 	rdev->config.r600.backend_map = tmp;
1970 
1971 	rdev->config.r600.tile_config = tiling_config;
1972 	WREG32(GB_TILING_CONFIG, tiling_config);
1973 	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1974 	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1975 	WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
1976 
1977 	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1978 	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1979 	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1980 
1981 	/* Setup some CP states */
1982 	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1983 	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
1984 
1985 	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
1986 			     SYNC_WALKER | SYNC_ALIGNER));
1987 	/* Setup various GPU states */
1988 	if (rdev->family == CHIP_RV670)
1989 		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
1990 
1991 	tmp = RREG32(SX_DEBUG_1);
1992 	tmp |= SMX_EVENT_RELEASE;
1993 	if (rdev->family > CHIP_R600)
1994 		tmp |= ENABLE_NEW_SMX_ADDRESS;
1995 	WREG32(SX_DEBUG_1, tmp);
1996 
1997 	if (((rdev->family) == CHIP_R600) ||
1998 	    ((rdev->family) == CHIP_RV630) ||
1999 	    ((rdev->family) == CHIP_RV610) ||
2000 	    ((rdev->family) == CHIP_RV620) ||
2001 	    ((rdev->family) == CHIP_RS780) ||
2002 	    ((rdev->family) == CHIP_RS880)) {
2003 		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
2004 	} else {
2005 		WREG32(DB_DEBUG, 0);
2006 	}
2007 	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
2008 			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
2009 
2010 	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2011 	WREG32(VGT_NUM_INSTANCES, 0);
2012 
2013 	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
2014 	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
2015 
2016 	tmp = RREG32(SQ_MS_FIFO_SIZES);
2017 	if (((rdev->family) == CHIP_RV610) ||
2018 	    ((rdev->family) == CHIP_RV620) ||
2019 	    ((rdev->family) == CHIP_RS780) ||
2020 	    ((rdev->family) == CHIP_RS880)) {
2021 		tmp = (CACHE_FIFO_SIZE(0xa) |
2022 		       FETCH_FIFO_HIWATER(0xa) |
2023 		       DONE_FIFO_HIWATER(0xe0) |
2024 		       ALU_UPDATE_FIFO_HIWATER(0x8));
2025 	} else if (((rdev->family) == CHIP_R600) ||
2026 		   ((rdev->family) == CHIP_RV630)) {
2027 		tmp &= ~DONE_FIFO_HIWATER(0xff);
2028 		tmp |= DONE_FIFO_HIWATER(0x4);
2029 	}
2030 	WREG32(SQ_MS_FIFO_SIZES, tmp);
2031 
2032 	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
2033 	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values.
2034 	 */
2035 	sq_config = RREG32(SQ_CONFIG);
2036 	sq_config &= ~(PS_PRIO(3) |
2037 		       VS_PRIO(3) |
2038 		       GS_PRIO(3) |
2039 		       ES_PRIO(3));
2040 	sq_config |= (DX9_CONSTS |
2041 		      VC_ENABLE |
2042 		      PS_PRIO(0) |
2043 		      VS_PRIO(1) |
2044 		      GS_PRIO(2) |
2045 		      ES_PRIO(3));
2046 
2047 	if ((rdev->family) == CHIP_R600) {
2048 		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
2049 					  NUM_VS_GPRS(124) |
2050 					  NUM_CLAUSE_TEMP_GPRS(4));
2051 		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
2052 					  NUM_ES_GPRS(0));
2053 		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
2054 					   NUM_VS_THREADS(48) |
2055 					   NUM_GS_THREADS(4) |
2056 					   NUM_ES_THREADS(4));
2057 		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
2058 					    NUM_VS_STACK_ENTRIES(128));
2059 		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
2060 					    NUM_ES_STACK_ENTRIES(0));
2061 	} else if (((rdev->family) == CHIP_RV610) ||
2062 		   ((rdev->family) == CHIP_RV620) ||
2063 		   ((rdev->family) == CHIP_RS780) ||
2064 		   ((rdev->family) == CHIP_RS880)) {
2065 		/* no vertex cache */
2066 		sq_config &= ~VC_ENABLE;
2067 
2068 		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2069 					  NUM_VS_GPRS(44) |
2070 					  NUM_CLAUSE_TEMP_GPRS(2));
2071 		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2072 					  NUM_ES_GPRS(17));
2073 		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2074 					   NUM_VS_THREADS(78) |
2075 					   NUM_GS_THREADS(4) |
2076 					   NUM_ES_THREADS(31));
2077 		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2078 					    NUM_VS_STACK_ENTRIES(40));
2079 		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2080 					    NUM_ES_STACK_ENTRIES(16));
2081 	} else if (((rdev->family) == CHIP_RV630) ||
2082 		   ((rdev->family) == CHIP_RV635)) {
2083 		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2084 					  NUM_VS_GPRS(44) |
2085 					  NUM_CLAUSE_TEMP_GPRS(2));
2086 		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
2087 					  NUM_ES_GPRS(18));
2088 		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2089 					   NUM_VS_THREADS(78) |
2090 					   NUM_GS_THREADS(4) |
2091 					   NUM_ES_THREADS(31));
2092 		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
2093 					    NUM_VS_STACK_ENTRIES(40));
2094 		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
2095 					    NUM_ES_STACK_ENTRIES(16));
2096 	} else if ((rdev->family) == CHIP_RV670) {
2097 		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
2098 					  NUM_VS_GPRS(44) |
2099 					  NUM_CLAUSE_TEMP_GPRS(2));
2100 		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
2101 					  NUM_ES_GPRS(17));
2102 		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
2103 					   NUM_VS_THREADS(78) |
2104 					   NUM_GS_THREADS(4) |
2105 					   NUM_ES_THREADS(31));
2106 		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
2107 					    NUM_VS_STACK_ENTRIES(64));
2108 		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
2109 					    NUM_ES_STACK_ENTRIES(64));
2110 	}
2111 
2112 	WREG32(SQ_CONFIG, sq_config);
2113 	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
2114 	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
2115 	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2116 	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2117 	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2118 
2119 	if (((rdev->family) == CHIP_RV610) ||
2120 	    ((rdev->family) == CHIP_RV620) ||
2121 	    ((rdev->family) == CHIP_RS780) ||
2122 	    ((rdev->family) == CHIP_RS880)) {
2123 		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
2124 	} else {
2125 		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
2126 	}
2127 
2128 	/* More default values. 2D/3D driver should adjust as needed */
2129 	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
2130 					 S1_X(0x4) | S1_Y(0xc)));
2131 	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
2132 					 S1_X(0x2) | S1_Y(0x2) |
2133 					 S2_X(0xa) | S2_Y(0x6) |
2134 					 S3_X(0x6) | S3_Y(0xa)));
2135 	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
2136 					     S1_X(0x4) | S1_Y(0xc) |
2137 					     S2_X(0x1) | S2_Y(0x6) |
2138 					     S3_X(0xa) | S3_Y(0xe)));
2139 	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
2140 					     S5_X(0x0) | S5_Y(0x0) |
2141 					     S6_X(0xb) | S6_Y(0x4) |
2142 					     S7_X(0x7) | S7_Y(0x8)));
2143 
2144 	WREG32(VGT_STRMOUT_EN, 0);
2145 	tmp = rdev->config.r600.max_pipes * 16;
2146 	switch (rdev->family) {
2147 	case CHIP_RV610:
2148 	case CHIP_RV620:
2149 	case CHIP_RS780:
2150 	case CHIP_RS880:
2151 		tmp += 32;
2152 		break;
2153 	case CHIP_RV670:
2154 		tmp += 128;
2155 		break;
2156 	default:
2157 		break;
2158 	}
2159 	if (tmp > 256) {
2160 		tmp = 256;
2161 	}
2162 	WREG32(VGT_ES_PER_GS, 128);
2163 	WREG32(VGT_GS_PER_ES, tmp);
2164 	WREG32(VGT_GS_PER_VS, 2);
2165 	WREG32(VGT_GS_VERTEX_REUSE, 16);
2166 
2167 	/* more default values. 2D/3D driver should adjust as needed */
2168 	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2169 	WREG32(VGT_STRMOUT_EN, 0);
2170 	WREG32(SX_MISC, 0);
2171 	WREG32(PA_SC_MODE_CNTL, 0);
2172 	WREG32(PA_SC_AA_CONFIG, 0);
2173 	WREG32(PA_SC_LINE_STIPPLE, 0);
2174 	WREG32(SPI_INPUT_Z, 0);
2175 	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
2176 	WREG32(CB_COLOR7_FRAG, 0);
2177 
2178 	/* Clear render buffer base addresses */
2179 	WREG32(CB_COLOR0_BASE, 0);
2180 	WREG32(CB_COLOR1_BASE, 0);
2181 	WREG32(CB_COLOR2_BASE, 0);
2182 	WREG32(CB_COLOR3_BASE, 0);
2183 	WREG32(CB_COLOR4_BASE, 0);
2184 	WREG32(CB_COLOR5_BASE, 0);
2185 	WREG32(CB_COLOR6_BASE, 0);
2186 	WREG32(CB_COLOR7_BASE, 0);
2187 	WREG32(CB_COLOR7_FRAG, 0);
2188 
2189 	switch (rdev->family) {
2190 	case CHIP_RV610:
2191 	case CHIP_RV620:
2192 	case CHIP_RS780:
2193 	case CHIP_RS880:
2194 		tmp = TC_L2_SIZE(8);
2195 		break;
2196 	case CHIP_RV630:
2197 	case CHIP_RV635:
2198 		tmp = TC_L2_SIZE(4);
2199 		break;
2200 	case CHIP_R600:
2201 		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
2202 		break;
2203 	default:
2204 		tmp = TC_L2_SIZE(0);
2205 		break;
2206 	}
2207 	WREG32(TC_CNTL, tmp);
2208 
2209 	tmp = RREG32(HDP_HOST_PATH_CNTL);
2210 	WREG32(HDP_HOST_PATH_CNTL, tmp);
2211 
2212 	tmp = RREG32(ARB_POP);
2213 	tmp |= ENABLE_TC128;
2214 	WREG32(ARB_POP, tmp);
2215 
2216 	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
2217 	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
2218 			       NUM_CLIP_SEQ(3)));
2219 	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
2220 	WREG32(VC_ENHANCE, 0);
2221 }
2222 
2223 
2224 /*
2225  * Indirect registers accessor
2226  */
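/* These registers live behind an index/data pair: the register offset
 * is written to PCIE_PORT_INDEX and the value is then read from or
 * written through PCIE_PORT_DATA.  The pciep_idx_lock spinlock keeps
 * that two-step sequence atomic against concurrent accessors.
 */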
2227 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
2228 {
2229 	unsigned long flags;
2230 	u32 r;
2231 
2232 	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2233 	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2234 	(void)RREG32(PCIE_PORT_INDEX);
2235 	r = RREG32(PCIE_PORT_DATA);
2236 	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2237 	return r;
2238 }
2239 
2240 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
2241 {
2242 	unsigned long flags;
2243 
2244 	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
2245 	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
2246 	(void)RREG32(PCIE_PORT_INDEX);
2247 	WREG32(PCIE_PORT_DATA, (v));
2248 	(void)RREG32(PCIE_PORT_DATA);
2249 	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
2250 }
2251 
2252 /*
2253  * CP & Ring
2254  */
2255 void r600_cp_stop(struct radeon_device *rdev)
2256 {
2257 	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
2258 		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2259 	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
2260 	WREG32(SCRATCH_UMSK, 0);
2261 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
2262 }
2263 
2264 int r600_init_microcode(struct radeon_device *rdev)
2265 {
2266 	const char *chip_name;
2267 	const char *rlc_chip_name;
2268 	const char *smc_chip_name = "RV770";
2269 	size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
2270 	char fw_name[30];
2271 	int err;
2272 
2273 	DRM_DEBUG("\n");
2274 
2275 	switch (rdev->family) {
2276 	case CHIP_R600:
2277 		chip_name = "R600";
2278 		rlc_chip_name = "R600";
2279 		break;
2280 	case CHIP_RV610:
2281 		chip_name = "RV610";
2282 		rlc_chip_name = "R600";
2283 		break;
2284 	case CHIP_RV630:
2285 		chip_name = "RV630";
2286 		rlc_chip_name = "R600";
2287 		break;
2288 	case CHIP_RV620:
2289 		chip_name = "RV620";
2290 		rlc_chip_name = "R600";
2291 		break;
2292 	case CHIP_RV635:
2293 		chip_name = "RV635";
2294 		rlc_chip_name = "R600";
2295 		break;
2296 	case CHIP_RV670:
2297 		chip_name = "RV670";
2298 		rlc_chip_name = "R600";
2299 		break;
2300 	case CHIP_RS780:
2301 	case CHIP_RS880:
2302 		chip_name = "RS780";
2303 		rlc_chip_name = "R600";
2304 		break;
2305 	case CHIP_RV770:
2306 		chip_name = "RV770";
2307 		rlc_chip_name = "R700";
2308 		smc_chip_name = "RV770";
2309 		smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
2310 		break;
2311 	case CHIP_RV730:
2312 		chip_name = "RV730";
2313 		rlc_chip_name = "R700";
2314 		smc_chip_name = "RV730";
2315 		smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
2316 		break;
2317 	case CHIP_RV710:
2318 		chip_name = "RV710";
2319 		rlc_chip_name = "R700";
2320 		smc_chip_name = "RV710";
2321 		smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
2322 		break;
2323 	case CHIP_RV740:
2324 		chip_name = "RV730";
2325 		rlc_chip_name = "R700";
2326 		smc_chip_name = "RV740";
2327 		smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
2328 		break;
2329 	case CHIP_CEDAR:
2330 		chip_name = "CEDAR";
2331 		rlc_chip_name = "CEDAR";
2332 		smc_chip_name = "CEDAR";
2333 		smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
2334 		break;
2335 	case CHIP_REDWOOD:
2336 		chip_name = "REDWOOD";
2337 		rlc_chip_name = "REDWOOD";
2338 		smc_chip_name = "REDWOOD";
2339 		smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
2340 		break;
2341 	case CHIP_JUNIPER:
2342 		chip_name = "JUNIPER";
2343 		rlc_chip_name = "JUNIPER";
2344 		smc_chip_name = "JUNIPER";
2345 		smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
2346 		break;
2347 	case CHIP_CYPRESS:
2348 	case CHIP_HEMLOCK:
2349 		chip_name = "CYPRESS";
2350 		rlc_chip_name = "CYPRESS";
2351 		smc_chip_name = "CYPRESS";
2352 		smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
2353 		break;
2354 	case CHIP_PALM:
2355 		chip_name = "PALM";
2356 		rlc_chip_name = "SUMO";
2357 		break;
2358 	case CHIP_SUMO:
2359 		chip_name = "SUMO";
2360 		rlc_chip_name = "SUMO";
2361 		break;
2362 	case CHIP_SUMO2:
2363 		chip_name = "SUMO2";
2364 		rlc_chip_name = "SUMO";
2365 		break;
2366 	default: BUG();
2367 	}
2368 
2369 	if (rdev->family >= CHIP_CEDAR) {
2370 		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2371 		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
2372 		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
2373 	} else if (rdev->family >= CHIP_RV770) {
2374 		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2375 		me_req_size = R700_PM4_UCODE_SIZE * 4;
2376 		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2377 	} else {
2378 		pfp_req_size = R600_PFP_UCODE_SIZE * 4;
2379 		me_req_size = R600_PM4_UCODE_SIZE * 12;
2380 		rlc_req_size = R600_RLC_UCODE_SIZE * 4;
2381 	}
2382 
2383 	DRM_INFO("Loading %s Microcode\n", chip_name);
2384 
2385 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2386 	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
2387 	if (err)
2388 		goto out;
2389 	if (rdev->pfp_fw->size != pfp_req_size) {
2390 		printk(KERN_ERR
2391 		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2392 		       rdev->pfp_fw->size, fw_name);
2393 		err = -EINVAL;
2394 		goto out;
2395 	}
2396 
2397 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2398 	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
2399 	if (err)
2400 		goto out;
2401 	if (rdev->me_fw->size != me_req_size) {
2402 		printk(KERN_ERR
2403 		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2404 		       rdev->me_fw->size, fw_name);
2405 		err = -EINVAL;
2406 	}
2407 
2408 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2409 	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
2410 	if (err)
2411 		goto out;
2412 	if (rdev->rlc_fw->size != rlc_req_size) {
2413 		printk(KERN_ERR
2414 		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2415 		       rdev->rlc_fw->size, fw_name);
2416 		err = -EINVAL;
2417 	}
2418 
2419 	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
2420 		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
2421 		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2422 		if (err) {
2423 			printk(KERN_ERR
2424 			       "smc: error loading firmware \"%s\"\n",
2425 			       fw_name);
2426 			release_firmware(rdev->smc_fw);
2427 			rdev->smc_fw = NULL;
2428 			err = 0;
2429 		} else if (rdev->smc_fw->size != smc_req_size) {
2430 			printk(KERN_ERR
2431 			       "smc: Bogus length %zu in firmware \"%s\"\n",
2432 			       rdev->smc_fw->size, fw_name);
2433 			err = -EINVAL;
2434 		}
2435 	}
2436 
2437 out:
2438 	if (err) {
2439 		if (err != -EINVAL)
2440 			printk(KERN_ERR
2441 			       "r600_cp: Failed to load firmware \"%s\"\n",
2442 			       fw_name);
2443 		release_firmware(rdev->pfp_fw);
2444 		rdev->pfp_fw = NULL;
2445 		release_firmware(rdev->me_fw);
2446 		rdev->me_fw = NULL;
2447 		release_firmware(rdev->rlc_fw);
2448 		rdev->rlc_fw = NULL;
2449 		release_firmware(rdev->smc_fw);
2450 		rdev->smc_fw = NULL;
2451 	}
2452 	return err;
2453 }
2454 
2455 u32 r600_gfx_get_rptr(struct radeon_device *rdev,
2456 		      struct radeon_ring *ring)
2457 {
2458 	u32 rptr;
2459 
2460 	if (rdev->wb.enabled)
2461 		rptr = rdev->wb.wb[ring->rptr_offs/4];
2462 	else
2463 		rptr = RREG32(R600_CP_RB_RPTR);
2464 
2465 	return rptr;
2466 }
2467 
2468 u32 r600_gfx_get_wptr(struct radeon_device *rdev,
2469 		      struct radeon_ring *ring)
2470 {
2471 	u32 wptr;
2472 
2473 	wptr = RREG32(R600_CP_RB_WPTR);
2474 
2475 	return wptr;
2476 }
2477 
2478 void r600_gfx_set_wptr(struct radeon_device *rdev,
2479 		       struct radeon_ring *ring)
2480 {
2481 	WREG32(R600_CP_RB_WPTR, ring->wptr);
2482 	(void)RREG32(R600_CP_RB_WPTR);
2483 }
2484 
2485 static int r600_cp_load_microcode(struct radeon_device *rdev)
2486 {
2487 	const __be32 *fw_data;
2488 	int i;
2489 
2490 	if (!rdev->me_fw || !rdev->pfp_fw)
2491 		return -EINVAL;
2492 
2493 	r600_cp_stop(rdev);
2494 
2495 	WREG32(CP_RB_CNTL,
2496 #ifdef __BIG_ENDIAN
2497 	       BUF_SWAP_32BIT |
2498 #endif
2499 	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2500 
2501 	/* Reset cp */
2502 	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2503 	RREG32(GRBM_SOFT_RESET);
2504 	mdelay(15);
2505 	WREG32(GRBM_SOFT_RESET, 0);
2506 
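	/* Stream the ME and then the PFP microcode into the CP.  The
	 * firmware blobs store the words big-endian, hence the
	 * be32_to_cpup() conversion before each data-port write.
	 */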
2507 	WREG32(CP_ME_RAM_WADDR, 0);
2508 
2509 	fw_data = (const __be32 *)rdev->me_fw->data;
2510 	WREG32(CP_ME_RAM_WADDR, 0);
2511 	for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
2512 		WREG32(CP_ME_RAM_DATA,
2513 		       be32_to_cpup(fw_data++));
2514 
2515 	fw_data = (const __be32 *)rdev->pfp_fw->data;
2516 	WREG32(CP_PFP_UCODE_ADDR, 0);
2517 	for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
2518 		WREG32(CP_PFP_UCODE_DATA,
2519 		       be32_to_cpup(fw_data++));
2520 
2521 	WREG32(CP_PFP_UCODE_ADDR, 0);
2522 	WREG32(CP_ME_RAM_WADDR, 0);
2523 	WREG32(CP_ME_RAM_RADDR, 0);
2524 	return 0;
2525 }
2526 
2527 int r600_cp_start(struct radeon_device *rdev)
2528 {
2529 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2530 	int r;
2531 	uint32_t cp_me;
2532 
2533 	r = radeon_ring_lock(rdev, ring, 7);
2534 	if (r) {
2535 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2536 		return r;
2537 	}
2538 	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2539 	radeon_ring_write(ring, 0x1);
2540 	if (rdev->family >= CHIP_RV770) {
2541 		radeon_ring_write(ring, 0x0);
2542 		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
2543 	} else {
2544 		radeon_ring_write(ring, 0x3);
2545 		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
2546 	}
2547 	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2548 	radeon_ring_write(ring, 0);
2549 	radeon_ring_write(ring, 0);
2550 	radeon_ring_unlock_commit(rdev, ring);
2551 
2552 	cp_me = 0xff;
2553 	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2554 	return 0;
2555 }
2556 
2557 int r600_cp_resume(struct radeon_device *rdev)
2558 {
2559 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2560 	u32 tmp;
2561 	u32 rb_bufsz;
2562 	int r;
2563 
2564 	/* Reset cp */
2565 	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2566 	RREG32(GRBM_SOFT_RESET);
2567 	mdelay(15);
2568 	WREG32(GRBM_SOFT_RESET, 0);
2569 
2570 	/* Set ring buffer size */
2571 	rb_bufsz = order_base_2(ring->ring_size / 8);
2572 	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2573 #ifdef __BIG_ENDIAN
2574 	tmp |= BUF_SWAP_32BIT;
2575 #endif
2576 	WREG32(CP_RB_CNTL, tmp);
2577 	WREG32(CP_SEM_WAIT_TIMER, 0x0);
2578 
2579 	/* Set the write pointer delay */
2580 	WREG32(CP_RB_WPTR_DELAY, 0);
2581 
2582 	/* Initialize the ring buffer's read and write pointers */
2583 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2584 	WREG32(CP_RB_RPTR_WR, 0);
2585 	ring->wptr = 0;
2586 	WREG32(CP_RB_WPTR, ring->wptr);
2587 
2588 	/* set the wb address whether it's enabled or not */
2589 	WREG32(CP_RB_RPTR_ADDR,
2590 	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2591 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2592 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2593 
2594 	if (rdev->wb.enabled)
2595 		WREG32(SCRATCH_UMSK, 0xff);
2596 	else {
2597 		tmp |= RB_NO_UPDATE;
2598 		WREG32(SCRATCH_UMSK, 0);
2599 	}
2600 
2601 	mdelay(1);
2602 	WREG32(CP_RB_CNTL, tmp);
2603 
2604 	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
2605 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2606 
2607 	r600_cp_start(rdev);
2608 	ring->ready = true;
2609 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
2610 	if (r) {
2611 		ring->ready = false;
2612 		return r;
2613 	}
2614 
2615 	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
2616 		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
2617 
2618 	return 0;
2619 }
2620 
2621 void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
2622 {
2623 	u32 rb_bufsz;
2624 	int r;
2625 
2626 	/* Align ring size */
2627 	rb_bufsz = order_base_2(ring_size / 8);
2628 	ring_size = (1 << (rb_bufsz + 1)) * 4;
2629 	ring->ring_size = ring_size;
2630 	ring->align_mask = 16 - 1;
2631 
2632 	if (radeon_ring_supports_scratch_reg(rdev, ring)) {
2633 		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
2634 		if (r) {
2635 			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
2636 			ring->rptr_save_reg = 0;
2637 		}
2638 	}
2639 }
2640 
2641 void r600_cp_fini(struct radeon_device *rdev)
2642 {
2643 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2644 	r600_cp_stop(rdev);
2645 	radeon_ring_fini(rdev, ring);
2646 	radeon_scratch_free(rdev, ring->rptr_save_reg);
2647 }
2648 
2649 /*
2650  * GPU scratch register helper functions.
2651  */
2652 void r600_scratch_init(struct radeon_device *rdev)
2653 {
2654 	int i;
2655 
2656 	rdev->scratch.num_reg = 7;
2657 	rdev->scratch.reg_base = SCRATCH_REG0;
2658 	for (i = 0; i < rdev->scratch.num_reg; i++) {
2659 		rdev->scratch.free[i] = true;
2660 		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
2661 	}
2662 }
2663 
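/**
 * r600_ring_test - basic sanity test of the CP ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Seeds a scratch register with 0xCAFEDEAD, emits a SET_CONFIG_REG
 * packet that writes 0xDEADBEEF to that register, then polls it to
 * verify the CP executed the packet.  Returns 0 on success or a
 * negative error code on failure.
 */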
2664 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2665 {
2666 	uint32_t scratch;
2667 	uint32_t tmp = 0;
2668 	unsigned i;
2669 	int r;
2670 
2671 	r = radeon_scratch_get(rdev, &scratch);
2672 	if (r) {
2673 		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2674 		return r;
2675 	}
2676 	WREG32(scratch, 0xCAFEDEAD);
2677 	r = radeon_ring_lock(rdev, ring, 3);
2678 	if (r) {
2679 		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
2680 		radeon_scratch_free(rdev, scratch);
2681 		return r;
2682 	}
2683 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2684 	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2685 	radeon_ring_write(ring, 0xDEADBEEF);
2686 	radeon_ring_unlock_commit(rdev, ring);
2687 	for (i = 0; i < rdev->usec_timeout; i++) {
2688 		tmp = RREG32(scratch);
2689 		if (tmp == 0xDEADBEEF)
2690 			break;
2691 		DRM_UDELAY(1);
2692 	}
2693 	if (i < rdev->usec_timeout) {
2694 		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2695 	} else {
2696 		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
2697 			  ring->idx, scratch, tmp);
2698 		r = -EINVAL;
2699 	}
2700 	radeon_scratch_free(rdev, scratch);
2701 	return r;
2702 }
2703 
2704 /*
2705  * CP fences/semaphores
2706  */
2707 
2708 void r600_fence_ring_emit(struct radeon_device *rdev,
2709 			  struct radeon_fence *fence)
2710 {
2711 	struct radeon_ring *ring = &rdev->ring[fence->ring];
2712 	u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
2713 		PACKET3_SH_ACTION_ENA;
2714 
2715 	if (rdev->family >= CHIP_RV770)
2716 		cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
2717 
2718 	if (rdev->wb.use_event) {
2719 		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2720 		/* flush read cache over gart */
2721 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2722 		radeon_ring_write(ring, cp_coher_cntl);
2723 		radeon_ring_write(ring, 0xFFFFFFFF);
2724 		radeon_ring_write(ring, 0);
2725 		radeon_ring_write(ring, 10); /* poll interval */
2726 		/* EVENT_WRITE_EOP - flush caches, send int */
2727 		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2728 		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2729 		radeon_ring_write(ring, addr & 0xffffffff);
2730 		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2731 		radeon_ring_write(ring, fence->seq);
2732 		radeon_ring_write(ring, 0);
2733 	} else {
2734 		/* flush read cache over gart */
2735 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2736 		radeon_ring_write(ring, cp_coher_cntl);
2737 		radeon_ring_write(ring, 0xFFFFFFFF);
2738 		radeon_ring_write(ring, 0);
2739 		radeon_ring_write(ring, 10); /* poll interval */
2740 		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2741 		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2742 		/* wait for 3D idle clean */
2743 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2744 		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2745 		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2746 		/* Emit fence sequence & fire IRQ */
2747 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2748 		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2749 		radeon_ring_write(ring, fence->seq);
2750 		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2751 		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2752 		radeon_ring_write(ring, RB_INT_STAT);
2753 	}
2754 }
2755 
2756 bool r600_semaphore_ring_emit(struct radeon_device *rdev,
2757 			      struct radeon_ring *ring,
2758 			      struct radeon_semaphore *semaphore,
2759 			      bool emit_wait)
2760 {
2761 	uint64_t addr = semaphore->gpu_addr;
2762 	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2763 
2764 	if (rdev->family < CHIP_CAYMAN)
2765 		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
2766 
2767 	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2768 	radeon_ring_write(ring, addr & 0xffffffff);
2769 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2770 
2771 	return true;
2772 }
2773 
2774 /**
2775  * r600_copy_cpdma - copy pages using the CP DMA engine
2776  *
2777  * @rdev: radeon_device pointer
2778  * @src_offset: src GPU address
2779  * @dst_offset: dst GPU address
2780  * @num_gpu_pages: number of GPU pages to xfer
2781  * @fence: radeon fence object
2782  *
2783  * Copy GPU pages using the CP DMA engine (r6xx+).
2784  * Used by the radeon ttm implementation to move pages if
2785  * registered as the asic copy callback.
2786  */
2787 int r600_copy_cpdma(struct radeon_device *rdev,
2788 		    uint64_t src_offset, uint64_t dst_offset,
2789 		    unsigned num_gpu_pages,
2790 		    struct radeon_fence **fence)
2791 {
2792 	struct radeon_semaphore *sem = NULL;
2793 	int ring_index = rdev->asic->copy.blit_ring_index;
2794 	struct radeon_ring *ring = &rdev->ring[ring_index];
2795 	u32 size_in_bytes, cur_size_in_bytes, tmp;
2796 	int i, num_loops;
2797 	int r = 0;
2798 
2799 	r = radeon_semaphore_create(rdev, &sem);
2800 	if (r) {
2801 		DRM_ERROR("radeon: moving bo (%d).\n", r);
2802 		return r;
2803 	}
2804 
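	/* A single CP_DMA packet can copy at most 0x1fffff bytes, so the
	 * transfer is split into num_loops chunks; each chunk emits six
	 * dwords, hence the num_loops * 6 + 24 ring reservation below.
	 */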
2805 	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
2806 	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
2807 	r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
2808 	if (r) {
2809 		DRM_ERROR("radeon: moving bo (%d).\n", r);
2810 		radeon_semaphore_free(rdev, &sem, NULL);
2811 		return r;
2812 	}
2813 
2814 	radeon_semaphore_sync_to(sem, *fence);
2815 	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
2816 
2817 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2818 	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2819 	radeon_ring_write(ring, WAIT_3D_IDLE_bit);
2820 	for (i = 0; i < num_loops; i++) {
2821 		cur_size_in_bytes = size_in_bytes;
2822 		if (cur_size_in_bytes > 0x1fffff)
2823 			cur_size_in_bytes = 0x1fffff;
2824 		size_in_bytes -= cur_size_in_bytes;
2825 		tmp = upper_32_bits(src_offset) & 0xff;
2826 		if (size_in_bytes == 0)
2827 			tmp |= PACKET3_CP_DMA_CP_SYNC;
2828 		radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
2829 		radeon_ring_write(ring, src_offset & 0xffffffff);
2830 		radeon_ring_write(ring, tmp);
2831 		radeon_ring_write(ring, dst_offset & 0xffffffff);
2832 		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
2833 		radeon_ring_write(ring, cur_size_in_bytes);
2834 		src_offset += cur_size_in_bytes;
2835 		dst_offset += cur_size_in_bytes;
2836 	}
2837 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2838 	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2839 	radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
2840 
2841 	r = radeon_fence_emit(rdev, fence, ring->idx);
2842 	if (r) {
2843 		radeon_ring_unlock_undo(rdev, ring);
2844 		return r;
2845 	}
2846 
2847 	radeon_ring_unlock_commit(rdev, ring);
2848 	radeon_semaphore_free(rdev, &sem, *fence);
2849 
2850 	return r;
2851 }
2852 
2853 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2854 			 uint32_t tiling_flags, uint32_t pitch,
2855 			 uint32_t offset, uint32_t obj_size)
2856 {
2857 	/* FIXME: implement */
2858 	return 0;
2859 }
2860 
2861 void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2862 {
2863 	/* FIXME: implement */
2864 }
2865 
2866 static int r600_startup(struct radeon_device *rdev)
2867 {
2868 	struct radeon_ring *ring;
2869 	int r;
2870 
2871 	/* enable pcie gen2 link */
2872 	r600_pcie_gen2_enable(rdev);
2873 
2874 	/* scratch needs to be initialized before MC */
2875 	r = r600_vram_scratch_init(rdev);
2876 	if (r)
2877 		return r;
2878 
2879 	r600_mc_program(rdev);
2880 
2881 	if (rdev->flags & RADEON_IS_AGP) {
2882 		r600_agp_enable(rdev);
2883 	} else {
2884 		r = r600_pcie_gart_enable(rdev);
2885 		if (r)
2886 			return r;
2887 	}
2888 	r600_gpu_init(rdev);
2889 
2890 	/* allocate wb buffer */
2891 	r = radeon_wb_init(rdev);
2892 	if (r)
2893 		return r;
2894 
2895 	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
2896 	if (r) {
2897 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2898 		return r;
2899 	}
2900 
2901 	/* Enable IRQ */
2902 	if (!rdev->irq.installed) {
2903 		r = radeon_irq_kms_init(rdev);
2904 		if (r)
2905 			return r;
2906 	}
2907 
2908 	r = r600_irq_init(rdev);
2909 	if (r) {
2910 		DRM_ERROR("radeon: IH init failed (%d).\n", r);
2911 		radeon_irq_kms_fini(rdev);
2912 		return r;
2913 	}
2914 	r600_irq_set(rdev);
2915 
2916 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2917 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2918 			     RADEON_CP_PACKET2);
2919 	if (r)
2920 		return r;
2921 
2922 	r = r600_cp_load_microcode(rdev);
2923 	if (r)
2924 		return r;
2925 	r = r600_cp_resume(rdev);
2926 	if (r)
2927 		return r;
2928 
2929 	r = radeon_ib_pool_init(rdev);
2930 	if (r) {
2931 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2932 		return r;
2933 	}
2934 
2935 	r = r600_audio_init(rdev);
2936 	if (r) {
2937 		DRM_ERROR("radeon: audio init failed\n");
2938 		return r;
2939 	}
2940 
2941 	return 0;
2942 }
2943 
2944 void r600_vga_set_state(struct radeon_device *rdev, bool state)
2945 {
2946 	uint32_t temp;
2947 
2948 	temp = RREG32(CONFIG_CNTL);
2949 	if (state == false) {
2950 		temp &= ~(1<<0);
2951 		temp |= (1<<1);
2952 	} else {
2953 		temp &= ~(1<<1);
2954 	}
2955 	WREG32(CONFIG_CNTL, temp);
2956 }
2957 
2958 int r600_resume(struct radeon_device *rdev)
2959 {
2960 	int r;
2961 
2962 	/* Do not reset the GPU before posting; on r600 hardware, unlike
2963 	 * r500, posting will perform the tasks needed to bring the GPU
2964 	 * back into good shape.
2965 	 */
2966 	/* post card */
2967 	atom_asic_init(rdev->mode_info.atom_context);
2968 
2969 	radeon_pm_resume(rdev);
2970 
2971 	rdev->accel_working = true;
2972 	r = r600_startup(rdev);
2973 	if (r) {
2974 		DRM_ERROR("r600 startup failed on resume\n");
2975 		rdev->accel_working = false;
2976 		return r;
2977 	}
2978 
2979 	return r;
2980 }
2981 
2982 int r600_suspend(struct radeon_device *rdev)
2983 {
2984 	radeon_pm_suspend(rdev);
2985 	r600_audio_fini(rdev);
2986 	r600_cp_stop(rdev);
2987 	r600_irq_suspend(rdev);
2988 	radeon_wb_disable(rdev);
2989 	r600_pcie_gart_disable(rdev);
2990 
2991 	return 0;
2992 }
2993 
2994 /* The plan is to move more of the initialization into this function
2995  * and to use helper functions so that radeon_device_init does little
2996  * more than call ASIC-specific functions. This should also make it
2997  * possible to remove a bunch of callback functions, such as
2998  * vram_info.
2999  */
3000 int r600_init(struct radeon_device *rdev)
3001 {
3002 	int r;
3003 
3004 	if (r600_debugfs_mc_info_init(rdev)) {
3005 		DRM_ERROR("Failed to register debugfs file for mc!\n");
3006 	}
3007 	/* Read BIOS */
3008 	if (!radeon_get_bios(rdev)) {
3009 		if (ASIC_IS_AVIVO(rdev))
3010 			return -EINVAL;
3011 	}
3012 	/* Must be an ATOMBIOS */
3013 	if (!rdev->is_atom_bios) {
3014 		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
3015 		return -EINVAL;
3016 	}
3017 	r = radeon_atombios_init(rdev);
3018 	if (r)
3019 		return r;
3020 	/* Post card if necessary */
3021 	if (!radeon_card_posted(rdev)) {
3022 		if (!rdev->bios) {
3023 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3024 			return -EINVAL;
3025 		}
3026 		DRM_INFO("GPU not posted. posting now...\n");
3027 		atom_asic_init(rdev->mode_info.atom_context);
3028 	}
3029 	/* Initialize scratch registers */
3030 	r600_scratch_init(rdev);
3031 	/* Initialize surface registers */
3032 	radeon_surface_init(rdev);
3033 	/* Initialize clocks */
3034 	radeon_get_clock_info(rdev->ddev);
3035 	/* Fence driver */
3036 	r = radeon_fence_driver_init(rdev);
3037 	if (r)
3038 		return r;
3039 	if (rdev->flags & RADEON_IS_AGP) {
3040 		r = radeon_agp_init(rdev);
3041 		if (r)
3042 			radeon_agp_disable(rdev);
3043 	}
3044 	r = r600_mc_init(rdev);
3045 	if (r)
3046 		return r;
3047 	/* Memory manager */
3048 	r = radeon_bo_init(rdev);
3049 	if (r)
3050 		return r;
3051 
3052 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
3053 		r = r600_init_microcode(rdev);
3054 		if (r) {
3055 			DRM_ERROR("Failed to load firmware!\n");
3056 			return r;
3057 		}
3058 	}
3059 
3060 	/* Initialize power management */
3061 	radeon_pm_init(rdev);
3062 
3063 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3064 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
3065 
3066 	rdev->ih.ring_obj = NULL;
3067 	r600_ih_ring_init(rdev, 64 * 1024);
3068 
3069 	r = r600_pcie_gart_init(rdev);
3070 	if (r)
3071 		return r;
3072 
3073 	rdev->accel_working = true;
3074 	r = r600_startup(rdev);
3075 	if (r) {
3076 		dev_err(rdev->dev, "disabling GPU acceleration\n");
3077 		r600_cp_fini(rdev);
3078 		r600_irq_fini(rdev);
3079 		radeon_wb_fini(rdev);
3080 		radeon_ib_pool_fini(rdev);
3081 		radeon_irq_kms_fini(rdev);
3082 		r600_pcie_gart_fini(rdev);
3083 		rdev->accel_working = false;
3084 	}
3085 
3086 	return 0;
3087 }
3088 
3089 void r600_fini(struct radeon_device *rdev)
3090 {
3091 	radeon_pm_fini(rdev);
3092 	r600_audio_fini(rdev);
3093 	r600_cp_fini(rdev);
3094 	r600_irq_fini(rdev);
3095 	radeon_wb_fini(rdev);
3096 	radeon_ib_pool_fini(rdev);
3097 	radeon_irq_kms_fini(rdev);
3098 	r600_pcie_gart_fini(rdev);
3099 	r600_vram_scratch_fini(rdev);
3100 	radeon_agp_fini(rdev);
3101 	radeon_gem_fini(rdev);
3102 	radeon_fence_driver_fini(rdev);
3103 	radeon_bo_fini(rdev);
3104 	radeon_atombios_fini(rdev);
3105 	kfree(rdev->bios);
3106 	rdev->bios = NULL;
3107 }
3108 
3109 
3110 /*
3111  * CS stuff
3112  */
3113 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3114 {
3115 	struct radeon_ring *ring = &rdev->ring[ib->ring];
3116 	u32 next_rptr;
3117 
3118 	if (ring->rptr_save_reg) {
3119 		next_rptr = ring->wptr + 3 + 4;
3120 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3121 		radeon_ring_write(ring, ((ring->rptr_save_reg -
3122 					 PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
3123 		radeon_ring_write(ring, next_rptr);
3124 	} else if (rdev->wb.enabled) {
3125 		next_rptr = ring->wptr + 5 + 4;
3126 		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
3127 		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3128 		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
3129 		radeon_ring_write(ring, next_rptr);
3130 		radeon_ring_write(ring, 0);
3131 	}
3132 
3133 	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3134 	radeon_ring_write(ring,
3135 #ifdef __BIG_ENDIAN
3136 			  (2 << 0) |
3137 #endif
3138 			  (ib->gpu_addr & 0xFFFFFFFC));
3139 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
3140 	radeon_ring_write(ring, ib->length_dw);
3141 }
3142 
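/**
 * r600_ib_test - simple indirect buffer test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Builds a three-dword IB that writes 0xDEADBEEF to a scratch register,
 * schedules it, waits for its fence, and then polls the register to
 * confirm the IB actually executed.  Returns 0 on success or a negative
 * error code on failure.
 */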
3143 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3144 {
3145 	struct radeon_ib ib;
3146 	uint32_t scratch;
3147 	uint32_t tmp = 0;
3148 	unsigned i;
3149 	int r;
3150 
3151 	r = radeon_scratch_get(rdev, &scratch);
3152 	if (r) {
3153 		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3154 		return r;
3155 	}
3156 	WREG32(scratch, 0xCAFEDEAD);
3157 	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3158 	if (r) {
3159 		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3160 		goto free_scratch;
3161 	}
3162 	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
3163 	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
3164 	ib.ptr[2] = 0xDEADBEEF;
3165 	ib.length_dw = 3;
3166 	r = radeon_ib_schedule(rdev, &ib, NULL);
3167 	if (r) {
3168 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3169 		goto free_ib;
3170 	}
3171 	r = radeon_fence_wait(ib.fence, false);
3172 	if (r) {
3173 		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3174 		goto free_ib;
3175 	}
3176 	for (i = 0; i < rdev->usec_timeout; i++) {
3177 		tmp = RREG32(scratch);
3178 		if (tmp == 0xDEADBEEF)
3179 			break;
3180 		DRM_UDELAY(1);
3181 	}
3182 	if (i < rdev->usec_timeout) {
3183 		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3184 	} else {
3185 		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3186 			  scratch, tmp);
3187 		r = -EINVAL;
3188 	}
3189 free_ib:
3190 	radeon_ib_free(rdev, &ib);
3191 free_scratch:
3192 	radeon_scratch_free(rdev, scratch);
3193 	return r;
3194 }
3195 
3196 /*
3197  * Interrupts
3198  *
3199  * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works much
3200  * the same as the CP ring buffer, but in reverse.  Rather than the CPU
3201  * writing to the ring and the GPU consuming, the GPU writes to the ring
3202  * and host consumes.  As the host irq handler processes interrupts, it
3203  * increments the rptr.  When the rptr catches up with the wptr, all the
3204  * current interrupts have been processed.
3205  */
3206 
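/* A minimal sketch of the consumer side (the real handling lives in
 * r600_irq_process() later in this file): each IH ring entry is 16
 * bytes, rptr/wptr are byte offsets, and the entries are read as 32-bit
 * little-endian words, so a handler advances roughly like this:
 *
 *	while (rptr != wptr) {
 *		u32 src_id = le32_to_cpu(rdev->ih.ring[rptr / 4]) & 0xff;
 *		... dispatch on src_id ...
 *		rptr = (rptr + 16) & rdev->ih.ptr_mask;
 *	}
 */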
3207 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
3208 {
3209 	u32 rb_bufsz;
3210 
3211 	/* Align ring size */
3212 	rb_bufsz = order_base_2(ring_size / 4);
3213 	ring_size = (1 << rb_bufsz) * 4;
3214 	rdev->ih.ring_size = ring_size;
3215 	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
3216 	rdev->ih.rptr = 0;
3217 }
3218 
3219 int r600_ih_ring_alloc(struct radeon_device *rdev)
3220 {
3221 	int r;
3222 
3223 	/* Allocate ring buffer */
3224 	if (rdev->ih.ring_obj == NULL) {
3225 		r = radeon_bo_create(rdev, rdev->ih.ring_size,
3226 				     PAGE_SIZE, true,
3227 				     RADEON_GEM_DOMAIN_GTT,
3228 				     NULL, &rdev->ih.ring_obj);
3229 		if (r) {
3230 			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
3231 			return r;
3232 		}
3233 		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3234 		if (unlikely(r != 0))
3235 			return r;
3236 		r = radeon_bo_pin(rdev->ih.ring_obj,
3237 				  RADEON_GEM_DOMAIN_GTT,
3238 				  &rdev->ih.gpu_addr);
3239 		if (r) {
3240 			radeon_bo_unreserve(rdev->ih.ring_obj);
3241 			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
3242 			return r;
3243 		}
3244 		r = radeon_bo_kmap(rdev->ih.ring_obj,
3245 				   (void **)&rdev->ih.ring);
3246 		radeon_bo_unreserve(rdev->ih.ring_obj);
3247 		if (r) {
3248 			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
3249 			return r;
3250 		}
3251 	}
3252 	return 0;
3253 }
3254 
3255 void r600_ih_ring_fini(struct radeon_device *rdev)
3256 {
3257 	int r;
3258 	if (rdev->ih.ring_obj) {
3259 		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
3260 		if (likely(r == 0)) {
3261 			radeon_bo_kunmap(rdev->ih.ring_obj);
3262 			radeon_bo_unpin(rdev->ih.ring_obj);
3263 			radeon_bo_unreserve(rdev->ih.ring_obj);
3264 		}
3265 		radeon_bo_unref(&rdev->ih.ring_obj);
3266 		rdev->ih.ring = NULL;
3267 		rdev->ih.ring_obj = NULL;
3268 	}
3269 }
3270 
3271 void r600_rlc_stop(struct radeon_device *rdev)
3272 {
3273 
3274 	if ((rdev->family >= CHIP_RV770) &&
3275 	    (rdev->family <= CHIP_RV740)) {
3276 		/* r7xx asics need to soft reset RLC before halting */
3277 		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
3278 		RREG32(SRBM_SOFT_RESET);
3279 		mdelay(15);
3280 		WREG32(SRBM_SOFT_RESET, 0);
3281 		RREG32(SRBM_SOFT_RESET);
3282 	}
3283 
3284 	WREG32(RLC_CNTL, 0);
3285 }
3286 
3287 static void r600_rlc_start(struct radeon_device *rdev)
3288 {
3289 	WREG32(RLC_CNTL, RLC_ENABLE);
3290 }
3291 
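/**
 * r600_rlc_resume - load the RLC microcode and start the RLC
 *
 * @rdev: radeon_device pointer
 *
 * Halts the RLC, clears the RLC ring and control registers, loads the
 * RLC ucode (r6xx or r7xx variant) and starts the RLC again.
 * Returns 0 on success, -EINVAL if no RLC firmware is loaded.
 */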
3292 static int r600_rlc_resume(struct radeon_device *rdev)
3293 {
3294 	u32 i;
3295 	const __be32 *fw_data;
3296 
3297 	if (!rdev->rlc_fw)
3298 		return -EINVAL;
3299 
3300 	r600_rlc_stop(rdev);
3301 
3302 	WREG32(RLC_HB_CNTL, 0);
3303 
3304 	WREG32(RLC_HB_BASE, 0);
3305 	WREG32(RLC_HB_RPTR, 0);
3306 	WREG32(RLC_HB_WPTR, 0);
3307 	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
3308 	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
3309 	WREG32(RLC_MC_CNTL, 0);
3310 	WREG32(RLC_UCODE_CNTL, 0);
3311 
3312 	fw_data = (const __be32 *)rdev->rlc_fw->data;
3313 	if (rdev->family >= CHIP_RV770) {
3314 		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
3315 			WREG32(RLC_UCODE_ADDR, i);
3316 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3317 		}
3318 	} else {
3319 		for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
3320 			WREG32(RLC_UCODE_ADDR, i);
3321 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
3322 		}
3323 	}
3324 	WREG32(RLC_UCODE_ADDR, 0);
3325 
3326 	r600_rlc_start(rdev);
3327 
3328 	return 0;
3329 }
3330 
3331 static void r600_enable_interrupts(struct radeon_device *rdev)
3332 {
3333 	u32 ih_cntl = RREG32(IH_CNTL);
3334 	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3335 
3336 	ih_cntl |= ENABLE_INTR;
3337 	ih_rb_cntl |= IH_RB_ENABLE;
3338 	WREG32(IH_CNTL, ih_cntl);
3339 	WREG32(IH_RB_CNTL, ih_rb_cntl);
3340 	rdev->ih.enabled = true;
3341 }
3342 
3343 void r600_disable_interrupts(struct radeon_device *rdev)
3344 {
3345 	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
3346 	u32 ih_cntl = RREG32(IH_CNTL);
3347 
3348 	ih_rb_cntl &= ~IH_RB_ENABLE;
3349 	ih_cntl &= ~ENABLE_INTR;
3350 	WREG32(IH_RB_CNTL, ih_rb_cntl);
3351 	WREG32(IH_CNTL, ih_cntl);
3352 	/* set rptr, wptr to 0 */
3353 	WREG32(IH_RB_RPTR, 0);
3354 	WREG32(IH_RB_WPTR, 0);
3355 	rdev->ih.enabled = false;
3356 	rdev->ih.rptr = 0;
3357 }
3358 
3359 static void r600_disable_interrupt_state(struct radeon_device *rdev)
3360 {
3361 	u32 tmp;
3362 
3363 	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
3364 	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3365 	WREG32(DMA_CNTL, tmp);
3366 	WREG32(GRBM_INT_CNTL, 0);
3367 	WREG32(DxMODE_INT_MASK, 0);
3368 	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
3369 	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
3370 	if (ASIC_IS_DCE3(rdev)) {
3371 		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
3372 		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
3373 		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3374 		WREG32(DC_HPD1_INT_CONTROL, tmp);
3375 		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3376 		WREG32(DC_HPD2_INT_CONTROL, tmp);
3377 		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3378 		WREG32(DC_HPD3_INT_CONTROL, tmp);
3379 		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3380 		WREG32(DC_HPD4_INT_CONTROL, tmp);
3381 		if (ASIC_IS_DCE32(rdev)) {
3382 			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3383 			WREG32(DC_HPD5_INT_CONTROL, tmp);
3384 			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
3385 			WREG32(DC_HPD6_INT_CONTROL, tmp);
3386 			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3387 			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3388 			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3389 			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3390 		} else {
3391 			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3392 			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3393 			tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3394 			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3395 		}
3396 	} else {
3397 		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
3398 		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
3399 		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3400 		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3401 		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3402 		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3403 		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
3404 		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3405 		tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3406 		WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3407 		tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3408 		WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3409 	}
3410 }
3411 
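/**
 * r600_irq_init - init and enable the interrupt handler (IH) ring
 *
 * @rdev: radeon_device pointer
 *
 * Allocates the IH ring buffer, loads the RLC ucode, programs the IH
 * ring control registers (base, size, writeback), forces all interrupt
 * sources to a disabled state and then enables the IH.
 * Returns 0 on success, error on failure.
 */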
3412 int r600_irq_init(struct radeon_device *rdev)
3413 {
3414 	int ret = 0;
3415 	int rb_bufsz;
3416 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
3417 
3418 	/* allocate ring */
3419 	ret = r600_ih_ring_alloc(rdev);
3420 	if (ret)
3421 		return ret;
3422 
3423 	/* disable irqs */
3424 	r600_disable_interrupts(rdev);
3425 
3426 	/* init rlc */
3427 	if (rdev->family >= CHIP_CEDAR)
3428 		ret = evergreen_rlc_resume(rdev);
3429 	else
3430 		ret = r600_rlc_resume(rdev);
3431 	if (ret) {
3432 		r600_ih_ring_fini(rdev);
3433 		return ret;
3434 	}
3435 
3436 	/* setup interrupt control */
3437 	/* set dummy read address to ring address */
3438 	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
3439 	interrupt_cntl = RREG32(INTERRUPT_CNTL);
3440 	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
3441 	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
3442 	 */
3443 	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
3444 	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
3445 	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
3446 	WREG32(INTERRUPT_CNTL, interrupt_cntl);
3447 
3448 	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
3449 	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
3450 
3451 	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
3452 		      IH_WPTR_OVERFLOW_CLEAR |
3453 		      (rb_bufsz << 1));
3454 
3455 	if (rdev->wb.enabled)
3456 		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
3457 
3458 	/* set the writeback address whether it's enabled or not */
3459 	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
3460 	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
3461 
3462 	WREG32(IH_RB_CNTL, ih_rb_cntl);
3463 
3464 	/* set rptr, wptr to 0 */
3465 	WREG32(IH_RB_RPTR, 0);
3466 	WREG32(IH_RB_WPTR, 0);
3467 
3468 	/* Default settings for IH_CNTL (disabled at first) */
3469 	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
3470 	/* RPTR_REARM only works if msi's are enabled */
3471 	if (rdev->msi_enabled)
3472 		ih_cntl |= RPTR_REARM;
3473 	WREG32(IH_CNTL, ih_cntl);
3474 
3475 	/* force the active interrupt state to all disabled */
3476 	if (rdev->family >= CHIP_CEDAR)
3477 		evergreen_disable_interrupt_state(rdev);
3478 	else
3479 		r600_disable_interrupt_state(rdev);
3480 
3481 	/* at this point everything should be set up correctly to enable master */
3482 	pci_set_master(rdev->pdev);
3483 
3484 	/* enable irqs */
3485 	r600_enable_interrupts(rdev);
3486 
3487 	return ret;
3488 }
3489 
3490 void r600_irq_suspend(struct radeon_device *rdev)
3491 {
3492 	r600_irq_disable(rdev);
3493 	r600_rlc_stop(rdev);
3494 }
3495 
3496 void r600_irq_fini(struct radeon_device *rdev)
3497 {
3498 	r600_irq_suspend(rdev);
3499 	r600_ih_ring_fini(rdev);
3500 }
3501 
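/**
 * r600_irq_set - program the interrupt enable registers
 *
 * @rdev: radeon_device pointer
 *
 * Enables or disables the individual interrupt sources (CP, DMA,
 * vblank, hotplug, HDMI audio, thermal) based on the state tracked
 * in rdev->irq.  Returns 0 on success, -EINVAL if no irq handler is
 * installed.
 */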
3502 int r600_irq_set(struct radeon_device *rdev)
3503 {
3504 	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3505 	u32 mode_int = 0;
3506 	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3507 	u32 grbm_int_cntl = 0;
3508 	u32 hdmi0, hdmi1;
3509 	u32 d1grph = 0, d2grph = 0;
3510 	u32 dma_cntl;
3511 	u32 thermal_int = 0;
3512 
3513 	if (!rdev->irq.installed) {
3514 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
3515 		return -EINVAL;
3516 	}
3517 	/* don't enable anything if the ih is disabled */
3518 	if (!rdev->ih.enabled) {
3519 		r600_disable_interrupts(rdev);
3520 		/* force the active interrupt state to all disabled */
3521 		r600_disable_interrupt_state(rdev);
3522 		return 0;
3523 	}
3524 
3525 	if (ASIC_IS_DCE3(rdev)) {
3526 		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3527 		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3528 		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3529 		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3530 		if (ASIC_IS_DCE32(rdev)) {
3531 			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3532 			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3533 			hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3534 			hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3535 		} else {
3536 			hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3537 			hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3538 		}
3539 	} else {
3540 		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3541 		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3542 		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3543 		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3544 		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3545 	}
3546 
3547 	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3548 
3549 	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
3550 		thermal_int = RREG32(CG_THERMAL_INT) &
3551 			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3552 	} else if (rdev->family >= CHIP_RV770) {
3553 		thermal_int = RREG32(RV770_CG_THERMAL_INT) &
3554 			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3555 	}
3556 	if (rdev->irq.dpm_thermal) {
3557 		DRM_DEBUG("dpm thermal\n");
3558 		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
3559 	}
3560 
3561 	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3562 		DRM_DEBUG("r600_irq_set: sw int\n");
3563 		cp_int_cntl |= RB_INT_ENABLE;
3564 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3565 	}
3566 
3567 	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
3568 		DRM_DEBUG("r600_irq_set: sw int dma\n");
3569 		dma_cntl |= TRAP_ENABLE;
3570 	}
3571 
3572 	if (rdev->irq.crtc_vblank_int[0] ||
3573 	    atomic_read(&rdev->irq.pflip[0])) {
3574 		DRM_DEBUG("r600_irq_set: vblank 0\n");
3575 		mode_int |= D1MODE_VBLANK_INT_MASK;
3576 	}
3577 	if (rdev->irq.crtc_vblank_int[1] ||
3578 	    atomic_read(&rdev->irq.pflip[1])) {
3579 		DRM_DEBUG("r600_irq_set: vblank 1\n");
3580 		mode_int |= D2MODE_VBLANK_INT_MASK;
3581 	}
3582 	if (rdev->irq.hpd[0]) {
3583 		DRM_DEBUG("r600_irq_set: hpd 1\n");
3584 		hpd1 |= DC_HPDx_INT_EN;
3585 	}
3586 	if (rdev->irq.hpd[1]) {
3587 		DRM_DEBUG("r600_irq_set: hpd 2\n");
3588 		hpd2 |= DC_HPDx_INT_EN;
3589 	}
3590 	if (rdev->irq.hpd[2]) {
3591 		DRM_DEBUG("r600_irq_set: hpd 3\n");
3592 		hpd3 |= DC_HPDx_INT_EN;
3593 	}
3594 	if (rdev->irq.hpd[3]) {
3595 		DRM_DEBUG("r600_irq_set: hpd 4\n");
3596 		hpd4 |= DC_HPDx_INT_EN;
3597 	}
3598 	if (rdev->irq.hpd[4]) {
3599 		DRM_DEBUG("r600_irq_set: hpd 5\n");
3600 		hpd5 |= DC_HPDx_INT_EN;
3601 	}
3602 	if (rdev->irq.hpd[5]) {
3603 		DRM_DEBUG("r600_irq_set: hpd 6\n");
3604 		hpd6 |= DC_HPDx_INT_EN;
3605 	}
3606 	if (rdev->irq.afmt[0]) {
3607 		DRM_DEBUG("r600_irq_set: hdmi 0\n");
3608 		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3609 	}
3610 	if (rdev->irq.afmt[1]) {
3611 		DRM_DEBUG("r600_irq_set: hdmi 1\n");
3612 		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3613 	}
3614 
3615 	WREG32(CP_INT_CNTL, cp_int_cntl);
3616 	WREG32(DMA_CNTL, dma_cntl);
3617 	WREG32(DxMODE_INT_MASK, mode_int);
3618 	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3619 	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
3620 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3621 	if (ASIC_IS_DCE3(rdev)) {
3622 		WREG32(DC_HPD1_INT_CONTROL, hpd1);
3623 		WREG32(DC_HPD2_INT_CONTROL, hpd2);
3624 		WREG32(DC_HPD3_INT_CONTROL, hpd3);
3625 		WREG32(DC_HPD4_INT_CONTROL, hpd4);
3626 		if (ASIC_IS_DCE32(rdev)) {
3627 			WREG32(DC_HPD5_INT_CONTROL, hpd5);
3628 			WREG32(DC_HPD6_INT_CONTROL, hpd6);
3629 			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
3630 			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
3631 		} else {
3632 			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3633 			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3634 		}
3635 	} else {
3636 		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3637 		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3638 		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3639 		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
3640 		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
3641 	}
3642 	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
3643 		WREG32(CG_THERMAL_INT, thermal_int);
3644 	} else if (rdev->family >= CHIP_RV770) {
3645 		WREG32(RV770_CG_THERMAL_INT, thermal_int);
3646 	}
3647 
3648 	return 0;
3649 }
3650 
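/**
 * r600_irq_ack - acknowledge display interrupts
 *
 * @rdev: radeon_device pointer
 *
 * Reads the display interrupt status registers into
 * rdev->irq.stat_regs.r600 and acknowledges (clears) any pending
 * pageflip, vblank, vline, hotplug and HDMI audio interrupts.
 */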
3651 static void r600_irq_ack(struct radeon_device *rdev)
3652 {
3653 	u32 tmp;
3654 
3655 	if (ASIC_IS_DCE3(rdev)) {
3656 		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3657 		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3658 		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3659 		if (ASIC_IS_DCE32(rdev)) {
3660 			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
3661 			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
3662 		} else {
3663 			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3664 			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
3665 		}
3666 	} else {
3667 		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3668 		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3669 		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
3670 		rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
3671 		rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
3672 	}
3673 	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3674 	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
3675 
3676 	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3677 		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3678 	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3679 		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3680 	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
3681 		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3682 	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
3683 		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3684 	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
3685 		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3686 	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
3687 		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3688 	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3689 		if (ASIC_IS_DCE3(rdev)) {
3690 			tmp = RREG32(DC_HPD1_INT_CONTROL);
3691 			tmp |= DC_HPDx_INT_ACK;
3692 			WREG32(DC_HPD1_INT_CONTROL, tmp);
3693 		} else {
3694 			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3695 			tmp |= DC_HPDx_INT_ACK;
3696 			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3697 		}
3698 	}
3699 	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3700 		if (ASIC_IS_DCE3(rdev)) {
3701 			tmp = RREG32(DC_HPD2_INT_CONTROL);
3702 			tmp |= DC_HPDx_INT_ACK;
3703 			WREG32(DC_HPD2_INT_CONTROL, tmp);
3704 		} else {
3705 			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3706 			tmp |= DC_HPDx_INT_ACK;
3707 			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3708 		}
3709 	}
3710 	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3711 		if (ASIC_IS_DCE3(rdev)) {
3712 			tmp = RREG32(DC_HPD3_INT_CONTROL);
3713 			tmp |= DC_HPDx_INT_ACK;
3714 			WREG32(DC_HPD3_INT_CONTROL, tmp);
3715 		} else {
3716 			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3717 			tmp |= DC_HPDx_INT_ACK;
3718 			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3719 		}
3720 	}
3721 	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3722 		tmp = RREG32(DC_HPD4_INT_CONTROL);
3723 		tmp |= DC_HPDx_INT_ACK;
3724 		WREG32(DC_HPD4_INT_CONTROL, tmp);
3725 	}
3726 	if (ASIC_IS_DCE32(rdev)) {
3727 		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3728 			tmp = RREG32(DC_HPD5_INT_CONTROL);
3729 			tmp |= DC_HPDx_INT_ACK;
3730 			WREG32(DC_HPD5_INT_CONTROL, tmp);
3731 		}
3732 		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3733 			tmp = RREG32(DC_HPD6_INT_CONTROL);
3734 			tmp |= DC_HPDx_INT_ACK;
3735 			WREG32(DC_HPD6_INT_CONTROL, tmp);
3736 		}
3737 		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
3738 			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
3739 			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3740 			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3741 		}
3742 		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
3743 			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
3744 			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3745 			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3746 		}
3747 	} else {
3748 		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3749 			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
3750 			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3751 			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3752 		}
3753 		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3754 			if (ASIC_IS_DCE3(rdev)) {
3755 				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
3756 				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3757 				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3758 			} else {
3759 				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
3760 				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3761 				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3762 			}
3763 		}
3764 	}
3765 }
3766 
3767 void r600_irq_disable(struct radeon_device *rdev)
3768 {
3769 	r600_disable_interrupts(rdev);
3770 	/* Wait and acknowledge irq */
3771 	mdelay(1);
3772 	r600_irq_ack(rdev);
3773 	r600_disable_interrupt_state(rdev);
3774 }
3775 
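/**
 * r600_get_ih_wptr - get the current IH ring buffer wptr
 *
 * @rdev: radeon_device pointer
 *
 * Fetches the current write pointer from the writeback page (or from
 * the IH_RB_WPTR register if writeback is disabled) and handles ring
 * buffer overflows.  Returns the wptr masked to the ring size.
 */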
3776 static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3777 {
3778 	u32 wptr, tmp;
3779 
3780 	if (rdev->wb.enabled)
3781 		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
3782 	else
3783 		wptr = RREG32(IH_RB_WPTR);
3784 
3785 	if (wptr & RB_OVERFLOW) {
3786 		/* When a ring buffer overflow happens, start parsing interrupts
3787 		 * from the last vector that was not overwritten (wptr + 16).
3788 		 * Hopefully this allows us to catch up.
3789 		 */
3790 		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3791 			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
3792 		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3793 		tmp = RREG32(IH_RB_CNTL);
3794 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
3795 		WREG32(IH_RB_CNTL, tmp);
3796 	}
3797 	return (wptr & rdev->ih.ptr_mask);
3798 }
3799 
3800 /*        r600 IV Ring
3801  * Each IV ring entry is 128 bits:
3802  * [7:0]    - interrupt source id
3803  * [31:8]   - reserved
3804  * [59:32]  - interrupt source data
3805  * [127:60]  - reserved
3806  *
3807  * The basic interrupt vector entries
3808  * are decoded as follows:
3809  * src_id  src_data  description
3810  *      1         0  D1 Vblank
3811  *      1         1  D1 Vline
3812  *      5         0  D2 Vblank
3813  *      5         1  D2 Vline
3814  *     19         0  FP Hot plug detection A
3815  *     19         1  FP Hot plug detection B
3816  *     19         2  DAC A auto-detection
3817  *     19         3  DAC B auto-detection
3818  *     21         4  HDMI block A
3819  *     21         5  HDMI block B
3820  *    176         -  CP_INT RB
3821  *    177         -  CP_INT IB1
3822  *    178         -  CP_INT IB2
3823  *    181         -  EOP Interrupt
3824  *    233         -  GUI Idle
3825  *
3826  * Note, these are based on r600 and may need to be
3827  * adjusted or added to on newer asics
3828  */
3829 
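/**
 * r600_irq_process - interrupt handler
 *
 * @rdev: radeon_device pointer
 *
 * Walks the IH ring between rptr and wptr, decodes each interrupt
 * vector (src_id/src_data) and dispatches it: fence processing for the
 * CP/DMA/UVD rings, vblank/pageflip handling, and deferred work for
 * hotplug, HDMI audio and thermal events.  Returns IRQ_HANDLED if
 * interrupts were processed, IRQ_NONE otherwise.
 */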
3830 int r600_irq_process(struct radeon_device *rdev)
3831 {
3832 	u32 wptr;
3833 	u32 rptr;
3834 	u32 src_id, src_data;
3835 	u32 ring_index;
3836 	bool queue_hotplug = false;
3837 	bool queue_hdmi = false;
3838 	bool queue_thermal = false;
3839 
3840 	if (!rdev->ih.enabled || rdev->shutdown)
3841 		return IRQ_NONE;
3842 
3843 	/* No MSIs, need a dummy read to flush PCI DMAs */
3844 	if (!rdev->msi_enabled)
3845 		RREG32(IH_RB_WPTR);
3846 
3847 	wptr = r600_get_ih_wptr(rdev);
3848 
3849 restart_ih:
3850 	/* is somebody else already processing irqs? */
3851 	if (atomic_xchg(&rdev->ih.lock, 1))
3852 		return IRQ_NONE;
3853 
3854 	rptr = rdev->ih.rptr;
3855 	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3856 
3857 	/* Order reading of wptr vs. reading of IH ring data */
3858 	rmb();
3859 
3860 	/* display interrupts */
3861 	r600_irq_ack(rdev);
3862 
3863 	while (rptr != wptr) {
3864 		/* wptr/rptr are in bytes! */
3865 		ring_index = rptr / 4;
3866 		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3867 		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
3868 
3869 		switch (src_id) {
3870 		case 1: /* D1 vblank/vline */
3871 			switch (src_data) {
3872 			case 0: /* D1 vblank */
3873 				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
3874 					if (rdev->irq.crtc_vblank_int[0]) {
3875 						drm_handle_vblank(rdev->ddev, 0);
3876 						rdev->pm.vblank_sync = true;
3877 						wake_up(&rdev->irq.vblank_queue);
3878 					}
3879 					if (atomic_read(&rdev->irq.pflip[0]))
3880 						radeon_crtc_handle_flip(rdev, 0);
3881 					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3882 					DRM_DEBUG("IH: D1 vblank\n");
3883 				}
3884 				break;
3885 			case 1: /* D1 vline */
3886 				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
3887 					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3888 					DRM_DEBUG("IH: D1 vline\n");
3889 				}
3890 				break;
3891 			default:
3892 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3893 				break;
3894 			}
3895 			break;
3896 		case 5: /* D2 vblank/vline */
3897 			switch (src_data) {
3898 			case 0: /* D2 vblank */
3899 				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
3900 					if (rdev->irq.crtc_vblank_int[1]) {
3901 						drm_handle_vblank(rdev->ddev, 1);
3902 						rdev->pm.vblank_sync = true;
3903 						wake_up(&rdev->irq.vblank_queue);
3904 					}
3905 					if (atomic_read(&rdev->irq.pflip[1]))
3906 						radeon_crtc_handle_flip(rdev, 1);
3907 					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3908 					DRM_DEBUG("IH: D2 vblank\n");
3909 				}
3910 				break;
3911 			case 1: /* D2 vline */
3912 				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
3913 					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
3914 					DRM_DEBUG("IH: D2 vline\n");
3915 				}
3916 				break;
3917 			default:
3918 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3919 				break;
3920 			}
3921 			break;
3922 		case 19: /* HPD/DAC hotplug */
3923 			switch (src_data) {
3924 			case 0:
3925 				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3926 					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
3927 					queue_hotplug = true;
3928 					DRM_DEBUG("IH: HPD1\n");
3929 				}
3930 				break;
3931 			case 1:
3932 				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3933 					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
3934 					queue_hotplug = true;
3935 					DRM_DEBUG("IH: HPD2\n");
3936 				}
3937 				break;
3938 			case 4:
3939 				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3940 					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
3941 					queue_hotplug = true;
3942 					DRM_DEBUG("IH: HPD3\n");
3943 				}
3944 				break;
3945 			case 5:
3946 				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3947 					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
3948 					queue_hotplug = true;
3949 					DRM_DEBUG("IH: HPD4\n");
3950 				}
3951 				break;
3952 			case 10:
3953 				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3954 					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3955 					queue_hotplug = true;
3956 					DRM_DEBUG("IH: HPD5\n");
3957 				}
3958 				break;
3959 			case 12:
3960 				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3961 					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3962 					queue_hotplug = true;
3963 					DRM_DEBUG("IH: HPD6\n");
3964 				}
3965 				break;
3966 			default:
3967 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3968 				break;
3969 			}
3970 			break;
3971 		case 21: /* hdmi */
3972 			switch (src_data) {
3973 			case 4:
3974 				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3975 					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
3976 					queue_hdmi = true;
3977 					DRM_DEBUG("IH: HDMI0\n");
3978 				}
3979 				break;
3980 			case 5:
3981 				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3982 					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
3983 					queue_hdmi = true;
3984 					DRM_DEBUG("IH: HDMI1\n");
3985 				}
3986 				break;
3987 			default:
3988 				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
3989 				break;
3990 			}
3991 			break;
3992 		case 124: /* UVD */
3993 			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
3994 			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
3995 			break;
3996 		case 176: /* CP_INT in ring buffer */
3997 		case 177: /* CP_INT in IB1 */
3998 		case 178: /* CP_INT in IB2 */
3999 			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
4000 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
4001 			break;
4002 		case 181: /* CP EOP event */
4003 			DRM_DEBUG("IH: CP EOP\n");
4004 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
4005 			break;
4006 		case 224: /* DMA trap event */
4007 			DRM_DEBUG("IH: DMA trap\n");
4008 			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
4009 			break;
4010 		case 230: /* thermal low to high */
4011 			DRM_DEBUG("IH: thermal low to high\n");
4012 			rdev->pm.dpm.thermal.high_to_low = false;
4013 			queue_thermal = true;
4014 			break;
4015 		case 231: /* thermal high to low */
4016 			DRM_DEBUG("IH: thermal high to low\n");
4017 			rdev->pm.dpm.thermal.high_to_low = true;
4018 			queue_thermal = true;
4019 			break;
4020 		case 233: /* GUI IDLE */
4021 			DRM_DEBUG("IH: GUI idle\n");
4022 			break;
4023 		default:
4024 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4025 			break;
4026 		}
4027 
4028 		/* wptr/rptr are in bytes! */
4029 		rptr += 16;
4030 		rptr &= rdev->ih.ptr_mask;
4031 	}
4032 	if (queue_hotplug)
4033 		schedule_work(&rdev->hotplug_work);
4034 	if (queue_hdmi)
4035 		schedule_work(&rdev->audio_work);
4036 	if (queue_thermal && rdev->pm.dpm_enabled)
4037 		schedule_work(&rdev->pm.dpm.thermal.work);
4038 	rdev->ih.rptr = rptr;
4039 	WREG32(IH_RB_RPTR, rdev->ih.rptr);
4040 	atomic_set(&rdev->ih.lock, 0);
4041 
4042 	/* make sure wptr hasn't changed while processing */
4043 	wptr = r600_get_ih_wptr(rdev);
4044 	if (wptr != rptr)
4045 		goto restart_ih;
4046 
4047 	return IRQ_HANDLED;
4048 }
4049 
4050 /*
4051  * Debugfs info
4052  */
4053 #if defined(CONFIG_DEBUG_FS)
4054 
4055 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
4056 {
4057 	struct drm_info_node *node = (struct drm_info_node *) m->private;
4058 	struct drm_device *dev = node->minor->dev;
4059 	struct radeon_device *rdev = dev->dev_private;
4060 
4061 	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
4062 	DREG32_SYS(m, rdev, VM_L2_STATUS);
4063 	return 0;
4064 }
4065 
4066 static struct drm_info_list r600_mc_info_list[] = {
4067 	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
4068 };
4069 #endif
4070 
4071 int r600_debugfs_mc_info_init(struct radeon_device *rdev)
4072 {
4073 #if defined(CONFIG_DEBUG_FS)
4074 	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
4075 #else
4076 	return 0;
4077 #endif
4078 }
4079 
4080 /**
4081  * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
4082  * @rdev: radeon device structure
4083  * @bo: buffer object struct which userspace is waiting for idle
4084  *
4085  * Some R6XX/R7XX chips don't seem to take into account the HDP flush
4086  * performed through the ring buffer; this leads to corruption in rendering,
4087  * see http://bugzilla.kernel.org/show_bug.cgi?id=15186.  To avoid this we
4088  * directly perform the HDP flush by writing the register through MMIO.
4089  */
4090 void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
4091 {
4092 	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
4093 	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
4094 	 * This seems to cause problems on some AGP cards. Just use the old
4095 	 * method for them.
4096 	 */
4097 	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
4098 	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
4099 		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
4100 		u32 tmp;
4101 
4102 		WREG32(HDP_DEBUG1, 0);
4103 		tmp = readl((void __iomem *)ptr);
4104 	} else
4105 		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
4106 }
4107 
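/**
 * r600_set_pcie_lanes - request a new PCIE link width
 *
 * @rdev: radeon_device pointer
 * @lanes: requested lane count (0, 1, 2, 4, 8, 12 or 16)
 *
 * Programs the PCIE link width control register to reconfigure the link
 * to the requested width.  No-op on IGP, non-PCIE and X2 boards.
 */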
4108 void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
4109 {
4110 	u32 link_width_cntl, mask;
4111 
4112 	if (rdev->flags & RADEON_IS_IGP)
4113 		return;
4114 
4115 	if (!(rdev->flags & RADEON_IS_PCIE))
4116 		return;
4117 
4118 	/* x2 cards have a special sequence */
4119 	if (ASIC_IS_X2(rdev))
4120 		return;
4121 
4122 	radeon_gui_idle(rdev);
4123 
4124 	switch (lanes) {
4125 	case 0:
4126 		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
4127 		break;
4128 	case 1:
4129 		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
4130 		break;
4131 	case 2:
4132 		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
4133 		break;
4134 	case 4:
4135 		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
4136 		break;
4137 	case 8:
4138 		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
4139 		break;
4140 	case 12:
4141 		/* not actually supported */
4142 		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
4143 		break;
4144 	case 16:
4145 		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
4146 		break;
4147 	default:
4148 		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
4149 		return;
4150 	}
4151 
4152 	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4153 	link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
4154 	link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
4155 	link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
4156 			    R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
4157 
4158 	WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4159 }
4160 
4161 int r600_get_pcie_lanes(struct radeon_device *rdev)
4162 {
4163 	u32 link_width_cntl;
4164 
4165 	if (rdev->flags & RADEON_IS_IGP)
4166 		return 0;
4167 
4168 	if (!(rdev->flags & RADEON_IS_PCIE))
4169 		return 0;
4170 
4171 	/* x2 cards have a special sequence */
4172 	if (ASIC_IS_X2(rdev))
4173 		return 0;
4174 
4175 	radeon_gui_idle(rdev);
4176 
4177 	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
4178 
4179 	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
4180 	case RADEON_PCIE_LC_LINK_WIDTH_X1:
4181 		return 1;
4182 	case RADEON_PCIE_LC_LINK_WIDTH_X2:
4183 		return 2;
4184 	case RADEON_PCIE_LC_LINK_WIDTH_X4:
4185 		return 4;
4186 	case RADEON_PCIE_LC_LINK_WIDTH_X8:
4187 		return 8;
4188 	case RADEON_PCIE_LC_LINK_WIDTH_X12:
4189 		/* not actually supported */
4190 		return 12;
4191 	case RADEON_PCIE_LC_LINK_WIDTH_X0:
4192 	case RADEON_PCIE_LC_LINK_WIDTH_X16:
4193 	default:
4194 		return 16;
4195 	}
4196 }
4197 
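/**
 * r600_pcie_gen2_enable - enable PCIE gen2 link speeds
 *
 * @rdev: radeon_device pointer
 *
 * Switches the PCIE link to gen2 (5.0 GT/s) speeds on supported
 * rv6xx/r7xx boards when the upstream bridge supports it.  Can be
 * disabled with the radeon.pcie_gen2=0 module parameter.
 */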
4198 static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4199 {
4200 	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
4201 	u16 link_cntl2;
4202 
4203 	if (radeon_pcie_gen2 == 0)
4204 		return;
4205 
4206 	if (rdev->flags & RADEON_IS_IGP)
4207 		return;
4208 
4209 	if (!(rdev->flags & RADEON_IS_PCIE))
4210 		return;
4211 
4212 	/* x2 cards have a special sequence */
4213 	if (ASIC_IS_X2(rdev))
4214 		return;
4215 
4216 	/* only RV6xx+ chips are supported */
4217 	if (rdev->family <= CHIP_R600)
4218 		return;
4219 
4220 	if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
4221 		(rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
4222 		return;
4223 
4224 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4225 	if (speed_cntl & LC_CURRENT_DATA_RATE) {
4226 		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
4227 		return;
4228 	}
4229 
4230 	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
4231 
4232 	/* 55 nm r6xx asics */
4233 	if ((rdev->family == CHIP_RV670) ||
4234 	    (rdev->family == CHIP_RV620) ||
4235 	    (rdev->family == CHIP_RV635)) {
4236 		/* advertise upconfig capability */
4237 		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4238 		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
4239 		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4240 		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4241 		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
4242 			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
4243 			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
4244 					     LC_RECONFIG_ARC_MISSING_ESCAPE);
4245 			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
4246 			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4247 		} else {
4248 			link_width_cntl |= LC_UPCONFIGURE_DIS;
4249 			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4250 		}
4251 	}
4252 
4253 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4254 	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
4255 	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
4256 
4257 		/* 55 nm r6xx asics */
4258 		if ((rdev->family == CHIP_RV670) ||
4259 		    (rdev->family == CHIP_RV620) ||
4260 		    (rdev->family == CHIP_RV635)) {
4261 			WREG32(MM_CFGREGS_CNTL, 0x8);
4262 			link_cntl2 = RREG32(0x4088);
4263 			WREG32(MM_CFGREGS_CNTL, 0);
4264 			/* not supported yet */
4265 			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
4266 				return;
4267 		}
4268 
4269 		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
4270 		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
4271 		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
4272 		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
4273 		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
4274 		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4275 
4276 		tmp = RREG32(0x541c);
4277 		WREG32(0x541c, tmp | 0x8);
4278 		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
4279 		link_cntl2 = RREG16(0x4088);
4280 		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
4281 		link_cntl2 |= 0x2;
4282 		WREG16(0x4088, link_cntl2);
4283 		WREG32(MM_CFGREGS_CNTL, 0);
4284 
4285 		if ((rdev->family == CHIP_RV670) ||
4286 		    (rdev->family == CHIP_RV620) ||
4287 		    (rdev->family == CHIP_RV635)) {
4288 			training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
4289 			training_cntl &= ~LC_POINT_7_PLUS_EN;
4290 			WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
4291 		} else {
4292 			speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4293 			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
4294 			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4295 		}
4296 
4297 		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
4298 		speed_cntl |= LC_GEN2_EN_STRAP;
4299 		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
4300 
4301 	} else {
4302 		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
4303 		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
4304 		if (1)
4305 			link_width_cntl |= LC_UPCONFIGURE_DIS;
4306 		else
4307 			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
4308 		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
4309 	}
4310 }
4311 
4312 /**
4313  * r600_get_gpu_clock_counter - return GPU clock counter snapshot
4314  *
4315  * @rdev: radeon_device pointer
4316  *
4317  * Fetches a GPU clock counter snapshot (R6xx-cayman).
4318  * Returns the 64 bit clock counter snapshot.
4319  */
4320 uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
4321 {
4322 	uint64_t clock;
4323 
4324 	mutex_lock(&rdev->gpu_clock_mutex);
4325 	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4326 	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
4327 	        ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4328 	mutex_unlock(&rdev->gpu_clock_mutex);
4329 	return clock;
4330 }
4331