/* drivers/gpu/drm/radeon/evergreen.c (revision 25985edc) */
1 /*
2  * Copyright 2010 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Alex Deucher
23  */
24 #include <linux/firmware.h>
25 #include <linux/platform_device.h>
26 #include <linux/slab.h>
27 #include "drmP.h"
28 #include "radeon.h"
29 #include "radeon_asic.h"
30 #include "radeon_drm.h"
31 #include "evergreend.h"
32 #include "atom.h"
33 #include "avivod.h"
34 #include "evergreen_reg.h"
35 #include "evergreen_blit_shaders.h"
36 
37 #define EVERGREEN_PFP_UCODE_SIZE 1120
38 #define EVERGREEN_PM4_UCODE_SIZE 1376
39 
40 static void evergreen_gpu_init(struct radeon_device *rdev);
41 void evergreen_fini(struct radeon_device *rdev);
42 static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
43 
44 void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
45 {
46 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
47 	u32 tmp;
48 
49 	/* make sure the flip happens during vblank rather than hblank */
50 	tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
51 	tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
52 	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
53 
54 	/* set pageflip to happen anywhere in vblank interval */
55 	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
56 
57 	/* enable the pflip int */
58 	radeon_irq_kms_pflip_irq_get(rdev, crtc);
59 }
60 
61 void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
62 {
63 	/* disable the pflip int */
64 	radeon_irq_kms_pflip_irq_put(rdev, crtc);
65 }
66 
67 u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
68 {
69 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
70 	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
	unsigned i;
71 
72 	/* Lock the graphics update lock */
73 	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
74 	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
75 
76 	/* update the scanout addresses */
77 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
78 	       upper_32_bits(crtc_base));
79 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
80 	       (u32)crtc_base);
81 
82 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
83 	       upper_32_bits(crtc_base));
84 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
85 	       (u32)crtc_base);
86 
	/* Wait for update_pending to go high, but give up eventually so a
	 * wedged display controller cannot hang the CPU in this loop. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) &
		    EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
90 
91 	/* Unlock the lock, so double-buffering can take place inside vblank */
92 	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
93 	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
94 
95 	/* Return current update_pending status: */
96 	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
97 }
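
/*
 * Typical call order, sketched from how the common radeon page-flip path
 * (radeon_display.c) is expected to drive these hooks; the sequencing
 * below is illustrative rather than a contract:
 *
 *	evergreen_pre_page_flip(rdev, crtc);         arm the pflip irq
 *	  ... then, from the vblank/pflip interrupt handler ...
 *	evergreen_page_flip(rdev, crtc, new_base);   program the new base
 *	evergreen_post_page_flip(rdev, crtc);        disarm the pflip irq
 */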
98 
99 /* get temperature in millidegrees */
100 int evergreen_get_temp(struct radeon_device *rdev)
101 {
102 	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
103 		ASIC_T_SHIFT;
104 	int actual_temp = 0;
105 
106 	if (temp & 0x400)
107 		actual_temp = -256;
108 	else if (temp & 0x200)
109 		actual_temp = 255;
110 	else if (temp & 0x100) {
111 		actual_temp = temp & 0x1ff;
112 		actual_temp |= ~0x1ff;
113 	} else
114 		actual_temp = temp & 0xff;
115 
116 	return (actual_temp * 1000) / 2;
117 }
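
/*
 * Worked example with made-up readings: a raw ASIC_T field of 0x1f6 has
 * bit 0x100 set, so it is sign-extended to 0x1f6 - 0x200 = -10 and the
 * function returns -10 * 1000 / 2 = -5000 millidegrees (-5 C).  A small
 * positive reading such as 0x40 comes back as 64 * 1000 / 2 = 32000
 * millidegrees (32 C).
 */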
118 
119 int sumo_get_temp(struct radeon_device *rdev)
120 {
121 	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
122 	int actual_temp = temp - 49;
123 
124 	return actual_temp * 1000;
125 }
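
/*
 * Example, again with an invented reading: a CG_THERMAL_STATUS low byte
 * of 0x61 (97) gives (97 - 49) * 1000 = 48000 millidegrees (48 C).
 */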
126 
127 void evergreen_pm_misc(struct radeon_device *rdev)
128 {
129 	int req_ps_idx = rdev->pm.requested_power_state_index;
130 	int req_cm_idx = rdev->pm.requested_clock_mode_index;
131 	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
132 	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
133 
134 	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
135 		if (voltage->voltage != rdev->pm.current_vddc) {
136 			radeon_atom_set_voltage(rdev, voltage->voltage);
137 			rdev->pm.current_vddc = voltage->voltage;
138 			DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
139 		}
140 	}
141 }
142 
143 void evergreen_pm_prepare(struct radeon_device *rdev)
144 {
145 	struct drm_device *ddev = rdev->ddev;
146 	struct drm_crtc *crtc;
147 	struct radeon_crtc *radeon_crtc;
148 	u32 tmp;
149 
150 	/* disable any active CRTCs */
151 	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
152 		radeon_crtc = to_radeon_crtc(crtc);
153 		if (radeon_crtc->enabled) {
154 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
155 			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
156 			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
157 		}
158 	}
159 }
160 
161 void evergreen_pm_finish(struct radeon_device *rdev)
162 {
163 	struct drm_device *ddev = rdev->ddev;
164 	struct drm_crtc *crtc;
165 	struct radeon_crtc *radeon_crtc;
166 	u32 tmp;
167 
168 	/* enable any active CRTCs */
169 	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
170 		radeon_crtc = to_radeon_crtc(crtc);
171 		if (radeon_crtc->enabled) {
172 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
173 			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
174 			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
175 		}
176 	}
177 }
178 
179 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
180 {
181 	bool connected = false;
182 
183 	switch (hpd) {
184 	case RADEON_HPD_1:
185 		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
186 			connected = true;
187 		break;
188 	case RADEON_HPD_2:
189 		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
190 			connected = true;
191 		break;
192 	case RADEON_HPD_3:
193 		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
194 			connected = true;
195 		break;
196 	case RADEON_HPD_4:
197 		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
198 			connected = true;
199 		break;
200 	case RADEON_HPD_5:
201 		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
202 			connected = true;
203 		break;
204 	case RADEON_HPD_6:
205 		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
206 			connected = true;
207 		break;
208 	default:
209 		break;
210 	}
211 
212 	return connected;
213 }
214 
215 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
216 				enum radeon_hpd_id hpd)
217 {
218 	u32 tmp;
219 	bool connected = evergreen_hpd_sense(rdev, hpd);
220 
221 	switch (hpd) {
222 	case RADEON_HPD_1:
223 		tmp = RREG32(DC_HPD1_INT_CONTROL);
224 		if (connected)
225 			tmp &= ~DC_HPDx_INT_POLARITY;
226 		else
227 			tmp |= DC_HPDx_INT_POLARITY;
228 		WREG32(DC_HPD1_INT_CONTROL, tmp);
229 		break;
230 	case RADEON_HPD_2:
231 		tmp = RREG32(DC_HPD2_INT_CONTROL);
232 		if (connected)
233 			tmp &= ~DC_HPDx_INT_POLARITY;
234 		else
235 			tmp |= DC_HPDx_INT_POLARITY;
236 		WREG32(DC_HPD2_INT_CONTROL, tmp);
237 		break;
238 	case RADEON_HPD_3:
239 		tmp = RREG32(DC_HPD3_INT_CONTROL);
240 		if (connected)
241 			tmp &= ~DC_HPDx_INT_POLARITY;
242 		else
243 			tmp |= DC_HPDx_INT_POLARITY;
244 		WREG32(DC_HPD3_INT_CONTROL, tmp);
245 		break;
246 	case RADEON_HPD_4:
247 		tmp = RREG32(DC_HPD4_INT_CONTROL);
248 		if (connected)
249 			tmp &= ~DC_HPDx_INT_POLARITY;
250 		else
251 			tmp |= DC_HPDx_INT_POLARITY;
252 		WREG32(DC_HPD4_INT_CONTROL, tmp);
253 		break;
254 	case RADEON_HPD_5:
255 		tmp = RREG32(DC_HPD5_INT_CONTROL);
256 		if (connected)
257 			tmp &= ~DC_HPDx_INT_POLARITY;
258 		else
259 			tmp |= DC_HPDx_INT_POLARITY;
260 		WREG32(DC_HPD5_INT_CONTROL, tmp);
261 		break;
262 	case RADEON_HPD_6:
263 		tmp = RREG32(DC_HPD6_INT_CONTROL);
264 		if (connected)
265 			tmp &= ~DC_HPDx_INT_POLARITY;
266 		else
267 			tmp |= DC_HPDx_INT_POLARITY;
268 		WREG32(DC_HPD6_INT_CONTROL, tmp);
269 		break;
270 	default:
271 		break;
272 	}
273 }
274 
275 void evergreen_hpd_init(struct radeon_device *rdev)
276 {
277 	struct drm_device *dev = rdev->ddev;
278 	struct drm_connector *connector;
279 	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
280 		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
281 
282 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
283 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
284 		switch (radeon_connector->hpd.hpd) {
285 		case RADEON_HPD_1:
286 			WREG32(DC_HPD1_CONTROL, tmp);
287 			rdev->irq.hpd[0] = true;
288 			break;
289 		case RADEON_HPD_2:
290 			WREG32(DC_HPD2_CONTROL, tmp);
291 			rdev->irq.hpd[1] = true;
292 			break;
293 		case RADEON_HPD_3:
294 			WREG32(DC_HPD3_CONTROL, tmp);
295 			rdev->irq.hpd[2] = true;
296 			break;
297 		case RADEON_HPD_4:
298 			WREG32(DC_HPD4_CONTROL, tmp);
299 			rdev->irq.hpd[3] = true;
300 			break;
301 		case RADEON_HPD_5:
302 			WREG32(DC_HPD5_CONTROL, tmp);
303 			rdev->irq.hpd[4] = true;
304 			break;
305 		case RADEON_HPD_6:
306 			WREG32(DC_HPD6_CONTROL, tmp);
307 			rdev->irq.hpd[5] = true;
308 			break;
309 		default:
310 			break;
311 		}
312 	}
313 	if (rdev->irq.installed)
314 		evergreen_irq_set(rdev);
315 }
316 
317 void evergreen_hpd_fini(struct radeon_device *rdev)
318 {
319 	struct drm_device *dev = rdev->ddev;
320 	struct drm_connector *connector;
321 
322 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
323 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
324 		switch (radeon_connector->hpd.hpd) {
325 		case RADEON_HPD_1:
326 			WREG32(DC_HPD1_CONTROL, 0);
327 			rdev->irq.hpd[0] = false;
328 			break;
329 		case RADEON_HPD_2:
330 			WREG32(DC_HPD2_CONTROL, 0);
331 			rdev->irq.hpd[1] = false;
332 			break;
333 		case RADEON_HPD_3:
334 			WREG32(DC_HPD3_CONTROL, 0);
335 			rdev->irq.hpd[2] = false;
336 			break;
337 		case RADEON_HPD_4:
338 			WREG32(DC_HPD4_CONTROL, 0);
339 			rdev->irq.hpd[3] = false;
340 			break;
341 		case RADEON_HPD_5:
342 			WREG32(DC_HPD5_CONTROL, 0);
343 			rdev->irq.hpd[4] = false;
344 			break;
345 		case RADEON_HPD_6:
346 			WREG32(DC_HPD6_CONTROL, 0);
347 			rdev->irq.hpd[5] = false;
348 			break;
349 		default:
350 			break;
351 		}
352 	}
353 }
354 
355 /* watermark setup */
356 
357 static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
358 					struct radeon_crtc *radeon_crtc,
359 					struct drm_display_mode *mode,
360 					struct drm_display_mode *other_mode)
361 {
362 	u32 tmp = 0;
363 	/*
364 	 * Line Buffer Setup
365 	 * There are 3 line buffers, each one shared by 2 display controllers.
366 	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
367 	 * the display controllers.  The partitioning is done via one of four
368 	 * preset allocations specified in bits 2:0:
369 	 * first display controller
370 	 *  0 - first half of lb (3840 * 2)
371 	 *  1 - first 3/4 of lb (5760 * 2)
372 	 *  2 - whole lb (7680 * 2)
373 	 *  3 - first 1/4 of lb (1920 * 2)
374 	 * second display controller
375 	 *  4 - second half of lb (3840 * 2)
376 	 *  5 - second 3/4 of lb (5760 * 2)
377 	 *  6 - whole lb (7680 * 2)
378 	 *  7 - last 1/4 of lb (1920 * 2)
379 	 */
380 	if (mode && other_mode) {
381 		if (mode->hdisplay > other_mode->hdisplay) {
382 			if (mode->hdisplay > 2560)
383 				tmp = 1; /* 3/4 */
384 			else
385 				tmp = 0; /* 1/2 */
386 		} else if (other_mode->hdisplay > mode->hdisplay) {
387 			if (other_mode->hdisplay > 2560)
388 				tmp = 3; /* 1/4 */
389 			else
390 				tmp = 0; /* 1/2 */
391 		} else
392 			tmp = 0; /* 1/2 */
393 	} else if (mode)
394 		tmp = 2; /* whole */
395 	else if (other_mode)
396 		tmp = 3; /* 1/4 */
397 
398 	/* second controller of the pair uses second half of the lb */
399 	if (radeon_crtc->crtc_id % 2)
400 		tmp += 4;
401 	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
402 
403 	switch (tmp) {
404 	case 0:
405 	case 4:
406 	default:
407 		if (ASIC_IS_DCE5(rdev))
408 			return 4096 * 2;
409 		else
410 			return 3840 * 2;
411 	case 1:
412 	case 5:
413 		if (ASIC_IS_DCE5(rdev))
414 			return 6144 * 2;
415 		else
416 			return 5760 * 2;
417 	case 2:
418 	case 6:
419 		if (ASIC_IS_DCE5(rdev))
420 			return 8192 * 2;
421 		else
422 			return 7680 * 2;
423 	case 3:
424 	case 7:
425 		if (ASIC_IS_DCE5(rdev))
426 			return 2048 * 2;
427 		else
428 			return 1920 * 2;
429 	}
430 }
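
/*
 * Worked example for a hypothetical pair of heads: if the first
 * controller drives a 2880-wide mode while its partner drives 1920, the
 * code above picks tmp = 1 (first 3/4 of the buffer) and returns
 * 5760 * 2 on DCE4 parts (6144 * 2 on DCE5).  Were this the second
 * controller of the pair instead, tmp would become 5 and the 3/4-sized
 * allocation would come from the other end of the line buffer.
 */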
431 
432 static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
433 {
434 	u32 tmp = RREG32(MC_SHARED_CHMAP);
435 
436 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
437 	case 0:
438 	default:
439 		return 1;
440 	case 1:
441 		return 2;
442 	case 2:
443 		return 4;
444 	case 3:
445 		return 8;
446 	}
447 }
448 
449 struct evergreen_wm_params {
450 	u32 dram_channels; /* number of dram channels */
451 	u32 yclk;          /* bandwidth per dram data pin in kHz */
452 	u32 sclk;          /* engine clock in kHz */
453 	u32 disp_clk;      /* display clock in kHz */
454 	u32 src_width;     /* viewport width */
455 	u32 active_time;   /* active line time in ns */
456 	u32 blank_time;    /* horizontal blank time in ns */
457 	bool interlaced;    /* mode is interlaced */
458 	fixed20_12 vsc;    /* vertical scale ratio */
459 	u32 num_heads;     /* number of active crtcs */
460 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
461 	u32 lb_size;       /* line buffer allocated to pipe */
462 	u32 vtaps;         /* vertical scaler taps */
463 };
464 
465 static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
466 {
467 	/* Calculate DRAM Bandwidth and the part allocated to display. */
468 	fixed20_12 dram_efficiency; /* 0.7 */
469 	fixed20_12 yclk, dram_channels, bandwidth;
470 	fixed20_12 a;
471 
472 	a.full = dfixed_const(1000);
473 	yclk.full = dfixed_const(wm->yclk);
474 	yclk.full = dfixed_div(yclk, a);
475 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
476 	a.full = dfixed_const(10);
477 	dram_efficiency.full = dfixed_const(7);
478 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
479 	bandwidth.full = dfixed_mul(dram_channels, yclk);
480 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
481 
482 	return dfixed_trunc(bandwidth);
483 }
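
/*
 * Numerical sketch with assumed clocks: for wm->yclk = 800000 (800 MHz
 * effective) and 2 DRAM channels, the fixed-point math above reduces to
 * 800 * (2 * 4) * 0.7 = 4480, i.e. roughly 4.5 GB/s of raw DRAM
 * bandwidth.
 */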
484 
485 static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
486 {
487 	/* Calculate DRAM Bandwidth and the part allocated to display. */
488 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
489 	fixed20_12 yclk, dram_channels, bandwidth;
490 	fixed20_12 a;
491 
492 	a.full = dfixed_const(1000);
493 	yclk.full = dfixed_const(wm->yclk);
494 	yclk.full = dfixed_div(yclk, a);
495 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
496 	a.full = dfixed_const(10);
497 	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
498 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
499 	bandwidth.full = dfixed_mul(dram_channels, yclk);
500 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
501 
502 	return dfixed_trunc(bandwidth);
503 }
504 
505 static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
506 {
507 	/* Calculate the display Data return Bandwidth */
508 	fixed20_12 return_efficiency; /* 0.8 */
509 	fixed20_12 sclk, bandwidth;
510 	fixed20_12 a;
511 
512 	a.full = dfixed_const(1000);
513 	sclk.full = dfixed_const(wm->sclk);
514 	sclk.full = dfixed_div(sclk, a);
515 	a.full = dfixed_const(10);
516 	return_efficiency.full = dfixed_const(8);
517 	return_efficiency.full = dfixed_div(return_efficiency, a);
518 	a.full = dfixed_const(32);
519 	bandwidth.full = dfixed_mul(a, sclk);
520 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
521 
522 	return dfixed_trunc(bandwidth);
523 }
524 
525 static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
526 {
527 	/* Calculate the DMIF Request Bandwidth */
528 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
529 	fixed20_12 disp_clk, bandwidth;
530 	fixed20_12 a;
531 
532 	a.full = dfixed_const(1000);
533 	disp_clk.full = dfixed_const(wm->disp_clk);
534 	disp_clk.full = dfixed_div(disp_clk, a);
535 	a.full = dfixed_const(10);
536 	disp_clk_request_efficiency.full = dfixed_const(8);
537 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
538 	a.full = dfixed_const(32);
539 	bandwidth.full = dfixed_mul(a, disp_clk);
540 	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
541 
542 	return dfixed_trunc(bandwidth);
543 }
544 
545 static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
546 {
547 	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
548 	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
549 	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
550 	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
551 
552 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
553 }
554 
555 static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
556 {
557 	/* Calculate the display mode Average Bandwidth
558 	 * DisplayMode should contain the source and destination dimensions,
559 	 * timing, etc.
560 	 */
561 	fixed20_12 bpp;
562 	fixed20_12 line_time;
563 	fixed20_12 src_width;
564 	fixed20_12 bandwidth;
565 	fixed20_12 a;
566 
567 	a.full = dfixed_const(1000);
568 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
569 	line_time.full = dfixed_div(line_time, a);
570 	bpp.full = dfixed_const(wm->bytes_per_pixel);
571 	src_width.full = dfixed_const(wm->src_width);
572 	bandwidth.full = dfixed_mul(src_width, bpp);
573 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
574 	bandwidth.full = dfixed_div(bandwidth, line_time);
575 
576 	return dfixed_trunc(bandwidth);
577 }
578 
579 static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
580 {
581 	/* First calculate the latency in ns */
582 	u32 mc_latency = 2000; /* 2000 ns. */
583 	u32 available_bandwidth = evergreen_available_bandwidth(wm);
584 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
585 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
586 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
587 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
588 		(wm->num_heads * cursor_line_pair_return_time);
589 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
590 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
591 	fixed20_12 a, b, c;
592 
593 	if (wm->num_heads == 0)
594 		return 0;
595 
596 	a.full = dfixed_const(2);
597 	b.full = dfixed_const(1);
598 	if ((wm->vsc.full > a.full) ||
599 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
600 	    (wm->vtaps >= 5) ||
601 	    ((wm->vsc.full >= a.full) && wm->interlaced))
602 		max_src_lines_per_dst_line = 4;
603 	else
604 		max_src_lines_per_dst_line = 2;
605 
606 	a.full = dfixed_const(available_bandwidth);
607 	b.full = dfixed_const(wm->num_heads);
608 	a.full = dfixed_div(a, b);
609 
610 	b.full = dfixed_const(1000);
611 	c.full = dfixed_const(wm->disp_clk);
612 	b.full = dfixed_div(c, b);
613 	c.full = dfixed_const(wm->bytes_per_pixel);
614 	b.full = dfixed_mul(b, c);
615 
616 	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
617 
618 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
619 	b.full = dfixed_const(1000);
620 	c.full = dfixed_const(lb_fill_bw);
621 	b.full = dfixed_div(c, b);
622 	a.full = dfixed_div(a, b);
623 	line_fill_time = dfixed_trunc(a);
624 
625 	if (line_fill_time < wm->active_time)
626 		return latency;
627 	else
628 		return latency + (line_fill_time - wm->active_time);
629 
630 }
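
/*
 * To give a feel for the magnitudes (illustrative numbers only): with
 * 4480 MB/s of available bandwidth, worst_chunk_return_time is
 * (512 * 8 * 1000) / 4480 ~= 914 ns and cursor_line_pair_return_time is
 * (128 * 4 * 1000) / 4480 ~= 114 ns, so with two heads the latency
 * before any line-fill correction is 2000 + (3 * 914 + 2 * 114) +
 * dc_latency nanoseconds.
 */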
631 
632 static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
633 {
634 	if (evergreen_average_bandwidth(wm) <=
635 	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
636 		return true;
637 	else
638 		return false;
639 }
640 
641 static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
642 {
643 	if (evergreen_average_bandwidth(wm) <=
644 	    (evergreen_available_bandwidth(wm) / wm->num_heads))
645 		return true;
646 	else
647 		return false;
648 }
649 
650 static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
651 {
652 	u32 lb_partitions = wm->lb_size / wm->src_width;
653 	u32 line_time = wm->active_time + wm->blank_time;
654 	u32 latency_tolerant_lines;
655 	u32 latency_hiding;
656 	fixed20_12 a;
657 
658 	a.full = dfixed_const(1);
659 	if (wm->vsc.full > a.full)
660 		latency_tolerant_lines = 1;
661 	else {
662 		if (lb_partitions <= (wm->vtaps + 1))
663 			latency_tolerant_lines = 1;
664 		else
665 			latency_tolerant_lines = 2;
666 	}
667 
668 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
669 
670 	if (evergreen_latency_watermark(wm) <= latency_hiding)
671 		return true;
672 	else
673 		return false;
674 }
675 
676 static void evergreen_program_watermarks(struct radeon_device *rdev,
677 					 struct radeon_crtc *radeon_crtc,
678 					 u32 lb_size, u32 num_heads)
679 {
680 	struct drm_display_mode *mode = &radeon_crtc->base.mode;
681 	struct evergreen_wm_params wm;
682 	u32 pixel_period;
683 	u32 line_time = 0;
684 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
685 	u32 priority_a_mark = 0, priority_b_mark = 0;
686 	u32 priority_a_cnt = PRIORITY_OFF;
687 	u32 priority_b_cnt = PRIORITY_OFF;
688 	u32 pipe_offset = radeon_crtc->crtc_id * 16;
689 	u32 tmp, arb_control3;
690 	fixed20_12 a, b, c;
691 
692 	if (radeon_crtc->base.enabled && num_heads && mode) {
693 		pixel_period = 1000000 / (u32)mode->clock;
694 		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
695 		priority_a_cnt = 0;
696 		priority_b_cnt = 0;
697 
698 		wm.yclk = rdev->pm.current_mclk * 10;
699 		wm.sclk = rdev->pm.current_sclk * 10;
700 		wm.disp_clk = mode->clock;
701 		wm.src_width = mode->crtc_hdisplay;
702 		wm.active_time = mode->crtc_hdisplay * pixel_period;
703 		wm.blank_time = line_time - wm.active_time;
704 		wm.interlaced = false;
705 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
706 			wm.interlaced = true;
707 		wm.vsc = radeon_crtc->vsc;
708 		wm.vtaps = 1;
709 		if (radeon_crtc->rmx_type != RMX_OFF)
710 			wm.vtaps = 2;
711 		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
712 		wm.lb_size = lb_size;
713 		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
714 		wm.num_heads = num_heads;
715 
716 		/* set for high clocks */
717 		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
718 		/* set for low clocks */
719 		/* wm.yclk = low clk; wm.sclk = low clk */
720 		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
721 
722 		/* possibly force display priority to high */
723 		/* should really do this at mode validation time... */
724 		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
725 		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
726 		    !evergreen_check_latency_hiding(&wm) ||
727 		    (rdev->disp_priority == 2)) {
728 			DRM_INFO("force priority to high\n");
729 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
730 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
731 		}
732 
733 		a.full = dfixed_const(1000);
734 		b.full = dfixed_const(mode->clock);
735 		b.full = dfixed_div(b, a);
736 		c.full = dfixed_const(latency_watermark_a);
737 		c.full = dfixed_mul(c, b);
738 		c.full = dfixed_mul(c, radeon_crtc->hsc);
739 		c.full = dfixed_div(c, a);
740 		a.full = dfixed_const(16);
741 		c.full = dfixed_div(c, a);
742 		priority_a_mark = dfixed_trunc(c);
743 		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
744 
745 		a.full = dfixed_const(1000);
746 		b.full = dfixed_const(mode->clock);
747 		b.full = dfixed_div(b, a);
748 		c.full = dfixed_const(latency_watermark_b);
749 		c.full = dfixed_mul(c, b);
750 		c.full = dfixed_mul(c, radeon_crtc->hsc);
751 		c.full = dfixed_div(c, a);
752 		a.full = dfixed_const(16);
753 		c.full = dfixed_div(c, a);
754 		priority_b_mark = dfixed_trunc(c);
755 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
756 	}
757 
758 	/* select wm A */
759 	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
760 	tmp = arb_control3;
761 	tmp &= ~LATENCY_WATERMARK_MASK(3);
762 	tmp |= LATENCY_WATERMARK_MASK(1);
763 	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
764 	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
765 	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
766 		LATENCY_HIGH_WATERMARK(line_time)));
767 	/* select wm B */
768 	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
769 	tmp &= ~LATENCY_WATERMARK_MASK(3);
770 	tmp |= LATENCY_WATERMARK_MASK(2);
771 	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
772 	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
773 	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
774 		LATENCY_HIGH_WATERMARK(line_time)));
775 	/* restore original selection */
776 	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
777 
778 	/* write the priority marks */
779 	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
780 	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
781 
782 }
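
/*
 * Sketch of the priority-mark arithmetic with invented numbers: for a
 * 148.5 MHz pixel clock, a 4000 ns latency watermark and hsc = 1.0, the
 * mark is 4000 * 148.5 / 1000 / 16 ~= 37 line buffer units, which is
 * then masked with PRIORITY_MARK_MASK and written to PRIORITY_A_CNT.
 */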
783 
784 void evergreen_bandwidth_update(struct radeon_device *rdev)
785 {
786 	struct drm_display_mode *mode0 = NULL;
787 	struct drm_display_mode *mode1 = NULL;
788 	u32 num_heads = 0, lb_size;
789 	int i;
790 
791 	radeon_update_display_priority(rdev);
792 
793 	for (i = 0; i < rdev->num_crtc; i++) {
794 		if (rdev->mode_info.crtcs[i]->base.enabled)
795 			num_heads++;
796 	}
797 	for (i = 0; i < rdev->num_crtc; i += 2) {
798 		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
799 		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
800 		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
801 		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
802 		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
803 		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
804 	}
805 }
806 
807 int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
808 {
809 	unsigned i;
810 	u32 tmp;
811 
812 	for (i = 0; i < rdev->usec_timeout; i++) {
813 		/* poll the MC busy bits in SRBM_STATUS */
814 		tmp = RREG32(SRBM_STATUS) & 0x1F00;
815 		if (!tmp)
816 			return 0;
817 		udelay(1);
818 	}
819 	return -1;
820 }
821 
822 /*
823  * GART
824  */
825 void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
826 {
827 	unsigned i;
828 	u32 tmp;
829 
830 	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
831 
832 	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
833 	for (i = 0; i < rdev->usec_timeout; i++) {
834 		/* poll the VM context0 request/response register */
835 		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
836 		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
837 		if (tmp == 2) {
838 			printk(KERN_WARNING "[drm] evergreen flush TLB failed\n");
839 			return;
840 		}
841 		if (tmp) {
842 			return;
843 		}
844 		udelay(1);
845 	}
846 }
847 
848 int evergreen_pcie_gart_enable(struct radeon_device *rdev)
849 {
850 	u32 tmp;
851 	int r;
852 
853 	if (rdev->gart.table.vram.robj == NULL) {
854 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
855 		return -EINVAL;
856 	}
857 	r = radeon_gart_table_vram_pin(rdev);
858 	if (r)
859 		return r;
860 	radeon_gart_restore(rdev);
861 	/* Setup L2 cache */
862 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
863 				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
864 				EFFECTIVE_L2_QUEUE_SIZE(7));
865 	WREG32(VM_L2_CNTL2, 0);
866 	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
867 	/* Setup TLB control */
868 	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
869 		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
870 		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
871 		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
872 	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
873 	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
874 	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
875 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
876 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
877 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
878 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
879 	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
880 	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
881 	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
882 	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
883 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
884 	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
885 			(u32)(rdev->dummy_page.addr >> 12));
886 	WREG32(VM_CONTEXT1_CNTL, 0);
887 
888 	evergreen_pcie_gart_tlb_flush(rdev);
889 	rdev->gart.ready = true;
890 	return 0;
891 }
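
/*
 * Example aperture setup for a hypothetical layout: a 512 MB GTT placed
 * at GPU address 0 programs VM_CONTEXT0_PAGE_TABLE_START_ADDR = 0 and
 * VM_CONTEXT0_PAGE_TABLE_END_ADDR = 0x20000000 >> 12 = 0x20000, so VM
 * context 0 spans 128K pages of 4 KB backed by the table at
 * gart.table_addr.
 */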
892 
893 void evergreen_pcie_gart_disable(struct radeon_device *rdev)
894 {
895 	u32 tmp;
896 	int r;
897 
898 	/* Disable all tables */
899 	WREG32(VM_CONTEXT0_CNTL, 0);
900 	WREG32(VM_CONTEXT1_CNTL, 0);
901 
902 	/* Setup L2 cache */
903 	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
904 				EFFECTIVE_L2_QUEUE_SIZE(7));
905 	WREG32(VM_L2_CNTL2, 0);
906 	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
907 	/* Setup TLB control */
908 	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
909 	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
910 	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
911 	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
912 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
913 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
914 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
915 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
916 	if (rdev->gart.table.vram.robj) {
917 		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
918 		if (likely(r == 0)) {
919 			radeon_bo_kunmap(rdev->gart.table.vram.robj);
920 			radeon_bo_unpin(rdev->gart.table.vram.robj);
921 			radeon_bo_unreserve(rdev->gart.table.vram.robj);
922 		}
923 	}
924 }
925 
926 void evergreen_pcie_gart_fini(struct radeon_device *rdev)
927 {
928 	evergreen_pcie_gart_disable(rdev);
929 	radeon_gart_table_vram_free(rdev);
930 	radeon_gart_fini(rdev);
931 }
932 
933 
934 void evergreen_agp_enable(struct radeon_device *rdev)
935 {
936 	u32 tmp;
937 
938 	/* Setup L2 cache */
939 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
940 				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
941 				EFFECTIVE_L2_QUEUE_SIZE(7));
942 	WREG32(VM_L2_CNTL2, 0);
943 	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
944 	/* Setup TLB control */
945 	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
946 		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
947 		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
948 		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
949 	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
950 	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
951 	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
952 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
953 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
954 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
955 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
956 	WREG32(VM_CONTEXT0_CNTL, 0);
957 	WREG32(VM_CONTEXT1_CNTL, 0);
958 }
959 
960 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
961 {
962 	save->vga_control[0] = RREG32(D1VGA_CONTROL);
963 	save->vga_control[1] = RREG32(D2VGA_CONTROL);
964 	save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
965 	save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
966 	save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
967 	save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
968 	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
969 	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
970 	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
971 	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
972 	if (!(rdev->flags & RADEON_IS_IGP)) {
973 		save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
974 		save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
975 		save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
976 		save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
977 	}
978 
979 	/* Stop all video */
980 	WREG32(VGA_RENDER_CONTROL, 0);
981 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
982 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
983 	if (!(rdev->flags & RADEON_IS_IGP)) {
984 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
985 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
986 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
987 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
988 	}
989 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
990 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
991 	if (!(rdev->flags & RADEON_IS_IGP)) {
992 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
993 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
994 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
995 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
996 	}
997 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
998 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
999 	if (!(rdev->flags & RADEON_IS_IGP)) {
1000 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
1001 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
1002 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
1003 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
1004 	}
1005 
1006 	WREG32(D1VGA_CONTROL, 0);
1007 	WREG32(D2VGA_CONTROL, 0);
1008 	WREG32(EVERGREEN_D3VGA_CONTROL, 0);
1009 	WREG32(EVERGREEN_D4VGA_CONTROL, 0);
1010 	WREG32(EVERGREEN_D5VGA_CONTROL, 0);
1011 	WREG32(EVERGREEN_D6VGA_CONTROL, 0);
1012 }
1013 
1014 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
1015 {
1016 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
1017 	       upper_32_bits(rdev->mc.vram_start));
1018 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
1019 	       upper_32_bits(rdev->mc.vram_start));
1020 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
1021 	       (u32)rdev->mc.vram_start);
1022 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
1023 	       (u32)rdev->mc.vram_start);
1024 
1025 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
1026 	       upper_32_bits(rdev->mc.vram_start));
1027 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
1028 	       upper_32_bits(rdev->mc.vram_start));
1029 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
1030 	       (u32)rdev->mc.vram_start);
1031 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
1032 	       (u32)rdev->mc.vram_start);
1033 
1034 	if (!(rdev->flags & RADEON_IS_IGP)) {
1035 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
1036 		       upper_32_bits(rdev->mc.vram_start));
1037 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
1038 		       upper_32_bits(rdev->mc.vram_start));
1039 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
1040 		       (u32)rdev->mc.vram_start);
1041 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
1042 		       (u32)rdev->mc.vram_start);
1043 
1044 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
1045 		       upper_32_bits(rdev->mc.vram_start));
1046 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
1047 		       upper_32_bits(rdev->mc.vram_start));
1048 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
1049 		       (u32)rdev->mc.vram_start);
1050 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
1051 		       (u32)rdev->mc.vram_start);
1052 
1053 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
1054 		       upper_32_bits(rdev->mc.vram_start));
1055 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
1056 		       upper_32_bits(rdev->mc.vram_start));
1057 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
1058 		       (u32)rdev->mc.vram_start);
1059 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
1060 		       (u32)rdev->mc.vram_start);
1061 
1062 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
1063 		       upper_32_bits(rdev->mc.vram_start));
1064 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
1065 		       upper_32_bits(rdev->mc.vram_start));
1066 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
1067 		       (u32)rdev->mc.vram_start);
1068 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
1069 		       (u32)rdev->mc.vram_start);
1070 	}
1071 
1072 	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
1073 	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
1074 	/* Unlock host access */
1075 	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
1076 	mdelay(1);
1077 	/* Restore video state */
1078 	WREG32(D1VGA_CONTROL, save->vga_control[0]);
1079 	WREG32(D2VGA_CONTROL, save->vga_control[1]);
1080 	WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
1081 	WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
1082 	WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
1083 	WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
1084 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
1085 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
1086 	if (!(rdev->flags & RADEON_IS_IGP)) {
1087 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
1088 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
1089 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
1090 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
1091 	}
1092 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
1093 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
1094 	if (!(rdev->flags & RADEON_IS_IGP)) {
1095 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
1096 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
1097 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
1098 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
1099 	}
1100 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
1101 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
1102 	if (!(rdev->flags & RADEON_IS_IGP)) {
1103 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
1104 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
1105 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
1106 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
1107 	}
1108 	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
1109 }
1110 
1111 void evergreen_mc_program(struct radeon_device *rdev)
1112 {
1113 	struct evergreen_mc_save save;
1114 	u32 tmp;
1115 	int i, j;
1116 
1117 	/* Initialize HDP */
1118 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1119 		WREG32((0x2c14 + j), 0x00000000);
1120 		WREG32((0x2c18 + j), 0x00000000);
1121 		WREG32((0x2c1c + j), 0x00000000);
1122 		WREG32((0x2c20 + j), 0x00000000);
1123 		WREG32((0x2c24 + j), 0x00000000);
1124 	}
1125 	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1126 
1127 	evergreen_mc_stop(rdev, &save);
1128 	if (evergreen_mc_wait_for_idle(rdev)) {
1129 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1130 	}
1131 	/* Lock out access through the VGA aperture */
1132 	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1133 	/* Update configuration */
1134 	if (rdev->flags & RADEON_IS_AGP) {
1135 		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1136 			/* VRAM before AGP */
1137 			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1138 				rdev->mc.vram_start >> 12);
1139 			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1140 				rdev->mc.gtt_end >> 12);
1141 		} else {
1142 			/* VRAM after AGP */
1143 			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1144 				rdev->mc.gtt_start >> 12);
1145 			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1146 				rdev->mc.vram_end >> 12);
1147 		}
1148 	} else {
1149 		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1150 			rdev->mc.vram_start >> 12);
1151 		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1152 			rdev->mc.vram_end >> 12);
1153 	}
1154 	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
1155 	if (rdev->flags & RADEON_IS_IGP) {
1156 		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
1157 		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
1158 		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
1159 		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
1160 	}
1161 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1162 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1163 	WREG32(MC_VM_FB_LOCATION, tmp);
1164 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
1165 	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
1166 	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
1167 	if (rdev->flags & RADEON_IS_AGP) {
1168 		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
1169 		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
1170 		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1171 	} else {
1172 		WREG32(MC_VM_AGP_BASE, 0);
1173 		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1174 		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1175 	}
1176 	if (evergreen_mc_wait_for_idle(rdev)) {
1177 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1178 	}
1179 	evergreen_mc_resume(rdev, &save);
1180 	/* we need to own VRAM, so turn off the VGA renderer here
1181 	 * to stop it overwriting our objects */
1182 	rv515_vga_render_disable(rdev);
1183 }
1184 
1185 /*
1186  * CP.
1187  */
1188 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1189 {
1190 	/* set to DX10/11 mode */
1191 	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
1192 	radeon_ring_write(rdev, 1);
1193 	/* FIXME: implement */
1194 	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1195 	radeon_ring_write(rdev,
1196 #ifdef __BIG_ENDIAN
1197 			  (2 << 0) |
1198 #endif
1199 			  (ib->gpu_addr & 0xFFFFFFFC));
1200 	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
1201 	radeon_ring_write(rdev, ib->length_dw);
1202 }
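
/*
 * On the ring, the indirect-buffer dispatch above looks like this
 * (addresses are illustrative):
 *
 *	PACKET3(PACKET3_INDIRECT_BUFFER, 2)
 *	0x12345000	IB base, low 32 bits (4-byte aligned)
 *	0x00000001	IB base, bits 39:32
 *	0x00000100	IB length in dwords
 */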
1203 
1204 
1205 static int evergreen_cp_load_microcode(struct radeon_device *rdev)
1206 {
1207 	const __be32 *fw_data;
1208 	int i;
1209 
1210 	if (!rdev->me_fw || !rdev->pfp_fw)
1211 		return -EINVAL;
1212 
1213 	r700_cp_stop(rdev);
1214 	WREG32(CP_RB_CNTL,
1215 #ifdef __BIG_ENDIAN
1216 	       BUF_SWAP_32BIT |
1217 #endif
1218 	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
1219 
1220 	fw_data = (const __be32 *)rdev->pfp_fw->data;
1221 	WREG32(CP_PFP_UCODE_ADDR, 0);
1222 	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
1223 		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
1224 	WREG32(CP_PFP_UCODE_ADDR, 0);
1225 
1226 	fw_data = (const __be32 *)rdev->me_fw->data;
1227 	WREG32(CP_ME_RAM_WADDR, 0);
1228 	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
1229 		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
1230 
1231 	WREG32(CP_PFP_UCODE_ADDR, 0);
1232 	WREG32(CP_ME_RAM_WADDR, 0);
1233 	WREG32(CP_ME_RAM_RADDR, 0);
1234 	return 0;
1235 }
1236 
1237 static int evergreen_cp_start(struct radeon_device *rdev)
1238 {
1239 	int r, i;
1240 	uint32_t cp_me;
1241 
1242 	r = radeon_ring_lock(rdev, 7);
1243 	if (r) {
1244 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1245 		return r;
1246 	}
1247 	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
1248 	radeon_ring_write(rdev, 0x1);
1249 	radeon_ring_write(rdev, 0x0);
1250 	radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
1251 	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1252 	radeon_ring_write(rdev, 0);
1253 	radeon_ring_write(rdev, 0);
1254 	radeon_ring_unlock_commit(rdev);
1255 
1256 	cp_me = 0xff;
1257 	WREG32(CP_ME_CNTL, cp_me);
1258 
1259 	r = radeon_ring_lock(rdev, evergreen_default_size + 19);
1260 	if (r) {
1261 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1262 		return r;
1263 	}
1264 
1265 	/* setup clear context state */
1266 	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1267 	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1268 
1269 	for (i = 0; i < evergreen_default_size; i++)
1270 		radeon_ring_write(rdev, evergreen_default_state[i]);
1271 
1272 	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1273 	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
1274 
1275 	/* set clear context state */
1276 	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
1277 	radeon_ring_write(rdev, 0);
1278 
1279 	/* SQ_VTX_BASE_VTX_LOC */
1280 	radeon_ring_write(rdev, 0xc0026f00);
1281 	radeon_ring_write(rdev, 0x00000000);
1282 	radeon_ring_write(rdev, 0x00000000);
1283 	radeon_ring_write(rdev, 0x00000000);
1284 
1285 	/* Clear consts */
1286 	radeon_ring_write(rdev, 0xc0036f00);
1287 	radeon_ring_write(rdev, 0x00000bc4);
1288 	radeon_ring_write(rdev, 0xffffffff);
1289 	radeon_ring_write(rdev, 0xffffffff);
1290 	radeon_ring_write(rdev, 0xffffffff);
1291 
1292 	radeon_ring_write(rdev, 0xc0026900);
1293 	radeon_ring_write(rdev, 0x00000316);
1294 	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1295 	radeon_ring_write(rdev, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
1296 
1297 	radeon_ring_unlock_commit(rdev);
1298 
1299 	return 0;
1300 }
1301 
1302 int evergreen_cp_resume(struct radeon_device *rdev)
1303 {
1304 	u32 tmp;
1305 	u32 rb_bufsz;
1306 	int r;
1307 
1308 	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
1309 	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
1310 				 SOFT_RESET_PA |
1311 				 SOFT_RESET_SH |
1312 				 SOFT_RESET_VGT |
1313 				 SOFT_RESET_SX));
1314 	RREG32(GRBM_SOFT_RESET);
1315 	mdelay(15);
1316 	WREG32(GRBM_SOFT_RESET, 0);
1317 	RREG32(GRBM_SOFT_RESET);
1318 
1319 	/* Set ring buffer size */
1320 	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
1321 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1322 #ifdef __BIG_ENDIAN
1323 	tmp |= BUF_SWAP_32BIT;
1324 #endif
1325 	WREG32(CP_RB_CNTL, tmp);
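	/*
	 * Example encoding, assuming a 1 MB ring: drm_order(1 MB / 8) = 17
	 * lands in RB_BUFSZ and drm_order(4096 / 8) = 9 in RB_BLKSZ, so the
	 * ring holds 2^17 * 8 bytes = 1 MB and is fetched in 4 KB blocks.
	 */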
1326 	WREG32(CP_SEM_WAIT_TIMER, 0x4);
1327 
1328 	/* Set the write pointer delay */
1329 	WREG32(CP_RB_WPTR_DELAY, 0);
1330 
1331 	/* Initialize the ring buffer's read and write pointers */
1332 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1333 	WREG32(CP_RB_RPTR_WR, 0);
1334 	WREG32(CP_RB_WPTR, 0);
1335 
1336 	/* set the wb address whether it's enabled or not */
1337 	WREG32(CP_RB_RPTR_ADDR,
1338 #ifdef __BIG_ENDIAN
1339 	       RB_RPTR_SWAP(2) |
1340 #endif
1341 	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
1342 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1343 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1344 
1345 	if (rdev->wb.enabled)
1346 		WREG32(SCRATCH_UMSK, 0xff);
1347 	else {
1348 		tmp |= RB_NO_UPDATE;
1349 		WREG32(SCRATCH_UMSK, 0);
1350 	}
1351 
1352 	mdelay(1);
1353 	WREG32(CP_RB_CNTL, tmp);
1354 
1355 	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
1356 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
1357 
1358 	rdev->cp.rptr = RREG32(CP_RB_RPTR);
1359 	rdev->cp.wptr = RREG32(CP_RB_WPTR);
1360 
1361 	evergreen_cp_start(rdev);
1362 	rdev->cp.ready = true;
1363 	r = radeon_ring_test(rdev);
1364 	if (r) {
1365 		rdev->cp.ready = false;
1366 		return r;
1367 	}
1368 	return 0;
1369 }
1370 
1371 /*
1372  * Core functions
1373  */
1374 static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
1375 						  u32 num_tile_pipes,
1376 						  u32 num_backends,
1377 						  u32 backend_disable_mask)
1378 {
1379 	u32 backend_map = 0;
1380 	u32 enabled_backends_mask = 0;
1381 	u32 enabled_backends_count = 0;
1382 	u32 cur_pipe;
1383 	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
1384 	u32 cur_backend = 0;
1385 	u32 i;
1386 	bool force_no_swizzle;
1387 
1388 	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
1389 		num_tile_pipes = EVERGREEN_MAX_PIPES;
1390 	if (num_tile_pipes < 1)
1391 		num_tile_pipes = 1;
1392 	if (num_backends > EVERGREEN_MAX_BACKENDS)
1393 		num_backends = EVERGREEN_MAX_BACKENDS;
1394 	if (num_backends < 1)
1395 		num_backends = 1;
1396 
1397 	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
1398 		if (((backend_disable_mask >> i) & 1) == 0) {
1399 			enabled_backends_mask |= (1 << i);
1400 			++enabled_backends_count;
1401 		}
1402 		if (enabled_backends_count == num_backends)
1403 			break;
1404 	}
1405 
1406 	if (enabled_backends_count == 0) {
1407 		enabled_backends_mask = 1;
1408 		enabled_backends_count = 1;
1409 	}
1410 
1411 	if (enabled_backends_count != num_backends)
1412 		num_backends = enabled_backends_count;
1413 
1414 	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
1415 	switch (rdev->family) {
1416 	case CHIP_CEDAR:
1417 	case CHIP_REDWOOD:
1418 	case CHIP_PALM:
1419 	case CHIP_TURKS:
1420 	case CHIP_CAICOS:
1421 		force_no_swizzle = false;
1422 		break;
1423 	case CHIP_CYPRESS:
1424 	case CHIP_HEMLOCK:
1425 	case CHIP_JUNIPER:
1426 	case CHIP_BARTS:
1427 	default:
1428 		force_no_swizzle = true;
1429 		break;
1430 	}
1431 	if (force_no_swizzle) {
1432 		bool last_backend_enabled = false;
1433 
1434 		force_no_swizzle = false;
1435 		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
1436 			if (((enabled_backends_mask >> i) & 1) == 1) {
1437 				if (last_backend_enabled)
1438 					force_no_swizzle = true;
1439 				last_backend_enabled = true;
1440 			} else
1441 				last_backend_enabled = false;
1442 		}
1443 	}
1444 
1445 	switch (num_tile_pipes) {
1446 	case 1:
1447 	case 3:
1448 	case 5:
1449 	case 7:
1450 		DRM_ERROR("odd number of pipes!\n");
1451 		break;
1452 	case 2:
1453 		swizzle_pipe[0] = 0;
1454 		swizzle_pipe[1] = 1;
1455 		break;
1456 	case 4:
1457 		if (force_no_swizzle) {
1458 			swizzle_pipe[0] = 0;
1459 			swizzle_pipe[1] = 1;
1460 			swizzle_pipe[2] = 2;
1461 			swizzle_pipe[3] = 3;
1462 		} else {
1463 			swizzle_pipe[0] = 0;
1464 			swizzle_pipe[1] = 2;
1465 			swizzle_pipe[2] = 1;
1466 			swizzle_pipe[3] = 3;
1467 		}
1468 		break;
1469 	case 6:
1470 		if (force_no_swizzle) {
1471 			swizzle_pipe[0] = 0;
1472 			swizzle_pipe[1] = 1;
1473 			swizzle_pipe[2] = 2;
1474 			swizzle_pipe[3] = 3;
1475 			swizzle_pipe[4] = 4;
1476 			swizzle_pipe[5] = 5;
1477 		} else {
1478 			swizzle_pipe[0] = 0;
1479 			swizzle_pipe[1] = 2;
1480 			swizzle_pipe[2] = 4;
1481 			swizzle_pipe[3] = 1;
1482 			swizzle_pipe[4] = 3;
1483 			swizzle_pipe[5] = 5;
1484 		}
1485 		break;
1486 	case 8:
1487 		if (force_no_swizzle) {
1488 			swizzle_pipe[0] = 0;
1489 			swizzle_pipe[1] = 1;
1490 			swizzle_pipe[2] = 2;
1491 			swizzle_pipe[3] = 3;
1492 			swizzle_pipe[4] = 4;
1493 			swizzle_pipe[5] = 5;
1494 			swizzle_pipe[6] = 6;
1495 			swizzle_pipe[7] = 7;
1496 		} else {
1497 			swizzle_pipe[0] = 0;
1498 			swizzle_pipe[1] = 2;
1499 			swizzle_pipe[2] = 4;
1500 			swizzle_pipe[3] = 6;
1501 			swizzle_pipe[4] = 1;
1502 			swizzle_pipe[5] = 3;
1503 			swizzle_pipe[6] = 5;
1504 			swizzle_pipe[7] = 7;
1505 		}
1506 		break;
1507 	}
1508 
1509 	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1510 		while (((1 << cur_backend) & enabled_backends_mask) == 0)
1511 			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
1512 
1513 		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
1514 
1515 		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
1516 	}
1517 
1518 	return backend_map;
1519 }
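
/*
 * Worked example for a made-up configuration: with 4 tile pipes, two
 * enabled backends (mask 0x3) and swizzled pipe order { 0, 2, 1, 3 },
 * the loop hands backends 0, 1, 0, 1 to pipes 0..3 in order; after
 * swizzling, nibbles 0 and 1 hold backend 0 and nibbles 2 and 3 hold
 * backend 1, giving backend_map = 0x1100.
 */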
1520 
1521 static void evergreen_program_channel_remap(struct radeon_device *rdev)
1522 {
1523 	u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
1524 
1525 	tmp = RREG32(MC_SHARED_CHMAP);
1526 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1527 	case 0:
1528 	case 1:
1529 	case 2:
1530 	case 3:
1531 	default:
1532 		/* default mapping */
1533 		mc_shared_chremap = 0x00fac688;
1534 		break;
1535 	}
1536 
1537 	switch (rdev->family) {
1538 	case CHIP_HEMLOCK:
1539 	case CHIP_CYPRESS:
1540 	case CHIP_BARTS:
1541 		tcp_chan_steer_lo = 0x54763210;
1542 		tcp_chan_steer_hi = 0x0000ba98;
1543 		break;
1544 	case CHIP_JUNIPER:
1545 	case CHIP_REDWOOD:
1546 	case CHIP_CEDAR:
1547 	case CHIP_PALM:
1548 	case CHIP_TURKS:
1549 	case CHIP_CAICOS:
1550 	default:
1551 		tcp_chan_steer_lo = 0x76543210;
1552 		tcp_chan_steer_hi = 0x0000ba98;
1553 		break;
1554 	}
1555 
1556 	WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
1557 	WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
1558 	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
1559 }
1560 
1561 static void evergreen_gpu_init(struct radeon_device *rdev)
1562 {
1563 	u32 cc_rb_backend_disable = 0;
1564 	u32 cc_gc_shader_pipe_config;
1565 	u32 gb_addr_config = 0;
1566 	u32 mc_shared_chmap, mc_arb_ramcfg;
1567 	u32 gb_backend_map;
1568 	u32 grbm_gfx_index;
1569 	u32 sx_debug_1;
1570 	u32 smx_dc_ctl0;
1571 	u32 sq_config;
1572 	u32 sq_lds_resource_mgmt;
1573 	u32 sq_gpr_resource_mgmt_1;
1574 	u32 sq_gpr_resource_mgmt_2;
1575 	u32 sq_gpr_resource_mgmt_3;
1576 	u32 sq_thread_resource_mgmt;
1577 	u32 sq_thread_resource_mgmt_2;
1578 	u32 sq_stack_resource_mgmt_1;
1579 	u32 sq_stack_resource_mgmt_2;
1580 	u32 sq_stack_resource_mgmt_3;
1581 	u32 vgt_cache_invalidation;
1582 	u32 hdp_host_path_cntl;
1583 	int i, j, num_shader_engines, ps_thread_count;
1584 
1585 	switch (rdev->family) {
1586 	case CHIP_CYPRESS:
1587 	case CHIP_HEMLOCK:
1588 		rdev->config.evergreen.num_ses = 2;
1589 		rdev->config.evergreen.max_pipes = 4;
1590 		rdev->config.evergreen.max_tile_pipes = 8;
1591 		rdev->config.evergreen.max_simds = 10;
1592 		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1593 		rdev->config.evergreen.max_gprs = 256;
1594 		rdev->config.evergreen.max_threads = 248;
1595 		rdev->config.evergreen.max_gs_threads = 32;
1596 		rdev->config.evergreen.max_stack_entries = 512;
1597 		rdev->config.evergreen.sx_num_of_sets = 4;
1598 		rdev->config.evergreen.sx_max_export_size = 256;
1599 		rdev->config.evergreen.sx_max_export_pos_size = 64;
1600 		rdev->config.evergreen.sx_max_export_smx_size = 192;
1601 		rdev->config.evergreen.max_hw_contexts = 8;
1602 		rdev->config.evergreen.sq_num_cf_insts = 2;
1603 
1604 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1605 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1606 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1607 		break;
1608 	case CHIP_JUNIPER:
1609 		rdev->config.evergreen.num_ses = 1;
1610 		rdev->config.evergreen.max_pipes = 4;
1611 		rdev->config.evergreen.max_tile_pipes = 4;
1612 		rdev->config.evergreen.max_simds = 10;
1613 		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1614 		rdev->config.evergreen.max_gprs = 256;
1615 		rdev->config.evergreen.max_threads = 248;
1616 		rdev->config.evergreen.max_gs_threads = 32;
1617 		rdev->config.evergreen.max_stack_entries = 512;
1618 		rdev->config.evergreen.sx_num_of_sets = 4;
1619 		rdev->config.evergreen.sx_max_export_size = 256;
1620 		rdev->config.evergreen.sx_max_export_pos_size = 64;
1621 		rdev->config.evergreen.sx_max_export_smx_size = 192;
1622 		rdev->config.evergreen.max_hw_contexts = 8;
1623 		rdev->config.evergreen.sq_num_cf_insts = 2;
1624 
1625 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1626 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1627 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1628 		break;
1629 	case CHIP_REDWOOD:
1630 		rdev->config.evergreen.num_ses = 1;
1631 		rdev->config.evergreen.max_pipes = 4;
1632 		rdev->config.evergreen.max_tile_pipes = 4;
1633 		rdev->config.evergreen.max_simds = 5;
1634 		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1635 		rdev->config.evergreen.max_gprs = 256;
1636 		rdev->config.evergreen.max_threads = 248;
1637 		rdev->config.evergreen.max_gs_threads = 32;
1638 		rdev->config.evergreen.max_stack_entries = 256;
1639 		rdev->config.evergreen.sx_num_of_sets = 4;
1640 		rdev->config.evergreen.sx_max_export_size = 256;
1641 		rdev->config.evergreen.sx_max_export_pos_size = 64;
1642 		rdev->config.evergreen.sx_max_export_smx_size = 192;
1643 		rdev->config.evergreen.max_hw_contexts = 8;
1644 		rdev->config.evergreen.sq_num_cf_insts = 2;
1645 
1646 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1647 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1648 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1649 		break;
1650 	case CHIP_CEDAR:
1651 	default:
1652 		rdev->config.evergreen.num_ses = 1;
1653 		rdev->config.evergreen.max_pipes = 2;
1654 		rdev->config.evergreen.max_tile_pipes = 2;
1655 		rdev->config.evergreen.max_simds = 2;
1656 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1657 		rdev->config.evergreen.max_gprs = 256;
1658 		rdev->config.evergreen.max_threads = 192;
1659 		rdev->config.evergreen.max_gs_threads = 16;
1660 		rdev->config.evergreen.max_stack_entries = 256;
1661 		rdev->config.evergreen.sx_num_of_sets = 4;
1662 		rdev->config.evergreen.sx_max_export_size = 128;
1663 		rdev->config.evergreen.sx_max_export_pos_size = 32;
1664 		rdev->config.evergreen.sx_max_export_smx_size = 96;
1665 		rdev->config.evergreen.max_hw_contexts = 4;
1666 		rdev->config.evergreen.sq_num_cf_insts = 1;
1667 
1668 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1669 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1670 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1671 		break;
1672 	case CHIP_PALM:
1673 		rdev->config.evergreen.num_ses = 1;
1674 		rdev->config.evergreen.max_pipes = 2;
1675 		rdev->config.evergreen.max_tile_pipes = 2;
1676 		rdev->config.evergreen.max_simds = 2;
1677 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1678 		rdev->config.evergreen.max_gprs = 256;
1679 		rdev->config.evergreen.max_threads = 192;
1680 		rdev->config.evergreen.max_gs_threads = 16;
1681 		rdev->config.evergreen.max_stack_entries = 256;
1682 		rdev->config.evergreen.sx_num_of_sets = 4;
1683 		rdev->config.evergreen.sx_max_export_size = 128;
1684 		rdev->config.evergreen.sx_max_export_pos_size = 32;
1685 		rdev->config.evergreen.sx_max_export_smx_size = 96;
1686 		rdev->config.evergreen.max_hw_contexts = 4;
1687 		rdev->config.evergreen.sq_num_cf_insts = 1;
1688 
1689 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1690 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1691 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1692 		break;
1693 	case CHIP_BARTS:
1694 		rdev->config.evergreen.num_ses = 2;
1695 		rdev->config.evergreen.max_pipes = 4;
1696 		rdev->config.evergreen.max_tile_pipes = 8;
1697 		rdev->config.evergreen.max_simds = 7;
1698 		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1699 		rdev->config.evergreen.max_gprs = 256;
1700 		rdev->config.evergreen.max_threads = 248;
1701 		rdev->config.evergreen.max_gs_threads = 32;
1702 		rdev->config.evergreen.max_stack_entries = 512;
1703 		rdev->config.evergreen.sx_num_of_sets = 4;
1704 		rdev->config.evergreen.sx_max_export_size = 256;
1705 		rdev->config.evergreen.sx_max_export_pos_size = 64;
1706 		rdev->config.evergreen.sx_max_export_smx_size = 192;
1707 		rdev->config.evergreen.max_hw_contexts = 8;
1708 		rdev->config.evergreen.sq_num_cf_insts = 2;
1709 
1710 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1711 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1712 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1713 		break;
1714 	case CHIP_TURKS:
1715 		rdev->config.evergreen.num_ses = 1;
1716 		rdev->config.evergreen.max_pipes = 4;
1717 		rdev->config.evergreen.max_tile_pipes = 4;
1718 		rdev->config.evergreen.max_simds = 6;
1719 		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1720 		rdev->config.evergreen.max_gprs = 256;
1721 		rdev->config.evergreen.max_threads = 248;
1722 		rdev->config.evergreen.max_gs_threads = 32;
1723 		rdev->config.evergreen.max_stack_entries = 256;
1724 		rdev->config.evergreen.sx_num_of_sets = 4;
1725 		rdev->config.evergreen.sx_max_export_size = 256;
1726 		rdev->config.evergreen.sx_max_export_pos_size = 64;
1727 		rdev->config.evergreen.sx_max_export_smx_size = 192;
1728 		rdev->config.evergreen.max_hw_contexts = 8;
1729 		rdev->config.evergreen.sq_num_cf_insts = 2;
1730 
1731 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1732 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1733 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1734 		break;
1735 	case CHIP_CAICOS:
1736 		rdev->config.evergreen.num_ses = 1;
1737 		rdev->config.evergreen.max_pipes = 4;
1738 		rdev->config.evergreen.max_tile_pipes = 2;
1739 		rdev->config.evergreen.max_simds = 2;
1740 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1741 		rdev->config.evergreen.max_gprs = 256;
1742 		rdev->config.evergreen.max_threads = 192;
1743 		rdev->config.evergreen.max_gs_threads = 16;
1744 		rdev->config.evergreen.max_stack_entries = 256;
1745 		rdev->config.evergreen.sx_num_of_sets = 4;
1746 		rdev->config.evergreen.sx_max_export_size = 128;
1747 		rdev->config.evergreen.sx_max_export_pos_size = 32;
1748 		rdev->config.evergreen.sx_max_export_smx_size = 96;
1749 		rdev->config.evergreen.max_hw_contexts = 4;
1750 		rdev->config.evergreen.sq_num_cf_insts = 1;
1751 
1752 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1753 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1754 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1755 		break;
1756 	}
1757 
1758 	/* Initialize HDP */
1759 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1760 		WREG32((0x2c14 + j), 0x00000000);
1761 		WREG32((0x2c18 + j), 0x00000000);
1762 		WREG32((0x2c1c + j), 0x00000000);
1763 		WREG32((0x2c20 + j), 0x00000000);
1764 		WREG32((0x2c24 + j), 0x00000000);
1765 	}
1766 
1767 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1768 
1769 	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
1770 
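	/* (MASK << n) & MASK leaves a 1 for every unit index >= n, so the
	 * fields below mark all pipes, SIMDs and backends beyond the
	 * per-family maximums as inactive/disabled.
	 */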
1771 	cc_gc_shader_pipe_config |=
1772 		INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
1773 				  & EVERGREEN_MAX_PIPES_MASK);
1774 	cc_gc_shader_pipe_config |=
1775 		INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
1776 			       & EVERGREEN_MAX_SIMDS_MASK);
1777 
1778 	cc_rb_backend_disable =
1779 		BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
1780 				& EVERGREEN_MAX_BACKENDS_MASK);
1781 
1782 
1783 	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
1784 	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
1785 
1786 	switch (rdev->config.evergreen.max_tile_pipes) {
1787 	case 1:
1788 	default:
1789 		gb_addr_config |= NUM_PIPES(0);
1790 		break;
1791 	case 2:
1792 		gb_addr_config |= NUM_PIPES(1);
1793 		break;
1794 	case 4:
1795 		gb_addr_config |= NUM_PIPES(2);
1796 		break;
1797 	case 8:
1798 		gb_addr_config |= NUM_PIPES(3);
1799 		break;
1800 	}
1801 
1802 	gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1803 	gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
1804 	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
1805 	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
1806 	gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
1807 	gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
1808 
1809 	if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
1810 		gb_addr_config |= ROW_SIZE(2);
1811 	else
1812 		gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
1813 
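	/* Certain Cypress (0x689e) and Juniper (0x68b9) SKUs carry their
	 * render backend configuration (presumably which backends are fused
	 * off) in efuse straps, read via the RCU indirect registers.
	 * Unrecognized strap values fall back to the computed default map.
	 */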
1814 	if (rdev->ddev->pdev->device == 0x689e) {
1815 		u32 efuse_straps_4;
1816 		u32 efuse_straps_3;
1817 		u8 efuse_box_bit_131_124;
1818 
1819 		WREG32(RCU_IND_INDEX, 0x204);
1820 		efuse_straps_4 = RREG32(RCU_IND_DATA);
1821 		WREG32(RCU_IND_INDEX, 0x203);
1822 		efuse_straps_3 = RREG32(RCU_IND_DATA);
1823 		efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
1824 
1825 		switch(efuse_box_bit_131_124) {
1826 		case 0x00:
1827 			gb_backend_map = 0x76543210;
1828 			break;
1829 		case 0x55:
1830 			gb_backend_map = 0x77553311;
1831 			break;
1832 		case 0x56:
1833 			gb_backend_map = 0x77553300;
1834 			break;
1835 		case 0x59:
1836 			gb_backend_map = 0x77552211;
1837 			break;
1838 		case 0x66:
1839 			gb_backend_map = 0x77443300;
1840 			break;
1841 		case 0x99:
1842 			gb_backend_map = 0x66552211;
1843 			break;
1844 		case 0x5a:
1845 			gb_backend_map = 0x77552200;
1846 			break;
1847 		case 0xaa:
1848 			gb_backend_map = 0x66442200;
1849 			break;
1850 		case 0x95:
1851 			gb_backend_map = 0x66553311;
1852 			break;
1853 		default:
1854 			DRM_ERROR("bad backend map, using default\n");
1855 			gb_backend_map =
1856 				evergreen_get_tile_pipe_to_backend_map(rdev,
1857 								       rdev->config.evergreen.max_tile_pipes,
1858 								       rdev->config.evergreen.max_backends,
1859 								       ((EVERGREEN_MAX_BACKENDS_MASK <<
1860 								   rdev->config.evergreen.max_backends) &
1861 									EVERGREEN_MAX_BACKENDS_MASK));
1862 			break;
1863 		}
1864 	} else if (rdev->ddev->pdev->device == 0x68b9) {
1865 		u32 efuse_straps_3;
1866 		u8 efuse_box_bit_127_124;
1867 
1868 		WREG32(RCU_IND_INDEX, 0x203);
1869 		efuse_straps_3 = RREG32(RCU_IND_DATA);
1870 		efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
1871 
1872 		switch(efuse_box_bit_127_124) {
1873 		case 0x0:
1874 			gb_backend_map = 0x00003210;
1875 			break;
1876 		case 0x5:
1877 		case 0x6:
1878 		case 0x9:
1879 		case 0xa:
1880 			gb_backend_map = 0x00003311;
1881 			break;
1882 		default:
1883 			DRM_ERROR("bad backend map, using default\n");
1884 			gb_backend_map =
1885 				evergreen_get_tile_pipe_to_backend_map(rdev,
1886 								       rdev->config.evergreen.max_tile_pipes,
1887 								       rdev->config.evergreen.max_backends,
1888 								       ((EVERGREEN_MAX_BACKENDS_MASK <<
1889 								   rdev->config.evergreen.max_backends) &
1890 									EVERGREEN_MAX_BACKENDS_MASK));
1891 			break;
1892 		}
1893 	} else {
1894 		switch (rdev->family) {
1895 		case CHIP_CYPRESS:
1896 		case CHIP_HEMLOCK:
1897 		case CHIP_BARTS:
1898 			gb_backend_map = 0x66442200;
1899 			break;
1900 		case CHIP_JUNIPER:
1901 			gb_backend_map = 0x00006420;
1902 			break;
1903 		default:
1904 			gb_backend_map =
1905 				evergreen_get_tile_pipe_to_backend_map(rdev,
1906 								       rdev->config.evergreen.max_tile_pipes,
1907 								       rdev->config.evergreen.max_backends,
1908 								       ((EVERGREEN_MAX_BACKENDS_MASK <<
1909 									 rdev->config.evergreen.max_backends) &
1910 									EVERGREEN_MAX_BACKENDS_MASK));
1911 		}
1912 	}
1913 
1914 	/* setup tiling info dword.  gb_addr_config is not adequate since it does
1915 	 * not have bank info, so create a custom tiling dword.
1916 	 * bits 3:0   num_pipes
1917 	 * bits 7:4   num_banks
1918 	 * bits 11:8  group_size
1919 	 * bits 15:12 row_size
1920 	 */
1921 	rdev->config.evergreen.tile_config = 0;
1922 	switch (rdev->config.evergreen.max_tile_pipes) {
1923 	case 1:
1924 	default:
1925 		rdev->config.evergreen.tile_config |= (0 << 0);
1926 		break;
1927 	case 2:
1928 		rdev->config.evergreen.tile_config |= (1 << 0);
1929 		break;
1930 	case 4:
1931 		rdev->config.evergreen.tile_config |= (2 << 0);
1932 		break;
1933 	case 8:
1934 		rdev->config.evergreen.tile_config |= (3 << 0);
1935 		break;
1936 	}
1937 	rdev->config.evergreen.tile_config |=
1938 		((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
1939 	rdev->config.evergreen.tile_config |=
1940 		((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
1941 	rdev->config.evergreen.tile_config |=
1942 		((gb_addr_config & 0x30000000) >> 28) << 12;
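	/* bits 29:28 of GB_ADDR_CONFIG are the ROW_SIZE field set above */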
1943 
1944 	WREG32(GB_BACKEND_MAP, gb_backend_map);
1945 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
1946 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
1947 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1948 
1949 	evergreen_program_channel_remap(rdev);
1950 
1951 	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
1952 	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
1953 
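	/* Apply the backend/SIMD disables to each shader engine in turn via
	 * SE_INDEX; the engine at the index matching the engine count read
	 * back from GB_ADDR_CONFIG (i.e. one past the last real engine) has
	 * all of its backends and SIMDs disabled.
	 */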
1954 	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
1955 		u32 rb = cc_rb_backend_disable | (0xf0 << 16);
1956 		u32 sp = cc_gc_shader_pipe_config;
1957 		u32 gfx = grbm_gfx_index | SE_INDEX(i);
1958 
1959 		if (i == num_shader_engines) {
1960 			rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
1961 			sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
1962 		}
1963 
1964 		WREG32(GRBM_GFX_INDEX, gfx);
1965 		WREG32(RLC_GFX_INDEX, gfx);
1966 
1967 		WREG32(CC_RB_BACKEND_DISABLE, rb);
1968 		WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
1969 		WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
1970 		WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
1971 	}
1972 
1973 	grbm_gfx_index |= SE_BROADCAST_WRITES;
1974 	WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
1975 	WREG32(RLC_GFX_INDEX, grbm_gfx_index);
1976 
1977 	WREG32(CGTS_SYS_TCC_DISABLE, 0);
1978 	WREG32(CGTS_TCC_DISABLE, 0);
1979 	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
1980 	WREG32(CGTS_USER_TCC_DISABLE, 0);
1981 
1982 	/* set HW defaults for 3D engine */
1983 	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
1984 				     ROQ_IB2_START(0x2b)));
1985 
1986 	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
1987 
1988 	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
1989 			     SYNC_GRADIENT |
1990 			     SYNC_WALKER |
1991 			     SYNC_ALIGNER));
1992 
1993 	sx_debug_1 = RREG32(SX_DEBUG_1);
1994 	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
1995 	WREG32(SX_DEBUG_1, sx_debug_1);
1996 
1997 
1998 	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
1999 	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
2000 	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
2001 	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
2002 
2003 	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
2004 					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
2005 					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
2006 
2007 	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
2008 				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
2009 				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
2010 
2011 	WREG32(VGT_NUM_INSTANCES, 1);
2012 	WREG32(SPI_CONFIG_CNTL, 0);
2013 	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
2014 	WREG32(CP_PERFMON_CNTL, 0);
2015 
2016 	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
2017 				  FETCH_FIFO_HIWATER(0x4) |
2018 				  DONE_FIFO_HIWATER(0xe0) |
2019 				  ALU_UPDATE_FIFO_HIWATER(0x8)));
2020 
2021 	sq_config = RREG32(SQ_CONFIG);
2022 	sq_config &= ~(PS_PRIO(3) |
2023 		       VS_PRIO(3) |
2024 		       GS_PRIO(3) |
2025 		       ES_PRIO(3));
2026 	sq_config |= (VC_ENABLE |
2027 		      EXPORT_SRC_C |
2028 		      PS_PRIO(0) |
2029 		      VS_PRIO(1) |
2030 		      GS_PRIO(2) |
2031 		      ES_PRIO(3));
2032 
2033 	switch (rdev->family) {
2034 	case CHIP_CEDAR:
2035 	case CHIP_PALM:
2036 	case CHIP_CAICOS:
2037 		/* no vertex cache */
2038 		sq_config &= ~VC_ENABLE;
2039 		break;
2040 	default:
2041 		break;
2042 	}
2043 
2044 	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
2045 
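	/* Split the GPR file, minus the 4 * 2 = 8 reserved clause-temp GPRs,
	 * in 32nds: 12 to PS, 6 to VS, 4 each to GS/ES, 3 each to HS/LS.
	 * With max_gprs = 256 that works out to 93/46/31/31/23/23.
	 */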
2046 	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
2047 	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
2048 	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
2049 	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2050 	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2051 	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
2052 	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
2053 
2054 	switch (rdev->family) {
2055 	case CHIP_CEDAR:
2056 	case CHIP_PALM:
2057 		ps_thread_count = 96;
2058 		break;
2059 	default:
2060 		ps_thread_count = 128;
2061 		break;
2062 	}
2063 
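	/* PS gets a fixed thread budget; the remainder is split into sixths,
	 * rounded down to a multiple of 8, for the other stages: e.g.
	 * ((248 - 128) / 6 / 8) * 8 = 16 threads per stage.
	 */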
2064 	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
2065 	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2066 	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2067 	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2068 	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2069 	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2070 
2071 	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2072 	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2073 	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2074 	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2075 	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2076 	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2077 
2078 	WREG32(SQ_CONFIG, sq_config);
2079 	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
2080 	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
2081 	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
2082 	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2083 	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
2084 	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2085 	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2086 	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
2087 	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
2088 	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
2089 
2090 	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
2091 					  FORCE_EOV_MAX_REZ_CNT(255)));
2092 
2093 	switch (rdev->family) {
2094 	case CHIP_CEDAR:
2095 	case CHIP_PALM:
2096 	case CHIP_CAICOS:
2097 		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
2098 		break;
2099 	default:
2100 		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
2101 		break;
2102 	}
2103 	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
2104 	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
2105 
2106 	WREG32(VGT_GS_VERTEX_REUSE, 16);
2107 	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
2108 	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2109 
2110 	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
2111 	WREG32(VGT_OUT_DEALLOC_CNTL, 16);
2112 
2113 	WREG32(CB_PERF_CTR0_SEL_0, 0);
2114 	WREG32(CB_PERF_CTR0_SEL_1, 0);
2115 	WREG32(CB_PERF_CTR1_SEL_0, 0);
2116 	WREG32(CB_PERF_CTR1_SEL_1, 0);
2117 	WREG32(CB_PERF_CTR2_SEL_0, 0);
2118 	WREG32(CB_PERF_CTR2_SEL_1, 0);
2119 	WREG32(CB_PERF_CTR3_SEL_0, 0);
2120 	WREG32(CB_PERF_CTR3_SEL_1, 0);
2121 
2122 	/* clear render buffer base addresses */
2123 	WREG32(CB_COLOR0_BASE, 0);
2124 	WREG32(CB_COLOR1_BASE, 0);
2125 	WREG32(CB_COLOR2_BASE, 0);
2126 	WREG32(CB_COLOR3_BASE, 0);
2127 	WREG32(CB_COLOR4_BASE, 0);
2128 	WREG32(CB_COLOR5_BASE, 0);
2129 	WREG32(CB_COLOR6_BASE, 0);
2130 	WREG32(CB_COLOR7_BASE, 0);
2131 	WREG32(CB_COLOR8_BASE, 0);
2132 	WREG32(CB_COLOR9_BASE, 0);
2133 	WREG32(CB_COLOR10_BASE, 0);
2134 	WREG32(CB_COLOR11_BASE, 0);
2135 
2136 	/* set the shader const cache sizes to 0 */
2137 	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
2138 		WREG32(i, 0);
2139 	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
2140 		WREG32(i, 0);
2141 
2142 	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
2143 	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
2144 
2145 	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
2146 
2147 	udelay(50);
2148 
2149 }
2150 
2151 int evergreen_mc_init(struct radeon_device *rdev)
2152 {
2153 	u32 tmp;
2154 	int chansize, numchan;
2155 
2156 	/* Get VRAM information */
2157 	rdev->mc.vram_is_ddr = true;
2158 	tmp = RREG32(MC_ARB_RAMCFG);
2159 	if (tmp & CHANSIZE_OVERRIDE) {
2160 		chansize = 16;
2161 	} else if (tmp & CHANSIZE_MASK) {
2162 		chansize = 64;
2163 	} else {
2164 		chansize = 32;
2165 	}
2166 	tmp = RREG32(MC_SHARED_CHMAP);
2167 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
2168 	case 0:
2169 	default:
2170 		numchan = 1;
2171 		break;
2172 	case 1:
2173 		numchan = 2;
2174 		break;
2175 	case 2:
2176 		numchan = 4;
2177 		break;
2178 	case 3:
2179 		numchan = 8;
2180 		break;
2181 	}
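	/* e.g. four 64-bit channels yield an effective 256-bit bus */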
2182 	rdev->mc.vram_width = numchan * chansize;
2183 	/* Could the aperture size report 0? */
2184 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2185 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
2186 	/* Setup GPU memory space */
2187 	if (rdev->flags & RADEON_IS_IGP) {
2188 		/* size in bytes on fusion */
2189 		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
2190 		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
2191 	} else {
2192 		/* size in MB on evergreen */
2193 		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2194 		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2195 	}
2196 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
2197 	r700_vram_gtt_location(rdev, &rdev->mc);
2198 	radeon_update_bandwidth_info(rdev);
2199 
2200 	return 0;
2201 }
2202 
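/* Lockup check: a GUI-idle GPU cannot be locked up, so just refresh the
 * bookkeeping in that case. Otherwise force some CP activity with two NOP
 * packets and let the shared r100 helper decide whether the read pointer
 * is still advancing.
 */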
2203 bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
2204 {
2205 	u32 srbm_status;
2206 	u32 grbm_status;
2207 	u32 grbm_status_se0, grbm_status_se1;
2208 	struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
2209 	int r;
2210 
2211 	srbm_status = RREG32(SRBM_STATUS);
2212 	grbm_status = RREG32(GRBM_STATUS);
2213 	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
2214 	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
2215 	if (!(grbm_status & GUI_ACTIVE)) {
2216 		r100_gpu_lockup_update(lockup, &rdev->cp);
2217 		return false;
2218 	}
2219 	/* force CP activities */
2220 	r = radeon_ring_lock(rdev, 2);
2221 	if (!r) {
2222 		/* PACKET2 NOP */
2223 		radeon_ring_write(rdev, 0x80000000);
2224 		radeon_ring_write(rdev, 0x80000000);
2225 		radeon_ring_unlock_commit(rdev);
2226 	}
2227 	rdev->cp.rptr = RREG32(CP_RB_RPTR);
2228 	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
2229 }
2230 
2231 static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
2232 {
2233 	struct evergreen_mc_save save;
2234 	u32 grbm_reset = 0;
2235 
2236 	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2237 		return 0;
2238 
2239 	dev_info(rdev->dev, "GPU softreset\n");
2240 	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
2241 		RREG32(GRBM_STATUS));
2242 	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
2243 		RREG32(GRBM_STATUS_SE0));
2244 	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
2245 		RREG32(GRBM_STATUS_SE1));
2246 	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
2247 		RREG32(SRBM_STATUS));
2248 	evergreen_mc_stop(rdev, &save);
2249 	if (evergreen_mc_wait_for_idle(rdev)) {
2250 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
2251 	}
2252 	/* Disable CP parsing/prefetching */
2253 	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
2254 
2255 	/* reset all the gfx blocks */
2256 	grbm_reset = (SOFT_RESET_CP |
2257 		      SOFT_RESET_CB |
2258 		      SOFT_RESET_DB |
2259 		      SOFT_RESET_PA |
2260 		      SOFT_RESET_SC |
2261 		      SOFT_RESET_SPI |
2262 		      SOFT_RESET_SH |
2263 		      SOFT_RESET_SX |
2264 		      SOFT_RESET_TC |
2265 		      SOFT_RESET_TA |
2266 		      SOFT_RESET_VC |
2267 		      SOFT_RESET_VGT);
2268 
2269 	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
2270 	WREG32(GRBM_SOFT_RESET, grbm_reset);
2271 	(void)RREG32(GRBM_SOFT_RESET);
2272 	udelay(50);
2273 	WREG32(GRBM_SOFT_RESET, 0);
2274 	(void)RREG32(GRBM_SOFT_RESET);
2275 	/* Wait a little for things to settle down */
2276 	udelay(50);
2277 	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
2278 		RREG32(GRBM_STATUS));
2279 	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
2280 		RREG32(GRBM_STATUS_SE0));
2281 	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
2282 		RREG32(GRBM_STATUS_SE1));
2283 	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
2284 		RREG32(SRBM_STATUS));
2285 	evergreen_mc_resume(rdev, &save);
2286 	return 0;
2287 }
2288 
2289 int evergreen_asic_reset(struct radeon_device *rdev)
2290 {
2291 	return evergreen_gpu_soft_reset(rdev);
2292 }
2293 
2294 /* Interrupts */
2295 
2296 u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
2297 {
2298 	switch (crtc) {
2299 	case 0:
2300 		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
2301 	case 1:
2302 		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
2303 	case 2:
2304 		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
2305 	case 3:
2306 		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
2307 	case 4:
2308 		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
2309 	case 5:
2310 		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
2311 	default:
2312 		return 0;
2313 	}
2314 }
2315 
2316 void evergreen_disable_interrupt_state(struct radeon_device *rdev)
2317 {
2318 	u32 tmp;
2319 
2320 	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2321 	WREG32(GRBM_INT_CNTL, 0);
2322 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2323 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
2324 	if (!(rdev->flags & RADEON_IS_IGP)) {
2325 		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2326 		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
2327 		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2328 		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2329 	}
2330 
2331 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2332 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
2333 	if (!(rdev->flags & RADEON_IS_IGP)) {
2334 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2335 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
2336 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2337 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2338 	}
2339 
2340 	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2341 	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2342 
2343 	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2344 	WREG32(DC_HPD1_INT_CONTROL, tmp);
2345 	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2346 	WREG32(DC_HPD2_INT_CONTROL, tmp);
2347 	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2348 	WREG32(DC_HPD3_INT_CONTROL, tmp);
2349 	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2350 	WREG32(DC_HPD4_INT_CONTROL, tmp);
2351 	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2352 	WREG32(DC_HPD5_INT_CONTROL, tmp);
2353 	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2354 	WREG32(DC_HPD6_INT_CONTROL, tmp);
2355 
2356 }
2357 
2358 int evergreen_irq_set(struct radeon_device *rdev)
2359 {
2360 	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2361 	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
2362 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
2363 	u32 grbm_int_cntl = 0;
2364 	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
2365 
2366 	if (!rdev->irq.installed) {
2367 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
2368 		return -EINVAL;
2369 	}
2370 	/* don't enable anything if the ih is disabled */
2371 	if (!rdev->ih.enabled) {
2372 		r600_disable_interrupts(rdev);
2373 		/* force the active interrupt state to all disabled */
2374 		evergreen_disable_interrupt_state(rdev);
2375 		return 0;
2376 	}
2377 
2378 	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2379 	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2380 	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2381 	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
2382 	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
2383 	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2384 
2385 	if (rdev->irq.sw_int) {
2386 		DRM_DEBUG("evergreen_irq_set: sw int\n");
2387 		cp_int_cntl |= RB_INT_ENABLE;
2388 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
2389 	}
2390 	if (rdev->irq.crtc_vblank_int[0] ||
2391 	    rdev->irq.pflip[0]) {
2392 		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
2393 		crtc1 |= VBLANK_INT_MASK;
2394 	}
2395 	if (rdev->irq.crtc_vblank_int[1] ||
2396 	    rdev->irq.pflip[1]) {
2397 		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
2398 		crtc2 |= VBLANK_INT_MASK;
2399 	}
2400 	if (rdev->irq.crtc_vblank_int[2] ||
2401 	    rdev->irq.pflip[2]) {
2402 		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
2403 		crtc3 |= VBLANK_INT_MASK;
2404 	}
2405 	if (rdev->irq.crtc_vblank_int[3] ||
2406 	    rdev->irq.pflip[3]) {
2407 		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
2408 		crtc4 |= VBLANK_INT_MASK;
2409 	}
2410 	if (rdev->irq.crtc_vblank_int[4] ||
2411 	    rdev->irq.pflip[4]) {
2412 		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
2413 		crtc5 |= VBLANK_INT_MASK;
2414 	}
2415 	if (rdev->irq.crtc_vblank_int[5] ||
2416 	    rdev->irq.pflip[5]) {
2417 		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
2418 		crtc6 |= VBLANK_INT_MASK;
2419 	}
2420 	if (rdev->irq.hpd[0]) {
2421 		DRM_DEBUG("evergreen_irq_set: hpd 1\n");
2422 		hpd1 |= DC_HPDx_INT_EN;
2423 	}
2424 	if (rdev->irq.hpd[1]) {
2425 		DRM_DEBUG("evergreen_irq_set: hpd 2\n");
2426 		hpd2 |= DC_HPDx_INT_EN;
2427 	}
2428 	if (rdev->irq.hpd[2]) {
2429 		DRM_DEBUG("evergreen_irq_set: hpd 3\n");
2430 		hpd3 |= DC_HPDx_INT_EN;
2431 	}
2432 	if (rdev->irq.hpd[3]) {
2433 		DRM_DEBUG("evergreen_irq_set: hpd 4\n");
2434 		hpd4 |= DC_HPDx_INT_EN;
2435 	}
2436 	if (rdev->irq.hpd[4]) {
2437 		DRM_DEBUG("evergreen_irq_set: hpd 5\n");
2438 		hpd5 |= DC_HPDx_INT_EN;
2439 	}
2440 	if (rdev->irq.hpd[5]) {
2441 		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
2442 		hpd6 |= DC_HPDx_INT_EN;
2443 	}
2444 	if (rdev->irq.gui_idle) {
2445 		DRM_DEBUG("gui idle\n");
2446 		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
2447 	}
2448 
2449 	WREG32(CP_INT_CNTL, cp_int_cntl);
2450 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
2451 
2452 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
2453 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
2454 	if (!(rdev->flags & RADEON_IS_IGP)) {
2455 		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
2456 		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
2457 		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
2458 		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
2459 	}
2460 
2461 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
2462 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
2463 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
2464 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
2465 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
2466 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
2467 
2468 	WREG32(DC_HPD1_INT_CONTROL, hpd1);
2469 	WREG32(DC_HPD2_INT_CONTROL, hpd2);
2470 	WREG32(DC_HPD3_INT_CONTROL, hpd3);
2471 	WREG32(DC_HPD4_INT_CONTROL, hpd4);
2472 	WREG32(DC_HPD5_INT_CONTROL, hpd5);
2473 	WREG32(DC_HPD6_INT_CONTROL, hpd6);
2474 
2475 	return 0;
2476 }
2477 
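/* Latch all of the display interrupt status registers, then acknowledge
 * every pending pageflip, vblank, vline and hotplug source by writing its
 * clear/ack bit back.
 */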
2478 static inline void evergreen_irq_ack(struct radeon_device *rdev)
2479 {
2480 	u32 tmp;
2481 
2482 	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
2483 	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
2484 	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
2485 	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
2486 	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
2487 	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
2488 	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
2489 	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
2490 	rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
2491 	rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
2492 	rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
2493 	rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
2494 
2495 	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
2496 		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2497 	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
2498 		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2499 	if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
2500 		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2501 	if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
2502 		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2503 	if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
2504 		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2505 	if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
2506 		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2507 
2508 	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
2509 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
2510 	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
2511 		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
2512 
2513 	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
2514 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
2515 	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
2516 		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
2517 
2518 	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
2519 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
2520 	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
2521 		WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
2522 
2523 	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
2524 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
2525 	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
2526 		WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
2527 
2528 	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
2529 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
2530 	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
2531 		WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
2532 
2533 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
2534 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
2535 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
2536 		WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
2537 
2538 	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
2539 		tmp = RREG32(DC_HPD1_INT_CONTROL);
2540 		tmp |= DC_HPDx_INT_ACK;
2541 		WREG32(DC_HPD1_INT_CONTROL, tmp);
2542 	}
2543 	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
2544 		tmp = RREG32(DC_HPD2_INT_CONTROL);
2545 		tmp |= DC_HPDx_INT_ACK;
2546 		WREG32(DC_HPD2_INT_CONTROL, tmp);
2547 	}
2548 	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
2549 		tmp = RREG32(DC_HPD3_INT_CONTROL);
2550 		tmp |= DC_HPDx_INT_ACK;
2551 		WREG32(DC_HPD3_INT_CONTROL, tmp);
2552 	}
2553 	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
2554 		tmp = RREG32(DC_HPD4_INT_CONTROL);
2555 		tmp |= DC_HPDx_INT_ACK;
2556 		WREG32(DC_HPD4_INT_CONTROL, tmp);
2557 	}
2558 	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
2559 		tmp = RREG32(DC_HPD5_INT_CONTROL);
2560 		tmp |= DC_HPDx_INT_ACK;
2561 		WREG32(DC_HPD5_INT_CONTROL, tmp);
2562 	}
2563 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
2564 		tmp = RREG32(DC_HPD6_INT_CONTROL);
2565 		tmp |= DC_HPDx_INT_ACK;
2566 		WREG32(DC_HPD6_INT_CONTROL, tmp);
2567 	}
2568 }
2569 
2570 void evergreen_irq_disable(struct radeon_device *rdev)
2571 {
2572 	r600_disable_interrupts(rdev);
2573 	/* Wait and acknowledge irq */
2574 	mdelay(1);
2575 	evergreen_irq_ack(rdev);
2576 	evergreen_disable_interrupt_state(rdev);
2577 }
2578 
2579 void evergreen_irq_suspend(struct radeon_device *rdev)
2580 {
2581 	evergreen_irq_disable(rdev);
2582 	r600_rlc_stop(rdev);
2583 }
2584 
2585 static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
2586 {
2587 	u32 wptr, tmp;
2588 
2589 	if (rdev->wb.enabled)
2590 		wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
2591 	else
2592 		wptr = RREG32(IH_RB_WPTR);
2593 
2594 	if (wptr & RB_OVERFLOW) {
2595 		/* When a ring buffer overflow happens, start parsing interrupts
2596 		 * from the last vector that was not overwritten (wptr + 16).
2597 		 * Hopefully this allows us to catch up.
2598 		 */
2599 		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
2600 			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
2601 		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
2602 		tmp = RREG32(IH_RB_CNTL);
2603 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
2604 		WREG32(IH_RB_CNTL, tmp);
2605 	}
2606 	return (wptr & rdev->ih.ptr_mask);
2607 }
2608 
2609 int evergreen_irq_process(struct radeon_device *rdev)
2610 {
2611 	u32 wptr = evergreen_get_ih_wptr(rdev);
2612 	u32 rptr = rdev->ih.rptr;
2613 	u32 src_id, src_data;
2614 	u32 ring_index;
2615 	unsigned long flags;
2616 	bool queue_hotplug = false;
2617 
2618 	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2619 	if (!rdev->ih.enabled)
2620 		return IRQ_NONE;
2621 
2622 	spin_lock_irqsave(&rdev->ih.lock, flags);
2623 
2624 	if (rptr == wptr) {
2625 		spin_unlock_irqrestore(&rdev->ih.lock, flags);
2626 		return IRQ_NONE;
2627 	}
2628 	if (rdev->shutdown) {
2629 		spin_unlock_irqrestore(&rdev->ih.lock, flags);
2630 		return IRQ_NONE;
2631 	}
2632 
2633 restart_ih:
2634 	/* display interrupts */
2635 	evergreen_irq_ack(rdev);
2636 
2637 	rdev->ih.wptr = wptr;
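	/* Each IH vector is 16 bytes; only the first two dwords (source id
	 * in the low 8 bits, source data in the low 28 bits) are consumed
	 * here.
	 */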
2638 	while (rptr != wptr) {
2639 		/* wptr/rptr are in bytes! */
2640 		ring_index = rptr / 4;
2641 		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
2642 		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
2643 
2644 		switch (src_id) {
2645 		case 1: /* D1 vblank/vline */
2646 			switch (src_data) {
2647 			case 0: /* D1 vblank */
2648 				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
2649 					if (rdev->irq.crtc_vblank_int[0]) {
2650 						drm_handle_vblank(rdev->ddev, 0);
2651 						rdev->pm.vblank_sync = true;
2652 						wake_up(&rdev->irq.vblank_queue);
2653 					}
2654 					if (rdev->irq.pflip[0])
2655 						radeon_crtc_handle_flip(rdev, 0);
2656 					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2657 					DRM_DEBUG("IH: D1 vblank\n");
2658 				}
2659 				break;
2660 			case 1: /* D1 vline */
2661 				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
2662 					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
2663 					DRM_DEBUG("IH: D1 vline\n");
2664 				}
2665 				break;
2666 			default:
2667 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2668 				break;
2669 			}
2670 			break;
2671 		case 2: /* D2 vblank/vline */
2672 			switch (src_data) {
2673 			case 0: /* D2 vblank */
2674 				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
2675 					if (rdev->irq.crtc_vblank_int[1]) {
2676 						drm_handle_vblank(rdev->ddev, 1);
2677 						rdev->pm.vblank_sync = true;
2678 						wake_up(&rdev->irq.vblank_queue);
2679 					}
2680 					if (rdev->irq.pflip[1])
2681 						radeon_crtc_handle_flip(rdev, 1);
2682 					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
2683 					DRM_DEBUG("IH: D2 vblank\n");
2684 				}
2685 				break;
2686 			case 1: /* D2 vline */
2687 				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
2688 					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
2689 					DRM_DEBUG("IH: D2 vline\n");
2690 				}
2691 				break;
2692 			default:
2693 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2694 				break;
2695 			}
2696 			break;
2697 		case 3: /* D3 vblank/vline */
2698 			switch (src_data) {
2699 			case 0: /* D3 vblank */
2700 				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
2701 					if (rdev->irq.crtc_vblank_int[2]) {
2702 						drm_handle_vblank(rdev->ddev, 2);
2703 						rdev->pm.vblank_sync = true;
2704 						wake_up(&rdev->irq.vblank_queue);
2705 					}
2706 					if (rdev->irq.pflip[2])
2707 						radeon_crtc_handle_flip(rdev, 2);
2708 					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
2709 					DRM_DEBUG("IH: D3 vblank\n");
2710 				}
2711 				break;
2712 			case 1: /* D3 vline */
2713 				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
2714 					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
2715 					DRM_DEBUG("IH: D3 vline\n");
2716 				}
2717 				break;
2718 			default:
2719 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2720 				break;
2721 			}
2722 			break;
2723 		case 4: /* D4 vblank/vline */
2724 			switch (src_data) {
2725 			case 0: /* D4 vblank */
2726 				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
2727 					if (rdev->irq.crtc_vblank_int[3]) {
2728 						drm_handle_vblank(rdev->ddev, 3);
2729 						rdev->pm.vblank_sync = true;
2730 						wake_up(&rdev->irq.vblank_queue);
2731 					}
2732 					if (rdev->irq.pflip[3])
2733 						radeon_crtc_handle_flip(rdev, 3);
2734 					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
2735 					DRM_DEBUG("IH: D4 vblank\n");
2736 				}
2737 				break;
2738 			case 1: /* D4 vline */
2739 				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
2740 					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
2741 					DRM_DEBUG("IH: D4 vline\n");
2742 				}
2743 				break;
2744 			default:
2745 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2746 				break;
2747 			}
2748 			break;
2749 		case 5: /* D5 vblank/vline */
2750 			switch (src_data) {
2751 			case 0: /* D5 vblank */
2752 				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
2753 					if (rdev->irq.crtc_vblank_int[4]) {
2754 						drm_handle_vblank(rdev->ddev, 4);
2755 						rdev->pm.vblank_sync = true;
2756 						wake_up(&rdev->irq.vblank_queue);
2757 					}
2758 					if (rdev->irq.pflip[4])
2759 						radeon_crtc_handle_flip(rdev, 4);
2760 					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
2761 					DRM_DEBUG("IH: D5 vblank\n");
2762 				}
2763 				break;
2764 			case 1: /* D5 vline */
2765 				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
2766 					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
2767 					DRM_DEBUG("IH: D5 vline\n");
2768 				}
2769 				break;
2770 			default:
2771 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2772 				break;
2773 			}
2774 			break;
2775 		case 6: /* D6 vblank/vline */
2776 			switch (src_data) {
2777 			case 0: /* D6 vblank */
2778 				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
2779 					if (rdev->irq.crtc_vblank_int[5]) {
2780 						drm_handle_vblank(rdev->ddev, 5);
2781 						rdev->pm.vblank_sync = true;
2782 						wake_up(&rdev->irq.vblank_queue);
2783 					}
2784 					if (rdev->irq.pflip[5])
2785 						radeon_crtc_handle_flip(rdev, 5);
2786 					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
2787 					DRM_DEBUG("IH: D6 vblank\n");
2788 				}
2789 				break;
2790 			case 1: /* D6 vline */
2791 				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
2792 					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
2793 					DRM_DEBUG("IH: D6 vline\n");
2794 				}
2795 				break;
2796 			default:
2797 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2798 				break;
2799 			}
2800 			break;
2801 		case 42: /* HPD hotplug */
2802 			switch (src_data) {
2803 			case 0:
2804 				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
2805 					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
2806 					queue_hotplug = true;
2807 					DRM_DEBUG("IH: HPD1\n");
2808 				}
2809 				break;
2810 			case 1:
2811 				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
2812 					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
2813 					queue_hotplug = true;
2814 					DRM_DEBUG("IH: HPD2\n");
2815 				}
2816 				break;
2817 			case 2:
2818 				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
2819 					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
2820 					queue_hotplug = true;
2821 					DRM_DEBUG("IH: HPD3\n");
2822 				}
2823 				break;
2824 			case 3:
2825 				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
2826 					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
2827 					queue_hotplug = true;
2828 					DRM_DEBUG("IH: HPD4\n");
2829 				}
2830 				break;
2831 			case 4:
2832 				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
2833 					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
2834 					queue_hotplug = true;
2835 					DRM_DEBUG("IH: HPD5\n");
2836 				}
2837 				break;
2838 			case 5:
2839 				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
2840 					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
2841 					queue_hotplug = true;
2842 					DRM_DEBUG("IH: HPD6\n");
2843 				}
2844 				break;
2845 			default:
2846 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2847 				break;
2848 			}
2849 			break;
2850 		case 176: /* CP_INT in ring buffer */
2851 		case 177: /* CP_INT in IB1 */
2852 		case 178: /* CP_INT in IB2 */
2853 			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
2854 			radeon_fence_process(rdev);
2855 			break;
2856 		case 181: /* CP EOP event */
2857 			DRM_DEBUG("IH: CP EOP\n");
2858 			radeon_fence_process(rdev);
2859 			break;
2860 		case 233: /* GUI IDLE */
2861 			DRM_DEBUG("IH: GUI idle\n");
2862 			rdev->pm.gui_idle = true;
2863 			wake_up(&rdev->irq.idle_queue);
2864 			break;
2865 		default:
2866 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2867 			break;
2868 		}
2869 
2870 		/* wptr/rptr are in bytes! */
2871 		rptr += 16;
2872 		rptr &= rdev->ih.ptr_mask;
2873 	}
2874 	/* make sure wptr hasn't changed while processing */
2875 	wptr = evergreen_get_ih_wptr(rdev);
2876 	if (wptr != rdev->ih.wptr)
2877 		goto restart_ih;
2878 	if (queue_hotplug)
2879 		schedule_work(&rdev->hotplug_work);
2880 	rdev->ih.rptr = rptr;
2881 	WREG32(IH_RB_RPTR, rdev->ih.rptr);
2882 	spin_unlock_irqrestore(&rdev->ih.lock, flags);
2883 	return IRQ_HANDLED;
2884 }
2885 
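/* Bring the ASIC up in dependency order: microcode, MC programming, GART
 * (or AGP) setup, core GPU init, blitter, writeback, IH/IRQ, and finally
 * the CP ring.
 */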
2886 static int evergreen_startup(struct radeon_device *rdev)
2887 {
2888 	int r;
2889 
2890 	/* enable pcie gen2 link */
2891 	if (!ASIC_IS_DCE5(rdev))
2892 		evergreen_pcie_gen2_enable(rdev);
2893 
2894 	if (ASIC_IS_DCE5(rdev)) {
2895 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
2896 			r = ni_init_microcode(rdev);
2897 			if (r) {
2898 				DRM_ERROR("Failed to load firmware!\n");
2899 				return r;
2900 			}
2901 		}
2902 		r = ni_mc_load_microcode(rdev);
2903 		if (r) {
2904 			DRM_ERROR("Failed to load MC firmware!\n");
2905 			return r;
2906 		}
2907 	} else {
2908 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2909 			r = r600_init_microcode(rdev);
2910 			if (r) {
2911 				DRM_ERROR("Failed to load firmware!\n");
2912 				return r;
2913 			}
2914 		}
2915 	}
2916 
2917 	evergreen_mc_program(rdev);
2918 	if (rdev->flags & RADEON_IS_AGP) {
2919 		evergreen_agp_enable(rdev);
2920 	} else {
2921 		r = evergreen_pcie_gart_enable(rdev);
2922 		if (r)
2923 			return r;
2924 	}
2925 	evergreen_gpu_init(rdev);
2926 
2927 	r = evergreen_blit_init(rdev);
2928 	if (r) {
2929 		evergreen_blit_fini(rdev);
2930 		rdev->asic->copy = NULL;
2931 		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2932 	}
2933 	/* XXX: Ontario has problems blitting to GART at the moment */
2934 	if (rdev->family == CHIP_PALM) {
2935 		rdev->asic->copy = NULL;
2936 		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2937 	}
2938 
2939 	/* allocate wb buffer */
2940 	r = radeon_wb_init(rdev);
2941 	if (r)
2942 		return r;
2943 
2944 	/* Enable IRQ */
2945 	r = r600_irq_init(rdev);
2946 	if (r) {
2947 		DRM_ERROR("radeon: IH init failed (%d).\n", r);
2948 		radeon_irq_kms_fini(rdev);
2949 		return r;
2950 	}
2951 	evergreen_irq_set(rdev);
2952 
2953 	r = radeon_ring_init(rdev, rdev->cp.ring_size);
2954 	if (r)
2955 		return r;
2956 	r = evergreen_cp_load_microcode(rdev);
2957 	if (r)
2958 		return r;
2959 	r = evergreen_cp_resume(rdev);
2960 	if (r)
2961 		return r;
2962 
2963 	return 0;
2964 }
2965 
2966 int evergreen_resume(struct radeon_device *rdev)
2967 {
2968 	int r;
2969 
2970 	/* reset the asic, the gfx blocks are often in a bad state
2971 	 * after the driver is unloaded or after a resume
2972 	 */
2973 	if (radeon_asic_reset(rdev))
2974 		dev_warn(rdev->dev, "GPU reset failed!\n");
2975 	/* Do not reset the GPU before posting: on rv770-class hw, unlike
2976 	 * on r500 hw, posting will perform the tasks needed to bring the
2977 	 * GPU back into good shape.
2978 	 */
2979 	/* post card */
2980 	atom_asic_init(rdev->mode_info.atom_context);
2981 
2982 	r = evergreen_startup(rdev);
2983 	if (r) {
2984 		DRM_ERROR("evergreen startup failed on resume\n");
2985 		return r;
2986 	}
2987 
2988 	r = r600_ib_test(rdev);
2989 	if (r) {
2990 		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2991 		return r;
2992 	}
2993 
2994 	return r;
2995 
2996 }
2997 
2998 int evergreen_suspend(struct radeon_device *rdev)
2999 {
3000 	int r;
3001 
3002 	/* FIXME: we should wait for ring to be empty */
3003 	r700_cp_stop(rdev);
3004 	rdev->cp.ready = false;
3005 	evergreen_irq_suspend(rdev);
3006 	radeon_wb_disable(rdev);
3007 	evergreen_pcie_gart_disable(rdev);
3008 
3009 	/* unpin shaders bo */
3010 	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
3011 	if (likely(r == 0)) {
3012 		radeon_bo_unpin(rdev->r600_blit.shader_obj);
3013 		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
3014 	}
3015 
3016 	return 0;
3017 }
3018 
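/* Blit-based buffer copy; num_pages is in GPU pages of
 * RADEON_GPU_PAGE_SIZE bytes, and the whole operation is serialized by
 * the r600 blit mutex.
 */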
3019 int evergreen_copy_blit(struct radeon_device *rdev,
3020 			uint64_t src_offset, uint64_t dst_offset,
3021 			unsigned num_pages, struct radeon_fence *fence)
3022 {
3023 	int r;
3024 
3025 	mutex_lock(&rdev->r600_blit.mutex);
3026 	rdev->r600_blit.vb_ib = NULL;
3027 	r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
3028 	if (r) {
3029 		if (rdev->r600_blit.vb_ib)
3030 			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
3031 		mutex_unlock(&rdev->r600_blit.mutex);
3032 		return r;
3033 	}
3034 	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
3035 	evergreen_blit_done_copy(rdev, fence);
3036 	mutex_unlock(&rdev->r600_blit.mutex);
3037 	return 0;
3038 }
3039 
3040 /* The plan is to move initialization into this function and use
3041  * helper functions, so that radeon_device_init does little more
3042  * than call ASIC-specific functions. This should also allow us
3043  * to remove a bunch of callback functions,
3044  * like vram_info.
3045  */
3046 int evergreen_init(struct radeon_device *rdev)
3047 {
3048 	int r;
3049 
3050 	r = radeon_dummy_page_init(rdev);
3051 	if (r)
3052 		return r;
3053 	/* This doesn't do much */
3054 	r = radeon_gem_init(rdev);
3055 	if (r)
3056 		return r;
3057 	/* Read BIOS */
3058 	if (!radeon_get_bios(rdev)) {
3059 		if (ASIC_IS_AVIVO(rdev))
3060 			return -EINVAL;
3061 	}
3062 	/* Must be an ATOMBIOS */
3063 	if (!rdev->is_atom_bios) {
3064 		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
3065 		return -EINVAL;
3066 	}
3067 	r = radeon_atombios_init(rdev);
3068 	if (r)
3069 		return r;
3070 	/* reset the asic, the gfx blocks are often in a bad state
3071 	 * after the driver is unloaded or after a resume
3072 	 */
3073 	if (radeon_asic_reset(rdev))
3074 		dev_warn(rdev->dev, "GPU reset failed!\n");
3075 	/* Post card if necessary */
3076 	if (!radeon_card_posted(rdev)) {
3077 		if (!rdev->bios) {
3078 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
3079 			return -EINVAL;
3080 		}
3081 		DRM_INFO("GPU not posted. posting now...\n");
3082 		atom_asic_init(rdev->mode_info.atom_context);
3083 	}
3084 	/* Initialize scratch registers */
3085 	r600_scratch_init(rdev);
3086 	/* Initialize surface registers */
3087 	radeon_surface_init(rdev);
3088 	/* Initialize clocks */
3089 	radeon_get_clock_info(rdev->ddev);
3090 	/* Fence driver */
3091 	r = radeon_fence_driver_init(rdev);
3092 	if (r)
3093 		return r;
3094 	/* initialize AGP */
3095 	if (rdev->flags & RADEON_IS_AGP) {
3096 		r = radeon_agp_init(rdev);
3097 		if (r)
3098 			radeon_agp_disable(rdev);
3099 	}
3100 	/* initialize memory controller */
3101 	r = evergreen_mc_init(rdev);
3102 	if (r)
3103 		return r;
3104 	/* Memory manager */
3105 	r = radeon_bo_init(rdev);
3106 	if (r)
3107 		return r;
3108 
3109 	r = radeon_irq_kms_init(rdev);
3110 	if (r)
3111 		return r;
3112 
3113 	rdev->cp.ring_obj = NULL;
3114 	r600_ring_init(rdev, 1024 * 1024);
3115 
3116 	rdev->ih.ring_obj = NULL;
3117 	r600_ih_ring_init(rdev, 64 * 1024);
3118 
3119 	r = r600_pcie_gart_init(rdev);
3120 	if (r)
3121 		return r;
3122 
3123 	rdev->accel_working = true;
3124 	r = evergreen_startup(rdev);
3125 	if (r) {
3126 		dev_err(rdev->dev, "disabling GPU acceleration\n");
3127 		r700_cp_fini(rdev);
3128 		r600_irq_fini(rdev);
3129 		radeon_wb_fini(rdev);
3130 		radeon_irq_kms_fini(rdev);
3131 		evergreen_pcie_gart_fini(rdev);
3132 		rdev->accel_working = false;
3133 	}
3134 	if (rdev->accel_working) {
3135 		r = radeon_ib_pool_init(rdev);
3136 		if (r) {
3137 			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
3138 			rdev->accel_working = false;
3139 		}
3140 		r = r600_ib_test(rdev);
3141 		if (r) {
3142 			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
3143 			rdev->accel_working = false;
3144 		}
3145 	}
3146 	return 0;
3147 }
3148 
3149 void evergreen_fini(struct radeon_device *rdev)
3150 {
3151 	evergreen_blit_fini(rdev);
3152 	r700_cp_fini(rdev);
3153 	r600_irq_fini(rdev);
3154 	radeon_wb_fini(rdev);
3155 	radeon_irq_kms_fini(rdev);
3156 	evergreen_pcie_gart_fini(rdev);
3157 	radeon_gem_fini(rdev);
3158 	radeon_fence_driver_fini(rdev);
3159 	radeon_agp_fini(rdev);
3160 	radeon_bo_fini(rdev);
3161 	radeon_atombios_fini(rdev);
3162 	kfree(rdev->bios);
3163 	rdev->bios = NULL;
3164 	radeon_dummy_page_fini(rdev);
3165 }
3166 
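/* Opportunistically switch the PCIE link to gen2. Skipped on IGP,
 * non-PCIE and X2 boards. If the other end supports (or ever sent) gen2,
 * allow link upconfigure, pulse the failed-speed-change counter clear,
 * and set the gen2 strap; otherwise the link is left at gen1.
 */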
3167 static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
3168 {
3169 	u32 link_width_cntl, speed_cntl;
3170 
3171 	if (radeon_pcie_gen2 == 0)
3172 		return;
3173 
3174 	if (rdev->flags & RADEON_IS_IGP)
3175 		return;
3176 
3177 	if (!(rdev->flags & RADEON_IS_PCIE))
3178 		return;
3179 
3180 	/* x2 cards have a special sequence */
3181 	if (ASIC_IS_X2(rdev))
3182 		return;
3183 
3184 	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3185 	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
3186 	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3187 
3188 		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3189 		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3190 		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3191 
3192 		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3193 		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3194 		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3195 
3196 		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3197 		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
3198 		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3199 
3200 		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3201 		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
3202 		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3203 
3204 		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3205 		speed_cntl |= LC_GEN2_EN_STRAP;
3206 		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3207 
3208 	} else {
3209 		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3210 		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
3211 		if (1)
3212 			link_width_cntl |= LC_UPCONFIGURE_DIS;
3213 		else
3214 			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3215 		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3216 	}
3217 }
3218