1 /*
2  * Copyright 2010 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Alex Deucher
23  */
24 #include <linux/firmware.h>
25 #include <linux/platform_device.h>
26 #include <linux/slab.h>
27 #include "drmP.h"
28 #include "radeon.h"
29 #include "radeon_asic.h"
30 #include "radeon_drm.h"
31 #include "evergreend.h"
32 #include "atom.h"
33 #include "avivod.h"
34 #include "evergreen_reg.h"
35 #include "evergreen_blit_shaders.h"
36 
37 #define EVERGREEN_PFP_UCODE_SIZE 1120
38 #define EVERGREEN_PM4_UCODE_SIZE 1376
39 
40 static void evergreen_gpu_init(struct radeon_device *rdev);
41 void evergreen_fini(struct radeon_device *rdev);
42 static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
43 
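/*
 * The PCIe Max_Read_Request_Size field occupies bits 14:12 of the Device
 * Control register; encodings 0-5 select 128-4096 bytes, and 6/7 are
 * reserved.  The helper below clamps out-of-range values back to 512
 * bytes (encoding 2).
 */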
44 void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
45 {
46 	u16 ctl, v;
47 	int cap, err;
48 
49 	cap = pci_pcie_cap(rdev->pdev);
50 	if (!cap)
51 		return;
52 
53 	err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
54 	if (err)
55 		return;
56 
57 	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
58 
59 	/* if the BIOS or OS sets MAX_READ_REQUEST_SIZE to an invalid value (0,
60 	 * or the reserved encodings 6 and 7), force it back to 512 bytes
61 	 * (encoding 2) to avoid hangs or performance issues */
62 	if ((v == 0) || (v == 6) || (v == 7)) {
63 		ctl &= ~PCI_EXP_DEVCTL_READRQ;
64 		ctl |= (2 << 12);
65 		pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
66 	}
67 }
68 
69 void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
70 {
71 	/* enable the pflip int */
72 	radeon_irq_kms_pflip_irq_get(rdev, crtc);
73 }
74 
75 void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
76 {
77 	/* disable the pflip int */
78 	radeon_irq_kms_pflip_irq_put(rdev, crtc);
79 }
80 
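/*
 * Flip sequence: lock GRPH_UPDATE so the new addresses latch atomically,
 * program both primary and secondary surface addresses, wait for the
 * surface-update-pending bit to assert, then unlock so the flip completes
 * on the following vblank.  Returns the current update-pending status.
 */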
81 u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
82 {
83 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
84 	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
85 
86 	/* Lock the graphics update lock */
87 	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
88 	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
89 
90 	/* update the scanout addresses */
91 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
92 	       upper_32_bits(crtc_base));
93 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
94 	       (u32)crtc_base);
95 
96 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
97 	       upper_32_bits(crtc_base));
98 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
99 	       (u32)crtc_base);
100 
101 	/* Wait for update_pending to go high (this busy-waits without a timeout). */
102 	while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
103 	DRM_DEBUG("Update pending now high. Unlocking GRPH_UPDATE_LOCK.\n");
104 
105 	/* Unlock the lock, so double-buffering can take place inside vblank */
106 	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
107 	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
108 
109 	/* Return current update_pending status: */
110 	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
111 }
112 
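/*
 * Thermal readback notes: on Juniper the raw ADC value is in half-degree
 * steps and TOFFSET appears to be a 9-bit two's-complement calibration
 * offset; other Evergreen parts report a sign-extended ASIC_T value, also
 * in half-degree steps (hence the final * 1000 / 2 scaling).
 */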
113 /* get temperature in millidegrees */
114 int evergreen_get_temp(struct radeon_device *rdev)
115 {
116 	u32 temp, toffset;
117 	int actual_temp = 0;
118 
119 	if (rdev->family == CHIP_JUNIPER) {
120 		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
121 			TOFFSET_SHIFT;
122 		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
123 			TS0_ADC_DOUT_SHIFT;
124 
125 		if (toffset & 0x100)
126 			actual_temp = temp / 2 - (0x200 - toffset);
127 		else
128 			actual_temp = temp / 2 + toffset;
129 
130 		actual_temp = actual_temp * 1000;
131 
132 	} else {
133 		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
134 			ASIC_T_SHIFT;
135 
136 		if (temp & 0x400)
137 			actual_temp = -256;
138 		else if (temp & 0x200)
139 			actual_temp = 255;
140 		else if (temp & 0x100) {
141 			actual_temp = temp & 0x1ff;
142 			actual_temp |= ~0x1ff;
143 		} else
144 			actual_temp = temp & 0xff;
145 
146 		actual_temp = (actual_temp * 1000) / 2;
147 	}
148 
149 	return actual_temp;
150 }
151 
152 int sumo_get_temp(struct radeon_device *rdev)
153 {
154 	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
155 	int actual_temp = temp - 49;
156 
157 	return actual_temp * 1000;
158 }
159 
160 void evergreen_pm_misc(struct radeon_device *rdev)
161 {
162 	int req_ps_idx = rdev->pm.requested_power_state_index;
163 	int req_cm_idx = rdev->pm.requested_clock_mode_index;
164 	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
165 	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
166 
167 	if (voltage->type == VOLTAGE_SW) {
168 		/* 0xff01 is a flag rather than an actual voltage */
169 		if (voltage->voltage == 0xff01)
170 			return;
171 		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
172 			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
173 			rdev->pm.current_vddc = voltage->voltage;
174 			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
175 		}
176 		/* 0xff01 is a flag rather than an actual voltage */
177 		if (voltage->vddci == 0xff01)
178 			return;
179 		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
180 			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
181 			rdev->pm.current_vddci = voltage->vddci;
182 			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
183 		}
184 	}
185 }
186 
187 void evergreen_pm_prepare(struct radeon_device *rdev)
188 {
189 	struct drm_device *ddev = rdev->ddev;
190 	struct drm_crtc *crtc;
191 	struct radeon_crtc *radeon_crtc;
192 	u32 tmp;
193 
194 	/* disable any active CRTCs */
195 	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
196 		radeon_crtc = to_radeon_crtc(crtc);
197 		if (radeon_crtc->enabled) {
198 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
199 			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
200 			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
201 		}
202 	}
203 }
204 
205 void evergreen_pm_finish(struct radeon_device *rdev)
206 {
207 	struct drm_device *ddev = rdev->ddev;
208 	struct drm_crtc *crtc;
209 	struct radeon_crtc *radeon_crtc;
210 	u32 tmp;
211 
212 	/* enable any active CRTCs */
213 	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
214 		radeon_crtc = to_radeon_crtc(crtc);
215 		if (radeon_crtc->enabled) {
216 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
217 			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
218 			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
219 		}
220 	}
221 }
222 
223 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
224 {
225 	bool connected = false;
226 
227 	switch (hpd) {
228 	case RADEON_HPD_1:
229 		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
230 			connected = true;
231 		break;
232 	case RADEON_HPD_2:
233 		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
234 			connected = true;
235 		break;
236 	case RADEON_HPD_3:
237 		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
238 			connected = true;
239 		break;
240 	case RADEON_HPD_4:
241 		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
242 			connected = true;
243 		break;
244 	case RADEON_HPD_5:
245 		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
246 			connected = true;
247 		break;
248 	case RADEON_HPD_6:
249 		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
250 			connected = true;
251 		break;
252 	default:
253 		break;
254 	}
255 
256 	return connected;
257 }
258 
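/*
 * The interrupt polarity is programmed opposite to the current sense
 * state, so the next hotplug interrupt fires on the opposite transition
 * (plug -> unplug, or unplug -> plug).
 */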
259 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
260 				enum radeon_hpd_id hpd)
261 {
262 	u32 tmp;
263 	bool connected = evergreen_hpd_sense(rdev, hpd);
264 
265 	switch (hpd) {
266 	case RADEON_HPD_1:
267 		tmp = RREG32(DC_HPD1_INT_CONTROL);
268 		if (connected)
269 			tmp &= ~DC_HPDx_INT_POLARITY;
270 		else
271 			tmp |= DC_HPDx_INT_POLARITY;
272 		WREG32(DC_HPD1_INT_CONTROL, tmp);
273 		break;
274 	case RADEON_HPD_2:
275 		tmp = RREG32(DC_HPD2_INT_CONTROL);
276 		if (connected)
277 			tmp &= ~DC_HPDx_INT_POLARITY;
278 		else
279 			tmp |= DC_HPDx_INT_POLARITY;
280 		WREG32(DC_HPD2_INT_CONTROL, tmp);
281 		break;
282 	case RADEON_HPD_3:
283 		tmp = RREG32(DC_HPD3_INT_CONTROL);
284 		if (connected)
285 			tmp &= ~DC_HPDx_INT_POLARITY;
286 		else
287 			tmp |= DC_HPDx_INT_POLARITY;
288 		WREG32(DC_HPD3_INT_CONTROL, tmp);
289 		break;
290 	case RADEON_HPD_4:
291 		tmp = RREG32(DC_HPD4_INT_CONTROL);
292 		if (connected)
293 			tmp &= ~DC_HPDx_INT_POLARITY;
294 		else
295 			tmp |= DC_HPDx_INT_POLARITY;
296 		WREG32(DC_HPD4_INT_CONTROL, tmp);
297 		break;
298 	case RADEON_HPD_5:
299 		tmp = RREG32(DC_HPD5_INT_CONTROL);
300 		if (connected)
301 			tmp &= ~DC_HPDx_INT_POLARITY;
302 		else
303 			tmp |= DC_HPDx_INT_POLARITY;
304 		WREG32(DC_HPD5_INT_CONTROL, tmp);
305 		break;
306 	case RADEON_HPD_6:
307 		tmp = RREG32(DC_HPD6_INT_CONTROL);
308 		if (connected)
309 			tmp &= ~DC_HPDx_INT_POLARITY;
310 		else
311 			tmp |= DC_HPDx_INT_POLARITY;
312 		WREG32(DC_HPD6_INT_CONTROL, tmp);
313 		break;
314 	default:
315 		break;
316 	}
317 }
318 
319 void evergreen_hpd_init(struct radeon_device *rdev)
320 {
321 	struct drm_device *dev = rdev->ddev;
322 	struct drm_connector *connector;
323 	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
324 		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
325 
326 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
327 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
328 		switch (radeon_connector->hpd.hpd) {
329 		case RADEON_HPD_1:
330 			WREG32(DC_HPD1_CONTROL, tmp);
331 			rdev->irq.hpd[0] = true;
332 			break;
333 		case RADEON_HPD_2:
334 			WREG32(DC_HPD2_CONTROL, tmp);
335 			rdev->irq.hpd[1] = true;
336 			break;
337 		case RADEON_HPD_3:
338 			WREG32(DC_HPD3_CONTROL, tmp);
339 			rdev->irq.hpd[2] = true;
340 			break;
341 		case RADEON_HPD_4:
342 			WREG32(DC_HPD4_CONTROL, tmp);
343 			rdev->irq.hpd[3] = true;
344 			break;
345 		case RADEON_HPD_5:
346 			WREG32(DC_HPD5_CONTROL, tmp);
347 			rdev->irq.hpd[4] = true;
348 			break;
349 		case RADEON_HPD_6:
350 			WREG32(DC_HPD6_CONTROL, tmp);
351 			rdev->irq.hpd[5] = true;
352 			break;
353 		default:
354 			break;
355 		}
356 	}
357 	if (rdev->irq.installed)
358 		evergreen_irq_set(rdev);
359 }
360 
361 void evergreen_hpd_fini(struct radeon_device *rdev)
362 {
363 	struct drm_device *dev = rdev->ddev;
364 	struct drm_connector *connector;
365 
366 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
367 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
368 		switch (radeon_connector->hpd.hpd) {
369 		case RADEON_HPD_1:
370 			WREG32(DC_HPD1_CONTROL, 0);
371 			rdev->irq.hpd[0] = false;
372 			break;
373 		case RADEON_HPD_2:
374 			WREG32(DC_HPD2_CONTROL, 0);
375 			rdev->irq.hpd[1] = false;
376 			break;
377 		case RADEON_HPD_3:
378 			WREG32(DC_HPD3_CONTROL, 0);
379 			rdev->irq.hpd[2] = false;
380 			break;
381 		case RADEON_HPD_4:
382 			WREG32(DC_HPD4_CONTROL, 0);
383 			rdev->irq.hpd[3] = false;
384 			break;
385 		case RADEON_HPD_5:
386 			WREG32(DC_HPD5_CONTROL, 0);
387 			rdev->irq.hpd[4] = false;
388 			break;
389 		case RADEON_HPD_6:
390 			WREG32(DC_HPD6_CONTROL, 0);
391 			rdev->irq.hpd[5] = false;
392 			break;
393 		default:
394 			break;
395 		}
396 	}
397 }
398 
399 /* watermark setup */
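/*
 * Rough flow: pick a line buffer split for each CRTC pair, derive the
 * available DRAM/data-return/DMIF bandwidth, compute a latency watermark
 * per head, and program the arbitration priority marks.  If a head's
 * average demand exceeds its share of bandwidth, or latency cannot be
 * hidden, display priority is forced high.
 */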
400 
401 static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
402 					struct radeon_crtc *radeon_crtc,
403 					struct drm_display_mode *mode,
404 					struct drm_display_mode *other_mode)
405 {
406 	u32 tmp;
407 	/*
408 	 * Line Buffer Setup
409 	 * There are 3 line buffers, each one shared by 2 display controllers.
410 	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
411 	 * the display controllers.  The partitioning is done via one of four
412 	 * preset allocations specified in bits 2:0:
413 	 * first display controller
414 	 *  0 - first half of lb (3840 * 2)
415 	 *  1 - first 3/4 of lb (5760 * 2)
416 	 *  2 - whole lb (7680 * 2), other crtc must be disabled
417 	 *  3 - first 1/4 of lb (1920 * 2)
418 	 * second display controller
419 	 *  4 - second half of lb (3840 * 2)
420 	 *  5 - second 3/4 of lb (5760 * 2)
421 	 *  6 - whole lb (7680 * 2), other crtc must be disabled
422 	 *  7 - last 1/4 of lb (1920 * 2)
423 	 */
424 	/* this can get tricky if we have two large displays on a paired group
425 	 * of crtcs.  Ideally for multiple large displays we'd assign them to
426 	 * non-linked crtcs for maximum line buffer allocation.
427 	 */
428 	if (radeon_crtc->base.enabled && mode) {
429 		if (other_mode)
430 			tmp = 0; /* 1/2 */
431 		else
432 			tmp = 2; /* whole */
433 	} else
434 		tmp = 0;
435 
436 	/* second controller of the pair uses second half of the lb */
437 	if (radeon_crtc->crtc_id % 2)
438 		tmp += 4;
439 	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
440 
441 	if (radeon_crtc->base.enabled && mode) {
442 		switch (tmp) {
443 		case 0:
444 		case 4:
445 		default:
446 			if (ASIC_IS_DCE5(rdev))
447 				return 4096 * 2;
448 			else
449 				return 3840 * 2;
450 		case 1:
451 		case 5:
452 			if (ASIC_IS_DCE5(rdev))
453 				return 6144 * 2;
454 			else
455 				return 5760 * 2;
456 		case 2:
457 		case 6:
458 			if (ASIC_IS_DCE5(rdev))
459 				return 8192 * 2;
460 			else
461 				return 7680 * 2;
462 		case 3:
463 		case 7:
464 			if (ASIC_IS_DCE5(rdev))
465 				return 2048 * 2;
466 			else
467 				return 1920 * 2;
468 		}
469 	}
470 
471 	/* controller not enabled, so no lb used */
472 	return 0;
473 }
474 
475 static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
476 {
477 	u32 tmp = RREG32(MC_SHARED_CHMAP);
478 
479 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
480 	case 0:
481 	default:
482 		return 1;
483 	case 1:
484 		return 2;
485 	case 2:
486 		return 4;
487 	case 3:
488 		return 8;
489 	}
490 }
491 
492 struct evergreen_wm_params {
493 	u32 dram_channels; /* number of dram channels */
494 	u32 yclk;          /* bandwidth per dram data pin in kHz */
495 	u32 sclk;          /* engine clock in kHz */
496 	u32 disp_clk;      /* display clock in kHz */
497 	u32 src_width;     /* viewport width */
498 	u32 active_time;   /* active display time in ns */
499 	u32 blank_time;    /* blank time in ns */
500 	bool interlaced;    /* mode is interlaced */
501 	fixed20_12 vsc;    /* vertical scale ratio */
502 	u32 num_heads;     /* number of active crtcs */
503 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
504 	u32 lb_size;       /* line buffer allocated to pipe */
505 	u32 vtaps;         /* vertical scaler taps */
506 };
507 
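/* All of the bandwidth math below runs in 20.12 fixed point via the
 * dfixed_* helpers, with clocks first converted from kHz to MHz. */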
508 static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
509 {
510 	/* Calculate DRAM Bandwidth and the part allocated to display. */
511 	fixed20_12 dram_efficiency; /* 0.7 */
512 	fixed20_12 yclk, dram_channels, bandwidth;
513 	fixed20_12 a;
514 
515 	a.full = dfixed_const(1000);
516 	yclk.full = dfixed_const(wm->yclk);
517 	yclk.full = dfixed_div(yclk, a);
518 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
519 	a.full = dfixed_const(10);
520 	dram_efficiency.full = dfixed_const(7);
521 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
522 	bandwidth.full = dfixed_mul(dram_channels, yclk);
523 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
524 
525 	return dfixed_trunc(bandwidth);
526 }
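/* i.e. roughly yclk (MHz) * 4 bytes per channel * number of channels
 * * 0.7 efficiency; the result is in MB/s. */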
527 
528 static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
529 {
530 	/* Calculate DRAM Bandwidth and the part allocated to display. */
531 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
532 	fixed20_12 yclk, dram_channels, bandwidth;
533 	fixed20_12 a;
534 
535 	a.full = dfixed_const(1000);
536 	yclk.full = dfixed_const(wm->yclk);
537 	yclk.full = dfixed_div(yclk, a);
538 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
539 	a.full = dfixed_const(10);
540 	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
541 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
542 	bandwidth.full = dfixed_mul(dram_channels, yclk);
543 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
544 
545 	return dfixed_trunc(bandwidth);
546 }
547 
548 static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
549 {
550 	/* Calculate the display Data return Bandwidth */
551 	fixed20_12 return_efficiency; /* 0.8 */
552 	fixed20_12 sclk, bandwidth;
553 	fixed20_12 a;
554 
555 	a.full = dfixed_const(1000);
556 	sclk.full = dfixed_const(wm->sclk);
557 	sclk.full = dfixed_div(sclk, a);
558 	a.full = dfixed_const(10);
559 	return_efficiency.full = dfixed_const(8);
560 	return_efficiency.full = dfixed_div(return_efficiency, a);
561 	a.full = dfixed_const(32);
562 	bandwidth.full = dfixed_mul(a, sclk);
563 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
564 
565 	return dfixed_trunc(bandwidth);
566 }
567 
568 static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
569 {
570 	/* Calculate the DMIF Request Bandwidth */
571 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
572 	fixed20_12 disp_clk, bandwidth;
573 	fixed20_12 a;
574 
575 	a.full = dfixed_const(1000);
576 	disp_clk.full = dfixed_const(wm->disp_clk);
577 	disp_clk.full = dfixed_div(disp_clk, a);
578 	a.full = dfixed_const(10);
579 	disp_clk_request_efficiency.full = dfixed_const(8);
580 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
581 	a.full = dfixed_const(32);
582 	bandwidth.full = dfixed_mul(a, disp_clk);
583 	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
584 
585 	return dfixed_trunc(bandwidth);
586 }
587 
588 static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
589 {
590 	/* Calculate the available bandwidth. The display can use this temporarily, but not on average. */
591 	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
592 	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
593 	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
594 
595 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
596 }
597 
598 static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
599 {
600 	/* Calculate the display mode Average Bandwidth
601 	 * DisplayMode should contain the source and destination dimensions,
602 	 * timing, etc.
603 	 */
604 	fixed20_12 bpp;
605 	fixed20_12 line_time;
606 	fixed20_12 src_width;
607 	fixed20_12 bandwidth;
608 	fixed20_12 a;
609 
610 	a.full = dfixed_const(1000);
611 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
612 	line_time.full = dfixed_div(line_time, a);
613 	bpp.full = dfixed_const(wm->bytes_per_pixel);
614 	src_width.full = dfixed_const(wm->src_width);
615 	bandwidth.full = dfixed_mul(src_width, bpp);
616 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
617 	bandwidth.full = dfixed_div(bandwidth, line_time);
618 
619 	return dfixed_trunc(bandwidth);
620 }
621 
622 static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
623 {
624 	/* First calculate the latency in ns */
625 	u32 mc_latency = 2000; /* 2000 ns. */
626 	u32 available_bandwidth = evergreen_available_bandwidth(wm);
627 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
628 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
629 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
630 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
631 		(wm->num_heads * cursor_line_pair_return_time);
632 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
633 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
634 	fixed20_12 a, b, c;
635 
636 	if (wm->num_heads == 0)
637 		return 0;
638 
639 	a.full = dfixed_const(2);
640 	b.full = dfixed_const(1);
641 	if ((wm->vsc.full > a.full) ||
642 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
643 	    (wm->vtaps >= 5) ||
644 	    ((wm->vsc.full >= a.full) && wm->interlaced))
645 		max_src_lines_per_dst_line = 4;
646 	else
647 		max_src_lines_per_dst_line = 2;
648 
649 	a.full = dfixed_const(available_bandwidth);
650 	b.full = dfixed_const(wm->num_heads);
651 	a.full = dfixed_div(a, b);
652 
653 	b.full = dfixed_const(1000);
654 	c.full = dfixed_const(wm->disp_clk);
655 	b.full = dfixed_div(c, b);
656 	c.full = dfixed_const(wm->bytes_per_pixel);
657 	b.full = dfixed_mul(b, c);
658 
659 	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
660 
661 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
662 	b.full = dfixed_const(1000);
663 	c.full = dfixed_const(lb_fill_bw);
664 	b.full = dfixed_div(c, b);
665 	a.full = dfixed_div(a, b);
666 	line_fill_time = dfixed_trunc(a);
667 
668 	if (line_fill_time < wm->active_time)
669 		return latency;
670 	else
671 		return latency + (line_fill_time - wm->active_time);
672 
673 }
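/*
 * The watermark is the total latency the line buffer must hide (memory
 * latency, other heads' chunk returns, and display pipe latency), padded
 * when refilling a source line takes longer than the active display time.
 */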
674 
675 static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
676 {
677 	if (evergreen_average_bandwidth(wm) <=
678 	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
679 		return true;
680 	else
681 		return false;
682 }
683 
684 static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
685 {
686 	if (evergreen_average_bandwidth(wm) <=
687 	    (evergreen_available_bandwidth(wm) / wm->num_heads))
688 		return true;
689 	else
690 		return false;
691 }
692 
693 static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
694 {
695 	u32 lb_partitions = wm->lb_size / wm->src_width;
696 	u32 line_time = wm->active_time + wm->blank_time;
697 	u32 latency_tolerant_lines;
698 	u32 latency_hiding;
699 	fixed20_12 a;
700 
701 	a.full = dfixed_const(1);
702 	if (wm->vsc.full > a.full)
703 		latency_tolerant_lines = 1;
704 	else {
705 		if (lb_partitions <= (wm->vtaps + 1))
706 			latency_tolerant_lines = 1;
707 		else
708 			latency_tolerant_lines = 2;
709 	}
710 
711 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
712 
713 	if (evergreen_latency_watermark(wm) <= latency_hiding)
714 		return true;
715 	else
716 		return false;
717 }
718 
719 static void evergreen_program_watermarks(struct radeon_device *rdev,
720 					 struct radeon_crtc *radeon_crtc,
721 					 u32 lb_size, u32 num_heads)
722 {
723 	struct drm_display_mode *mode = &radeon_crtc->base.mode;
724 	struct evergreen_wm_params wm;
725 	u32 pixel_period;
726 	u32 line_time = 0;
727 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
728 	u32 priority_a_mark = 0, priority_b_mark = 0;
729 	u32 priority_a_cnt = PRIORITY_OFF;
730 	u32 priority_b_cnt = PRIORITY_OFF;
731 	u32 pipe_offset = radeon_crtc->crtc_id * 16;
732 	u32 tmp, arb_control3;
733 	fixed20_12 a, b, c;
734 
735 	if (radeon_crtc->base.enabled && num_heads && mode) {
736 		pixel_period = 1000000 / (u32)mode->clock;
737 		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
738 		priority_a_cnt = 0;
739 		priority_b_cnt = 0;
740 
741 		wm.yclk = rdev->pm.current_mclk * 10;
742 		wm.sclk = rdev->pm.current_sclk * 10;
743 		wm.disp_clk = mode->clock;
744 		wm.src_width = mode->crtc_hdisplay;
745 		wm.active_time = mode->crtc_hdisplay * pixel_period;
746 		wm.blank_time = line_time - wm.active_time;
747 		wm.interlaced = false;
748 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
749 			wm.interlaced = true;
750 		wm.vsc = radeon_crtc->vsc;
751 		wm.vtaps = 1;
752 		if (radeon_crtc->rmx_type != RMX_OFF)
753 			wm.vtaps = 2;
754 		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
755 		wm.lb_size = lb_size;
756 		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
757 		wm.num_heads = num_heads;
758 
759 		/* set for high clocks */
760 		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
761 		/* set for low clocks */
762 		/* wm.yclk = low clk; wm.sclk = low clk */
763 		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
764 
765 		/* possibly force display priority to high */
766 		/* should really do this at mode validation time... */
767 		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
768 		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
769 		    !evergreen_check_latency_hiding(&wm) ||
770 		    (rdev->disp_priority == 2)) {
771 			DRM_DEBUG_KMS("force priority to high\n");
772 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
773 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
774 		}
775 
776 		a.full = dfixed_const(1000);
777 		b.full = dfixed_const(mode->clock);
778 		b.full = dfixed_div(b, a);
779 		c.full = dfixed_const(latency_watermark_a);
780 		c.full = dfixed_mul(c, b);
781 		c.full = dfixed_mul(c, radeon_crtc->hsc);
782 		c.full = dfixed_div(c, a);
783 		a.full = dfixed_const(16);
784 		c.full = dfixed_div(c, a);
785 		priority_a_mark = dfixed_trunc(c);
786 		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
787 
788 		a.full = dfixed_const(1000);
789 		b.full = dfixed_const(mode->clock);
790 		b.full = dfixed_div(b, a);
791 		c.full = dfixed_const(latency_watermark_b);
792 		c.full = dfixed_mul(c, b);
793 		c.full = dfixed_mul(c, radeon_crtc->hsc);
794 		c.full = dfixed_div(c, a);
795 		a.full = dfixed_const(16);
796 		c.full = dfixed_div(c, a);
797 		priority_b_mark = dfixed_trunc(c);
798 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
799 	}
800 
801 	/* select wm A */
802 	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
803 	tmp = arb_control3;
804 	tmp &= ~LATENCY_WATERMARK_MASK(3);
805 	tmp |= LATENCY_WATERMARK_MASK(1);
806 	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
807 	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
808 	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
809 		LATENCY_HIGH_WATERMARK(line_time)));
810 	/* select wm B */
811 	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
812 	tmp &= ~LATENCY_WATERMARK_MASK(3);
813 	tmp |= LATENCY_WATERMARK_MASK(2);
814 	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
815 	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
816 	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
817 		LATENCY_HIGH_WATERMARK(line_time)));
818 	/* restore original selection */
819 	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
820 
821 	/* write the priority marks */
822 	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
823 	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
824 
825 }
826 
827 void evergreen_bandwidth_update(struct radeon_device *rdev)
828 {
829 	struct drm_display_mode *mode0 = NULL;
830 	struct drm_display_mode *mode1 = NULL;
831 	u32 num_heads = 0, lb_size;
832 	int i;
833 
834 	radeon_update_display_priority(rdev);
835 
836 	for (i = 0; i < rdev->num_crtc; i++) {
837 		if (rdev->mode_info.crtcs[i]->base.enabled)
838 			num_heads++;
839 	}
840 	for (i = 0; i < rdev->num_crtc; i += 2) {
841 		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
842 		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
843 		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
844 		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
845 		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
846 		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
847 	}
848 }
849 
850 int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
851 {
852 	unsigned i;
853 	u32 tmp;
854 
855 	for (i = 0; i < rdev->usec_timeout; i++) {
856 		/* check the MC busy bits in SRBM_STATUS */
857 		tmp = RREG32(SRBM_STATUS) & 0x1F00;
858 		if (!tmp)
859 			return 0;
860 		udelay(1);
861 	}
862 	return -1;
863 }
864 
865 /*
866  * GART
867  */
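/*
 * TLB flush handshake: kick an HDP coherency flush, then write a request
 * to VM_CONTEXT0_REQUEST_RESPONSE and poll RESPONSE_TYPE until the MC
 * acknowledges (a response of 2 is treated as a failure).
 */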
868 void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
869 {
870 	unsigned i;
871 	u32 tmp;
872 
873 	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
874 
875 	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
876 	for (i = 0; i < rdev->usec_timeout; i++) {
877 		/* poll until the MC acknowledges the request */
878 		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
879 		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
880 		if (tmp == 2) {
881 			printk(KERN_WARNING "[drm] evergreen flush TLB failed\n");
882 			return;
883 		}
884 		if (tmp) {
885 			return;
886 		}
887 		udelay(1);
888 	}
889 }
890 
891 int evergreen_pcie_gart_enable(struct radeon_device *rdev)
892 {
893 	u32 tmp;
894 	int r;
895 
896 	if (rdev->gart.table.vram.robj == NULL) {
897 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
898 		return -EINVAL;
899 	}
900 	r = radeon_gart_table_vram_pin(rdev);
901 	if (r)
902 		return r;
903 	radeon_gart_restore(rdev);
904 	/* Setup L2 cache */
905 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
906 				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
907 				EFFECTIVE_L2_QUEUE_SIZE(7));
908 	WREG32(VM_L2_CNTL2, 0);
909 	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
910 	/* Setup TLB control */
911 	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
912 		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
913 		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
914 		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
915 	if (rdev->flags & RADEON_IS_IGP) {
916 		WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
917 		WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
918 		WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
919 	} else {
920 		WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
921 		WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
922 		WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
923 	}
924 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
925 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
926 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
927 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
928 	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
929 	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
930 	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
931 	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
932 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
933 	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
934 			(u32)(rdev->dummy_page.addr >> 12));
935 	WREG32(VM_CONTEXT1_CNTL, 0);
936 
937 	evergreen_pcie_gart_tlb_flush(rdev);
938 	rdev->gart.ready = true;
939 	return 0;
940 }
941 
942 void evergreen_pcie_gart_disable(struct radeon_device *rdev)
943 {
944 	u32 tmp;
945 	int r;
946 
947 	/* Disable all tables */
948 	WREG32(VM_CONTEXT0_CNTL, 0);
949 	WREG32(VM_CONTEXT1_CNTL, 0);
950 
951 	/* Setup L2 cache */
952 	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
953 				EFFECTIVE_L2_QUEUE_SIZE(7));
954 	WREG32(VM_L2_CNTL2, 0);
955 	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
956 	/* Setup TLB control */
957 	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
958 	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
959 	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
960 	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
961 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
962 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
963 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
964 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
965 	if (rdev->gart.table.vram.robj) {
966 		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
967 		if (likely(r == 0)) {
968 			radeon_bo_kunmap(rdev->gart.table.vram.robj);
969 			radeon_bo_unpin(rdev->gart.table.vram.robj);
970 			radeon_bo_unreserve(rdev->gart.table.vram.robj);
971 		}
972 	}
973 }
974 
975 void evergreen_pcie_gart_fini(struct radeon_device *rdev)
976 {
977 	evergreen_pcie_gart_disable(rdev);
978 	radeon_gart_table_vram_free(rdev);
979 	radeon_gart_fini(rdev);
980 }
981 
982 
983 void evergreen_agp_enable(struct radeon_device *rdev)
984 {
985 	u32 tmp;
986 
987 	/* Setup L2 cache */
988 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
989 				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
990 				EFFECTIVE_L2_QUEUE_SIZE(7));
991 	WREG32(VM_L2_CNTL2, 0);
992 	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
993 	/* Setup TLB control */
994 	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
995 		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
996 		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
997 		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
998 	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
999 	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
1000 	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
1001 	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
1002 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
1003 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
1004 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
1005 	WREG32(VM_CONTEXT0_CNTL, 0);
1006 	WREG32(VM_CONTEXT1_CNTL, 0);
1007 }
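/* With both VM contexts disabled above, GPU addresses are effectively
 * passed through untranslated to the system/AGP aperture. */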
1008 
1009 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
1010 {
1011 	save->vga_control[0] = RREG32(D1VGA_CONTROL);
1012 	save->vga_control[1] = RREG32(D2VGA_CONTROL);
1013 	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
1014 	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
1015 	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
1016 	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
1017 	if (rdev->num_crtc >= 4) {
1018 		save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
1019 		save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
1020 		save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
1021 		save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
1022 	}
1023 	if (rdev->num_crtc >= 6) {
1024 		save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
1025 		save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
1026 		save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
1027 		save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
1028 	}
1029 
1030 	/* Stop all video */
1031 	WREG32(VGA_RENDER_CONTROL, 0);
1032 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
1033 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
1034 	if (rdev->num_crtc >= 4) {
1035 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
1036 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
1037 	}
1038 	if (rdev->num_crtc >= 6) {
1039 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
1040 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
1041 	}
1042 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
1043 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
1044 	if (rdev->num_crtc >= 4) {
1045 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
1046 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
1047 	}
1048 	if (rdev->num_crtc >= 6) {
1049 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
1050 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
1051 	}
1052 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
1053 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
1054 	if (rdev->num_crtc >= 4) {
1055 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
1056 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
1057 	}
1058 	if (rdev->num_crtc >= 6) {
1059 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
1060 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
1061 	}
1062 
1063 	WREG32(D1VGA_CONTROL, 0);
1064 	WREG32(D2VGA_CONTROL, 0);
1065 	if (rdev->num_crtc >= 4) {
1066 		WREG32(EVERGREEN_D3VGA_CONTROL, 0);
1067 		WREG32(EVERGREEN_D4VGA_CONTROL, 0);
1068 	}
1069 	if (rdev->num_crtc >= 6) {
1070 		WREG32(EVERGREEN_D5VGA_CONTROL, 0);
1071 		WREG32(EVERGREEN_D6VGA_CONTROL, 0);
1072 	}
1073 }
1074 
1075 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
1076 {
1077 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
1078 	       upper_32_bits(rdev->mc.vram_start));
1079 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
1080 	       upper_32_bits(rdev->mc.vram_start));
1081 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
1082 	       (u32)rdev->mc.vram_start);
1083 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
1084 	       (u32)rdev->mc.vram_start);
1085 
1086 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
1087 	       upper_32_bits(rdev->mc.vram_start));
1088 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
1089 	       upper_32_bits(rdev->mc.vram_start));
1090 	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
1091 	       (u32)rdev->mc.vram_start);
1092 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
1093 	       (u32)rdev->mc.vram_start);
1094 
1095 	if (rdev->num_crtc >= 4) {
1096 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
1097 		       upper_32_bits(rdev->mc.vram_start));
1098 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
1099 		       upper_32_bits(rdev->mc.vram_start));
1100 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
1101 		       (u32)rdev->mc.vram_start);
1102 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
1103 		       (u32)rdev->mc.vram_start);
1104 
1105 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
1106 		       upper_32_bits(rdev->mc.vram_start));
1107 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
1108 		       upper_32_bits(rdev->mc.vram_start));
1109 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
1110 		       (u32)rdev->mc.vram_start);
1111 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
1112 		       (u32)rdev->mc.vram_start);
1113 	}
1114 	if (rdev->num_crtc >= 6) {
1115 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
1116 		       upper_32_bits(rdev->mc.vram_start));
1117 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
1118 		       upper_32_bits(rdev->mc.vram_start));
1119 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
1120 		       (u32)rdev->mc.vram_start);
1121 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
1122 		       (u32)rdev->mc.vram_start);
1123 
1124 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
1125 		       upper_32_bits(rdev->mc.vram_start));
1126 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
1127 		       upper_32_bits(rdev->mc.vram_start));
1128 		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
1129 		       (u32)rdev->mc.vram_start);
1130 		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
1131 		       (u32)rdev->mc.vram_start);
1132 	}
1133 
1134 	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
1135 	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
1136 	/* Unlock host access */
1137 	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
1138 	mdelay(1);
1139 	/* Restore video state */
1140 	WREG32(D1VGA_CONTROL, save->vga_control[0]);
1141 	WREG32(D2VGA_CONTROL, save->vga_control[1]);
1142 	if (rdev->num_crtc >= 4) {
1143 		WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
1144 		WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
1145 	}
1146 	if (rdev->num_crtc >= 6) {
1147 		WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
1148 		WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
1149 	}
1150 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
1151 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
1152 	if (rdev->num_crtc >= 4) {
1153 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
1154 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
1155 	}
1156 	if (rdev->num_crtc >= 6) {
1157 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
1158 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
1159 	}
1160 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
1161 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
1162 	if (rdev->num_crtc >= 4) {
1163 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
1164 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
1165 	}
1166 	if (rdev->num_crtc >= 6) {
1167 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
1168 		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
1169 	}
1170 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
1171 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
1172 	if (rdev->num_crtc >= 4) {
1173 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
1174 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
1175 	}
1176 	if (rdev->num_crtc >= 6) {
1177 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
1178 		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
1179 	}
1180 	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
1181 }
1182 
1183 void evergreen_mc_program(struct radeon_device *rdev)
1184 {
1185 	struct evergreen_mc_save save;
1186 	u32 tmp;
1187 	int i, j;
1188 
1189 	/* Initialize HDP */
1190 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1191 		WREG32((0x2c14 + j), 0x00000000);
1192 		WREG32((0x2c18 + j), 0x00000000);
1193 		WREG32((0x2c1c + j), 0x00000000);
1194 		WREG32((0x2c20 + j), 0x00000000);
1195 		WREG32((0x2c24 + j), 0x00000000);
1196 	}
1197 	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1198 
1199 	evergreen_mc_stop(rdev, &save);
1200 	if (evergreen_mc_wait_for_idle(rdev)) {
1201 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1202 	}
1203 	/* Lock out access through the VGA aperture */
1204 	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1205 	/* Update configuration */
1206 	if (rdev->flags & RADEON_IS_AGP) {
1207 		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1208 			/* VRAM before AGP */
1209 			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1210 				rdev->mc.vram_start >> 12);
1211 			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1212 				rdev->mc.gtt_end >> 12);
1213 		} else {
1214 			/* VRAM after AGP */
1215 			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1216 				rdev->mc.gtt_start >> 12);
1217 			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1218 				rdev->mc.vram_end >> 12);
1219 		}
1220 	} else {
1221 		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1222 			rdev->mc.vram_start >> 12);
1223 		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1224 			rdev->mc.vram_end >> 12);
1225 	}
1226 	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
1227 	if (rdev->flags & RADEON_IS_IGP) {
1228 		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
1229 		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
1230 		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
1231 		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
1232 	}
1233 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1234 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1235 	WREG32(MC_VM_FB_LOCATION, tmp);
1236 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
1237 	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
1238 	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
1239 	if (rdev->flags & RADEON_IS_AGP) {
1240 		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
1241 		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
1242 		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1243 	} else {
1244 		WREG32(MC_VM_AGP_BASE, 0);
1245 		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1246 		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1247 	}
1248 	if (evergreen_mc_wait_for_idle(rdev)) {
1249 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1250 	}
1251 	evergreen_mc_resume(rdev, &save);
1252 	/* we need to own VRAM, so turn off the VGA renderer here
1253 	 * to stop it overwriting our objects */
1254 	rv515_vga_render_disable(rdev);
1255 }
1256 
1257 /*
1258  * CP.
1259  */
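/*
 * An indirect buffer is dispatched by writing an INDIRECT_BUFFER packet
 * carrying the IB's GPU address and its length in dwords; the
 * PACKET3_MODE_CONTROL write first switches the CP to the DX10/11 packet
 * format.
 */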
1260 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1261 {
1262 	/* set to DX10/11 mode */
1263 	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
1264 	radeon_ring_write(rdev, 1);
1265 	/* FIXME: implement */
1266 	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1267 	radeon_ring_write(rdev,
1268 #ifdef __BIG_ENDIAN
1269 			  (2 << 0) |
1270 #endif
1271 			  (ib->gpu_addr & 0xFFFFFFFC));
1272 	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
1273 	radeon_ring_write(rdev, ib->length_dw);
1274 }
1275 
1276 
1277 static int evergreen_cp_load_microcode(struct radeon_device *rdev)
1278 {
1279 	const __be32 *fw_data;
1280 	int i;
1281 
1282 	if (!rdev->me_fw || !rdev->pfp_fw)
1283 		return -EINVAL;
1284 
1285 	r700_cp_stop(rdev);
1286 	WREG32(CP_RB_CNTL,
1287 #ifdef __BIG_ENDIAN
1288 	       BUF_SWAP_32BIT |
1289 #endif
1290 	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
1291 
1292 	fw_data = (const __be32 *)rdev->pfp_fw->data;
1293 	WREG32(CP_PFP_UCODE_ADDR, 0);
1294 	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
1295 		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
1296 	WREG32(CP_PFP_UCODE_ADDR, 0);
1297 
1298 	fw_data = (const __be32 *)rdev->me_fw->data;
1299 	WREG32(CP_ME_RAM_WADDR, 0);
1300 	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
1301 		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
1302 
1303 	WREG32(CP_PFP_UCODE_ADDR, 0);
1304 	WREG32(CP_ME_RAM_WADDR, 0);
1305 	WREG32(CP_ME_RAM_RADDR, 0);
1306 	return 0;
1307 }
1308 
1309 static int evergreen_cp_start(struct radeon_device *rdev)
1310 {
1311 	int r, i;
1312 	uint32_t cp_me;
1313 
1314 	r = radeon_ring_lock(rdev, 7);
1315 	if (r) {
1316 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1317 		return r;
1318 	}
1319 	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
1320 	radeon_ring_write(rdev, 0x1);
1321 	radeon_ring_write(rdev, 0x0);
1322 	radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
1323 	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1324 	radeon_ring_write(rdev, 0);
1325 	radeon_ring_write(rdev, 0);
1326 	radeon_ring_unlock_commit(rdev);
1327 
1328 	cp_me = 0xff;
1329 	WREG32(CP_ME_CNTL, cp_me);
1330 
1331 	r = radeon_ring_lock(rdev, evergreen_default_size + 19);
1332 	if (r) {
1333 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1334 		return r;
1335 	}
1336 
1337 	/* setup clear context state */
1338 	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1339 	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1340 
1341 	for (i = 0; i < evergreen_default_size; i++)
1342 		radeon_ring_write(rdev, evergreen_default_state[i]);
1343 
1344 	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1345 	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
1346 
1347 	/* set clear context state */
1348 	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
1349 	radeon_ring_write(rdev, 0);
1350 
1351 	/* SQ_VTX_BASE_VTX_LOC */
1352 	radeon_ring_write(rdev, 0xc0026f00);
1353 	radeon_ring_write(rdev, 0x00000000);
1354 	radeon_ring_write(rdev, 0x00000000);
1355 	radeon_ring_write(rdev, 0x00000000);
1356 
1357 	/* Clear consts */
1358 	radeon_ring_write(rdev, 0xc0036f00);
1359 	radeon_ring_write(rdev, 0x00000bc4);
1360 	radeon_ring_write(rdev, 0xffffffff);
1361 	radeon_ring_write(rdev, 0xffffffff);
1362 	radeon_ring_write(rdev, 0xffffffff);
1363 
1364 	radeon_ring_write(rdev, 0xc0026900);
1365 	radeon_ring_write(rdev, 0x00000316);
1366 	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1367 	radeon_ring_write(rdev, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
1368 
1369 	radeon_ring_unlock_commit(rdev);
1370 
1371 	return 0;
1372 }
1373 
1374 int evergreen_cp_resume(struct radeon_device *rdev)
1375 {
1376 	u32 tmp;
1377 	u32 rb_bufsz;
1378 	int r;
1379 
1380 	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
1381 	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
1382 				 SOFT_RESET_PA |
1383 				 SOFT_RESET_SH |
1384 				 SOFT_RESET_VGT |
1385 				 SOFT_RESET_SPI |
1386 				 SOFT_RESET_SX));
1387 	RREG32(GRBM_SOFT_RESET);
1388 	mdelay(15);
1389 	WREG32(GRBM_SOFT_RESET, 0);
1390 	RREG32(GRBM_SOFT_RESET);
1391 
1392 	/* Set ring buffer size */
1393 	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
1394 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1395 #ifdef __BIG_ENDIAN
1396 	tmp |= BUF_SWAP_32BIT;
1397 #endif
1398 	WREG32(CP_RB_CNTL, tmp);
1399 	WREG32(CP_SEM_WAIT_TIMER, 0x4);
1400 
1401 	/* Set the write pointer delay */
1402 	WREG32(CP_RB_WPTR_DELAY, 0);
1403 
1404 	/* Initialize the ring buffer's read and write pointers */
1405 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1406 	WREG32(CP_RB_RPTR_WR, 0);
1407 	rdev->cp.wptr = 0;
1408 	WREG32(CP_RB_WPTR, rdev->cp.wptr);
1409 
1410 	/* set the wb address whether it's enabled or not */
1411 	WREG32(CP_RB_RPTR_ADDR,
1412 	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
1413 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1414 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1415 
1416 	if (rdev->wb.enabled)
1417 		WREG32(SCRATCH_UMSK, 0xff);
1418 	else {
1419 		tmp |= RB_NO_UPDATE;
1420 		WREG32(SCRATCH_UMSK, 0);
1421 	}
1422 
1423 	mdelay(1);
1424 	WREG32(CP_RB_CNTL, tmp);
1425 
1426 	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
1427 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
1428 
1429 	rdev->cp.rptr = RREG32(CP_RB_RPTR);
1430 
1431 	evergreen_cp_start(rdev);
1432 	rdev->cp.ready = true;
1433 	r = radeon_ring_test(rdev);
1434 	if (r) {
1435 		rdev->cp.ready = false;
1436 		return r;
1437 	}
1438 	return 0;
1439 }
1440 
1441 /*
1442  * Core functions
1443  */
1444 static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
1445 						  u32 num_tile_pipes,
1446 						  u32 num_backends,
1447 						  u32 backend_disable_mask)
1448 {
1449 	u32 backend_map = 0;
1450 	u32 enabled_backends_mask = 0;
1451 	u32 enabled_backends_count = 0;
1452 	u32 cur_pipe;
1453 	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
1454 	u32 cur_backend = 0;
1455 	u32 i;
1456 	bool force_no_swizzle;
1457 
1458 	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
1459 		num_tile_pipes = EVERGREEN_MAX_PIPES;
1460 	if (num_tile_pipes < 1)
1461 		num_tile_pipes = 1;
1462 	if (num_backends > EVERGREEN_MAX_BACKENDS)
1463 		num_backends = EVERGREEN_MAX_BACKENDS;
1464 	if (num_backends < 1)
1465 		num_backends = 1;
1466 
1467 	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
1468 		if (((backend_disable_mask >> i) & 1) == 0) {
1469 			enabled_backends_mask |= (1 << i);
1470 			++enabled_backends_count;
1471 		}
1472 		if (enabled_backends_count == num_backends)
1473 			break;
1474 	}
1475 
1476 	if (enabled_backends_count == 0) {
1477 		enabled_backends_mask = 1;
1478 		enabled_backends_count = 1;
1479 	}
1480 
1481 	if (enabled_backends_count != num_backends)
1482 		num_backends = enabled_backends_count;
1483 
1484 	memset(swizzle_pipe, 0, sizeof(swizzle_pipe));
1485 	switch (rdev->family) {
1486 	case CHIP_CEDAR:
1487 	case CHIP_REDWOOD:
1488 	case CHIP_PALM:
1489 	case CHIP_SUMO:
1490 	case CHIP_SUMO2:
1491 	case CHIP_TURKS:
1492 	case CHIP_CAICOS:
1493 		force_no_swizzle = false;
1494 		break;
1495 	case CHIP_CYPRESS:
1496 	case CHIP_HEMLOCK:
1497 	case CHIP_JUNIPER:
1498 	case CHIP_BARTS:
1499 	default:
1500 		force_no_swizzle = true;
1501 		break;
1502 	}
1503 	if (force_no_swizzle) {
1504 		bool last_backend_enabled = false;
1505 
1506 		force_no_swizzle = false;
1507 		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
1508 			if (((enabled_backends_mask >> i) & 1) == 1) {
1509 				if (last_backend_enabled)
1510 					force_no_swizzle = true;
1511 				last_backend_enabled = true;
1512 			} else
1513 				last_backend_enabled = false;
1514 		}
1515 	}
1516 
1517 	switch (num_tile_pipes) {
1518 	case 1:
1519 	case 3:
1520 	case 5:
1521 	case 7:
1522 		DRM_ERROR("odd number of pipes!\n");
1523 		break;
1524 	case 2:
1525 		swizzle_pipe[0] = 0;
1526 		swizzle_pipe[1] = 1;
1527 		break;
1528 	case 4:
1529 		if (force_no_swizzle) {
1530 			swizzle_pipe[0] = 0;
1531 			swizzle_pipe[1] = 1;
1532 			swizzle_pipe[2] = 2;
1533 			swizzle_pipe[3] = 3;
1534 		} else {
1535 			swizzle_pipe[0] = 0;
1536 			swizzle_pipe[1] = 2;
1537 			swizzle_pipe[2] = 1;
1538 			swizzle_pipe[3] = 3;
1539 		}
1540 		break;
1541 	case 6:
1542 		if (force_no_swizzle) {
1543 			swizzle_pipe[0] = 0;
1544 			swizzle_pipe[1] = 1;
1545 			swizzle_pipe[2] = 2;
1546 			swizzle_pipe[3] = 3;
1547 			swizzle_pipe[4] = 4;
1548 			swizzle_pipe[5] = 5;
1549 		} else {
1550 			swizzle_pipe[0] = 0;
1551 			swizzle_pipe[1] = 2;
1552 			swizzle_pipe[2] = 4;
1553 			swizzle_pipe[3] = 1;
1554 			swizzle_pipe[4] = 3;
1555 			swizzle_pipe[5] = 5;
1556 		}
1557 		break;
1558 	case 8:
1559 		if (force_no_swizzle) {
1560 			swizzle_pipe[0] = 0;
1561 			swizzle_pipe[1] = 1;
1562 			swizzle_pipe[2] = 2;
1563 			swizzle_pipe[3] = 3;
1564 			swizzle_pipe[4] = 4;
1565 			swizzle_pipe[5] = 5;
1566 			swizzle_pipe[6] = 6;
1567 			swizzle_pipe[7] = 7;
1568 		} else {
1569 			swizzle_pipe[0] = 0;
1570 			swizzle_pipe[1] = 2;
1571 			swizzle_pipe[2] = 4;
1572 			swizzle_pipe[3] = 6;
1573 			swizzle_pipe[4] = 1;
1574 			swizzle_pipe[5] = 3;
1575 			swizzle_pipe[6] = 5;
1576 			swizzle_pipe[7] = 7;
1577 		}
1578 		break;
1579 	}
1580 
1581 	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1582 		while (((1 << cur_backend) & enabled_backends_mask) == 0)
1583 			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
1584 
1585 		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
1586 
1587 		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
1588 	}
1589 
1590 	return backend_map;
1591 }
1592 
1593 static void evergreen_program_channel_remap(struct radeon_device *rdev)
1594 {
1595 	u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
1596 
1597 	tmp = RREG32(MC_SHARED_CHMAP);
1598 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1599 	case 0:
1600 	case 1:
1601 	case 2:
1602 	case 3:
1603 	default:
1604 		/* default mapping */
1605 		mc_shared_chremap = 0x00fac688;
1606 		break;
1607 	}
1608 
1609 	switch (rdev->family) {
1610 	case CHIP_HEMLOCK:
1611 	case CHIP_CYPRESS:
1612 	case CHIP_BARTS:
1613 		tcp_chan_steer_lo = 0x54763210;
1614 		tcp_chan_steer_hi = 0x0000ba98;
1615 		break;
1616 	case CHIP_JUNIPER:
1617 	case CHIP_REDWOOD:
1618 	case CHIP_CEDAR:
1619 	case CHIP_PALM:
1620 	case CHIP_SUMO:
1621 	case CHIP_SUMO2:
1622 	case CHIP_TURKS:
1623 	case CHIP_CAICOS:
1624 	default:
1625 		tcp_chan_steer_lo = 0x76543210;
1626 		tcp_chan_steer_hi = 0x0000ba98;
1627 		break;
1628 	}
1629 
1630 	WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
1631 	WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
1632 	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
1633 }
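
/* TCP_CHAN_STEER_LO/HI appear to pack one 4-bit channel index per nibble.
 * The 0x54763210 value used for the wider-bus parts above leaves channels
 * 0-3 identity-mapped and swaps the 4/5 pair with the 6/7 pair, while
 * 0x76543210 / 0x0000ba98 is a straight identity mapping.
 */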
1634 
1635 static void evergreen_gpu_init(struct radeon_device *rdev)
1636 {
1637 	u32 cc_rb_backend_disable = 0;
1638 	u32 cc_gc_shader_pipe_config;
1639 	u32 gb_addr_config = 0;
1640 	u32 mc_shared_chmap, mc_arb_ramcfg;
1641 	u32 gb_backend_map;
1642 	u32 grbm_gfx_index;
1643 	u32 sx_debug_1;
1644 	u32 smx_dc_ctl0;
1645 	u32 sq_config;
1646 	u32 sq_lds_resource_mgmt;
1647 	u32 sq_gpr_resource_mgmt_1;
1648 	u32 sq_gpr_resource_mgmt_2;
1649 	u32 sq_gpr_resource_mgmt_3;
1650 	u32 sq_thread_resource_mgmt;
1651 	u32 sq_thread_resource_mgmt_2;
1652 	u32 sq_stack_resource_mgmt_1;
1653 	u32 sq_stack_resource_mgmt_2;
1654 	u32 sq_stack_resource_mgmt_3;
1655 	u32 vgt_cache_invalidation;
1656 	u32 hdp_host_path_cntl, tmp;
1657 	int i, j, num_shader_engines, ps_thread_count;
1658 
1659 	switch (rdev->family) {
1660 	case CHIP_CYPRESS:
1661 	case CHIP_HEMLOCK:
1662 		rdev->config.evergreen.num_ses = 2;
1663 		rdev->config.evergreen.max_pipes = 4;
1664 		rdev->config.evergreen.max_tile_pipes = 8;
1665 		rdev->config.evergreen.max_simds = 10;
1666 		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1667 		rdev->config.evergreen.max_gprs = 256;
1668 		rdev->config.evergreen.max_threads = 248;
1669 		rdev->config.evergreen.max_gs_threads = 32;
1670 		rdev->config.evergreen.max_stack_entries = 512;
1671 		rdev->config.evergreen.sx_num_of_sets = 4;
1672 		rdev->config.evergreen.sx_max_export_size = 256;
1673 		rdev->config.evergreen.sx_max_export_pos_size = 64;
1674 		rdev->config.evergreen.sx_max_export_smx_size = 192;
1675 		rdev->config.evergreen.max_hw_contexts = 8;
1676 		rdev->config.evergreen.sq_num_cf_insts = 2;
1677 
1678 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1679 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1680 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1681 		break;
1682 	case CHIP_JUNIPER:
1683 		rdev->config.evergreen.num_ses = 1;
1684 		rdev->config.evergreen.max_pipes = 4;
1685 		rdev->config.evergreen.max_tile_pipes = 4;
1686 		rdev->config.evergreen.max_simds = 10;
1687 		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1688 		rdev->config.evergreen.max_gprs = 256;
1689 		rdev->config.evergreen.max_threads = 248;
1690 		rdev->config.evergreen.max_gs_threads = 32;
1691 		rdev->config.evergreen.max_stack_entries = 512;
1692 		rdev->config.evergreen.sx_num_of_sets = 4;
1693 		rdev->config.evergreen.sx_max_export_size = 256;
1694 		rdev->config.evergreen.sx_max_export_pos_size = 64;
1695 		rdev->config.evergreen.sx_max_export_smx_size = 192;
1696 		rdev->config.evergreen.max_hw_contexts = 8;
1697 		rdev->config.evergreen.sq_num_cf_insts = 2;
1698 
1699 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1700 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1701 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1702 		break;
1703 	case CHIP_REDWOOD:
1704 		rdev->config.evergreen.num_ses = 1;
1705 		rdev->config.evergreen.max_pipes = 4;
1706 		rdev->config.evergreen.max_tile_pipes = 4;
1707 		rdev->config.evergreen.max_simds = 5;
1708 		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1709 		rdev->config.evergreen.max_gprs = 256;
1710 		rdev->config.evergreen.max_threads = 248;
1711 		rdev->config.evergreen.max_gs_threads = 32;
1712 		rdev->config.evergreen.max_stack_entries = 256;
1713 		rdev->config.evergreen.sx_num_of_sets = 4;
1714 		rdev->config.evergreen.sx_max_export_size = 256;
1715 		rdev->config.evergreen.sx_max_export_pos_size = 64;
1716 		rdev->config.evergreen.sx_max_export_smx_size = 192;
1717 		rdev->config.evergreen.max_hw_contexts = 8;
1718 		rdev->config.evergreen.sq_num_cf_insts = 2;
1719 
1720 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1721 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1722 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1723 		break;
1724 	case CHIP_CEDAR:
1725 	default:
1726 		rdev->config.evergreen.num_ses = 1;
1727 		rdev->config.evergreen.max_pipes = 2;
1728 		rdev->config.evergreen.max_tile_pipes = 2;
1729 		rdev->config.evergreen.max_simds = 2;
1730 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1731 		rdev->config.evergreen.max_gprs = 256;
1732 		rdev->config.evergreen.max_threads = 192;
1733 		rdev->config.evergreen.max_gs_threads = 16;
1734 		rdev->config.evergreen.max_stack_entries = 256;
1735 		rdev->config.evergreen.sx_num_of_sets = 4;
1736 		rdev->config.evergreen.sx_max_export_size = 128;
1737 		rdev->config.evergreen.sx_max_export_pos_size = 32;
1738 		rdev->config.evergreen.sx_max_export_smx_size = 96;
1739 		rdev->config.evergreen.max_hw_contexts = 4;
1740 		rdev->config.evergreen.sq_num_cf_insts = 1;
1741 
1742 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1743 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1744 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1745 		break;
1746 	case CHIP_PALM:
1747 		rdev->config.evergreen.num_ses = 1;
1748 		rdev->config.evergreen.max_pipes = 2;
1749 		rdev->config.evergreen.max_tile_pipes = 2;
1750 		rdev->config.evergreen.max_simds = 2;
1751 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1752 		rdev->config.evergreen.max_gprs = 256;
1753 		rdev->config.evergreen.max_threads = 192;
1754 		rdev->config.evergreen.max_gs_threads = 16;
1755 		rdev->config.evergreen.max_stack_entries = 256;
1756 		rdev->config.evergreen.sx_num_of_sets = 4;
1757 		rdev->config.evergreen.sx_max_export_size = 128;
1758 		rdev->config.evergreen.sx_max_export_pos_size = 32;
1759 		rdev->config.evergreen.sx_max_export_smx_size = 96;
1760 		rdev->config.evergreen.max_hw_contexts = 4;
1761 		rdev->config.evergreen.sq_num_cf_insts = 1;
1762 
1763 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1764 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1765 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1766 		break;
1767 	case CHIP_SUMO:
1768 		rdev->config.evergreen.num_ses = 1;
1769 		rdev->config.evergreen.max_pipes = 4;
1770 		rdev->config.evergreen.max_tile_pipes = 2;
1771 		if (rdev->pdev->device == 0x9648)
1772 			rdev->config.evergreen.max_simds = 3;
1773 		else if ((rdev->pdev->device == 0x9647) ||
1774 			 (rdev->pdev->device == 0x964a))
1775 			rdev->config.evergreen.max_simds = 4;
1776 		else
1777 			rdev->config.evergreen.max_simds = 5;
1778 		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1779 		rdev->config.evergreen.max_gprs = 256;
1780 		rdev->config.evergreen.max_threads = 248;
1781 		rdev->config.evergreen.max_gs_threads = 32;
1782 		rdev->config.evergreen.max_stack_entries = 256;
1783 		rdev->config.evergreen.sx_num_of_sets = 4;
1784 		rdev->config.evergreen.sx_max_export_size = 256;
1785 		rdev->config.evergreen.sx_max_export_pos_size = 64;
1786 		rdev->config.evergreen.sx_max_export_smx_size = 192;
1787 		rdev->config.evergreen.max_hw_contexts = 8;
1788 		rdev->config.evergreen.sq_num_cf_insts = 2;
1789 
1790 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1791 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1792 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1793 		break;
1794 	case CHIP_SUMO2:
1795 		rdev->config.evergreen.num_ses = 1;
1796 		rdev->config.evergreen.max_pipes = 4;
1797 		rdev->config.evergreen.max_tile_pipes = 4;
1798 		rdev->config.evergreen.max_simds = 2;
1799 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1800 		rdev->config.evergreen.max_gprs = 256;
1801 		rdev->config.evergreen.max_threads = 248;
1802 		rdev->config.evergreen.max_gs_threads = 32;
1803 		rdev->config.evergreen.max_stack_entries = 512;
1804 		rdev->config.evergreen.sx_num_of_sets = 4;
1805 		rdev->config.evergreen.sx_max_export_size = 256;
1806 		rdev->config.evergreen.sx_max_export_pos_size = 64;
1807 		rdev->config.evergreen.sx_max_export_smx_size = 192;
1808 		rdev->config.evergreen.max_hw_contexts = 8;
1809 		rdev->config.evergreen.sq_num_cf_insts = 2;
1810 
1811 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1812 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1813 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1814 		break;
1815 	case CHIP_BARTS:
1816 		rdev->config.evergreen.num_ses = 2;
1817 		rdev->config.evergreen.max_pipes = 4;
1818 		rdev->config.evergreen.max_tile_pipes = 8;
1819 		rdev->config.evergreen.max_simds = 7;
1820 		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1821 		rdev->config.evergreen.max_gprs = 256;
1822 		rdev->config.evergreen.max_threads = 248;
1823 		rdev->config.evergreen.max_gs_threads = 32;
1824 		rdev->config.evergreen.max_stack_entries = 512;
1825 		rdev->config.evergreen.sx_num_of_sets = 4;
1826 		rdev->config.evergreen.sx_max_export_size = 256;
1827 		rdev->config.evergreen.sx_max_export_pos_size = 64;
1828 		rdev->config.evergreen.sx_max_export_smx_size = 192;
1829 		rdev->config.evergreen.max_hw_contexts = 8;
1830 		rdev->config.evergreen.sq_num_cf_insts = 2;
1831 
1832 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1833 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1834 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1835 		break;
1836 	case CHIP_TURKS:
1837 		rdev->config.evergreen.num_ses = 1;
1838 		rdev->config.evergreen.max_pipes = 4;
1839 		rdev->config.evergreen.max_tile_pipes = 4;
1840 		rdev->config.evergreen.max_simds = 6;
1841 		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1842 		rdev->config.evergreen.max_gprs = 256;
1843 		rdev->config.evergreen.max_threads = 248;
1844 		rdev->config.evergreen.max_gs_threads = 32;
1845 		rdev->config.evergreen.max_stack_entries = 256;
1846 		rdev->config.evergreen.sx_num_of_sets = 4;
1847 		rdev->config.evergreen.sx_max_export_size = 256;
1848 		rdev->config.evergreen.sx_max_export_pos_size = 64;
1849 		rdev->config.evergreen.sx_max_export_smx_size = 192;
1850 		rdev->config.evergreen.max_hw_contexts = 8;
1851 		rdev->config.evergreen.sq_num_cf_insts = 2;
1852 
1853 		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1854 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1855 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1856 		break;
1857 	case CHIP_CAICOS:
1858 		rdev->config.evergreen.num_ses = 1;
1859 		rdev->config.evergreen.max_pipes = 4;
1860 		rdev->config.evergreen.max_tile_pipes = 2;
1861 		rdev->config.evergreen.max_simds = 2;
1862 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1863 		rdev->config.evergreen.max_gprs = 256;
1864 		rdev->config.evergreen.max_threads = 192;
1865 		rdev->config.evergreen.max_gs_threads = 16;
1866 		rdev->config.evergreen.max_stack_entries = 256;
1867 		rdev->config.evergreen.sx_num_of_sets = 4;
1868 		rdev->config.evergreen.sx_max_export_size = 128;
1869 		rdev->config.evergreen.sx_max_export_pos_size = 32;
1870 		rdev->config.evergreen.sx_max_export_smx_size = 96;
1871 		rdev->config.evergreen.max_hw_contexts = 4;
1872 		rdev->config.evergreen.sq_num_cf_insts = 1;
1873 
1874 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1875 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1876 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1877 		break;
1878 	}
1879 
1880 	/* Initialize HDP */
1881 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1882 		WREG32((0x2c14 + j), 0x00000000);
1883 		WREG32((0x2c18 + j), 0x00000000);
1884 		WREG32((0x2c1c + j), 0x00000000);
1885 		WREG32((0x2c20 + j), 0x00000000);
1886 		WREG32((0x2c24 + j), 0x00000000);
1887 	}
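	/* The loop above zeroes five consecutive registers (0x2c14-0x2c24)
	 * in each of the 32 HDP register groups, stepping 0x18 bytes per
	 * group. */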
1888 
1889 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1890 
1891 	evergreen_fix_pci_max_read_req_size(rdev);
1892 
1893 	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
1894 
1895 	cc_gc_shader_pipe_config |=
1896 		INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
1897 				  & EVERGREEN_MAX_PIPES_MASK);
1898 	cc_gc_shader_pipe_config |=
1899 		INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
1900 			       & EVERGREEN_MAX_SIMDS_MASK);
1901 
1902 	cc_rb_backend_disable =
1903 		BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
1904 				& EVERGREEN_MAX_BACKENDS_MASK);
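	/* The (MASK << n) & MASK idiom used above yields a mask of every unit
	 * from index n upward, i.e. it marks all but the first n pipes, SIMDs
	 * or backends inactive.  For example, assuming an 8-bit
	 * EVERGREEN_MAX_BACKENDS_MASK of 0xff, max_backends = 4 gives
	 * (0xff << 4) & 0xff = 0xf0, disabling backends 4-7.
	 */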
1905 
1906 
1907 	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
1908 	if (rdev->flags & RADEON_IS_IGP)
1909 		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
1910 	else
1911 		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
1912 
1913 	switch (rdev->config.evergreen.max_tile_pipes) {
1914 	case 1:
1915 	default:
1916 		gb_addr_config |= NUM_PIPES(0);
1917 		break;
1918 	case 2:
1919 		gb_addr_config |= NUM_PIPES(1);
1920 		break;
1921 	case 4:
1922 		gb_addr_config |= NUM_PIPES(2);
1923 		break;
1924 	case 8:
1925 		gb_addr_config |= NUM_PIPES(3);
1926 		break;
1927 	}
1928 
1929 	gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1930 	gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
1931 	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
1932 	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
1933 	gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
1934 	gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
1935 
1936 	if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
1937 		gb_addr_config |= ROW_SIZE(2);
1938 	else
1939 		gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
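	/* For example, a Cypress part (8 tile pipes, 2 shader engines as set
	 * above) ends up with NUM_PIPES(3) | NUM_SHADER_ENGINES(1) here, plus
	 * interleave and row-size fields derived from the RAMCFG straps. */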
1940 
1941 	if (rdev->ddev->pdev->device == 0x689e) {
1942 		u32 efuse_straps_4;
1943 		u32 efuse_straps_3;
1944 		u8 efuse_box_bit_131_124;
1945 
1946 		WREG32(RCU_IND_INDEX, 0x204);
1947 		efuse_straps_4 = RREG32(RCU_IND_DATA);
1948 		WREG32(RCU_IND_INDEX, 0x203);
1949 		efuse_straps_3 = RREG32(RCU_IND_DATA);
1950 		efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
1951 
1952 		switch(efuse_box_bit_131_124) {
1953 		case 0x00:
1954 			gb_backend_map = 0x76543210;
1955 			break;
1956 		case 0x55:
1957 			gb_backend_map = 0x77553311;
1958 			break;
1959 		case 0x56:
1960 			gb_backend_map = 0x77553300;
1961 			break;
1962 		case 0x59:
1963 			gb_backend_map = 0x77552211;
1964 			break;
1965 		case 0x66:
1966 			gb_backend_map = 0x77443300;
1967 			break;
1968 		case 0x99:
1969 			gb_backend_map = 0x66552211;
1970 			break;
1971 		case 0x5a:
1972 			gb_backend_map = 0x77552200;
1973 			break;
1974 		case 0xaa:
1975 			gb_backend_map = 0x66442200;
1976 			break;
1977 		case 0x95:
1978 			gb_backend_map = 0x66553311;
1979 			break;
1980 		default:
1981 			DRM_ERROR("bad backend map, using default\n");
1982 			gb_backend_map =
1983 				evergreen_get_tile_pipe_to_backend_map(rdev,
1984 								       rdev->config.evergreen.max_tile_pipes,
1985 								       rdev->config.evergreen.max_backends,
1986 								       ((EVERGREEN_MAX_BACKENDS_MASK <<
1987 								   rdev->config.evergreen.max_backends) &
1988 									EVERGREEN_MAX_BACKENDS_MASK));
1989 			break;
1990 		}
1991 	} else if (rdev->ddev->pdev->device == 0x68b9) {
1992 		u32 efuse_straps_3;
1993 		u8 efuse_box_bit_127_124;
1994 
1995 		WREG32(RCU_IND_INDEX, 0x203);
1996 		efuse_straps_3 = RREG32(RCU_IND_DATA);
1997 		efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
1998 
1999 		switch(efuse_box_bit_127_124) {
2000 		case 0x0:
2001 			gb_backend_map = 0x00003210;
2002 			break;
2003 		case 0x5:
2004 		case 0x6:
2005 		case 0x9:
2006 		case 0xa:
2007 			gb_backend_map = 0x00003311;
2008 			break;
2009 		default:
2010 			DRM_ERROR("bad backend map, using default\n");
2011 			gb_backend_map =
2012 				evergreen_get_tile_pipe_to_backend_map(rdev,
2013 								       rdev->config.evergreen.max_tile_pipes,
2014 								       rdev->config.evergreen.max_backends,
2015 								       ((EVERGREEN_MAX_BACKENDS_MASK <<
2016 								   rdev->config.evergreen.max_backends) &
2017 									EVERGREEN_MAX_BACKENDS_MASK));
2018 			break;
2019 		}
2020 	} else {
2021 		switch (rdev->family) {
2022 		case CHIP_CYPRESS:
2023 		case CHIP_HEMLOCK:
2024 		case CHIP_BARTS:
2025 			gb_backend_map = 0x66442200;
2026 			break;
2027 		case CHIP_JUNIPER:
2028 			gb_backend_map = 0x00002200;
2029 			break;
2030 		default:
2031 			gb_backend_map =
2032 				evergreen_get_tile_pipe_to_backend_map(rdev,
2033 								       rdev->config.evergreen.max_tile_pipes,
2034 								       rdev->config.evergreen.max_backends,
2035 								       ((EVERGREEN_MAX_BACKENDS_MASK <<
2036 									 rdev->config.evergreen.max_backends) &
2037 									EVERGREEN_MAX_BACKENDS_MASK));
2038 		}
2039 	}
2040 
2041 	/* Set up the tiling info dword.  gb_addr_config is not adequate since
2042 	 * it lacks bank info, so create a custom tiling dword.
2043 	 * bits 3:0   num_pipes
2044 	 * bits 7:4   num_banks
2045 	 * bits 11:8  group_size
2046 	 * bits 15:12 row_size
2047 	 */
2048 	rdev->config.evergreen.tile_config = 0;
2049 	switch (rdev->config.evergreen.max_tile_pipes) {
2050 	case 1:
2051 	default:
2052 		rdev->config.evergreen.tile_config |= (0 << 0);
2053 		break;
2054 	case 2:
2055 		rdev->config.evergreen.tile_config |= (1 << 0);
2056 		break;
2057 	case 4:
2058 		rdev->config.evergreen.tile_config |= (2 << 0);
2059 		break;
2060 	case 8:
2061 		rdev->config.evergreen.tile_config |= (3 << 0);
2062 		break;
2063 	}
2064 	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
2065 	if (rdev->flags & RADEON_IS_IGP)
2066 		rdev->config.evergreen.tile_config |= 1 << 4;
2067 	else
2068 		rdev->config.evergreen.tile_config |=
2069 			((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
2070 	rdev->config.evergreen.tile_config |=
2071 		((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
2072 	rdev->config.evergreen.tile_config |=
2073 		((gb_addr_config & 0x30000000) >> 28) << 12;
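	/* Illustrative decode of the dword built above: a discrete part with
	 * 8 tile pipes, NOOFBANK = 1 (8 banks), BURSTLENGTH = 1 and
	 * gb_addr_config bits 29:28 = 1 would give tile_config = 0x1113. */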
2074 
2075 	rdev->config.evergreen.backend_map = gb_backend_map;
2076 	WREG32(GB_BACKEND_MAP, gb_backend_map);
2077 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
2078 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2079 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
2080 
2081 	evergreen_program_channel_remap(rdev);
2082 
2083 	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
2084 	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
2085 
2086 	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
2087 		u32 rb = cc_rb_backend_disable | (0xf0 << 16);
2088 		u32 sp = cc_gc_shader_pipe_config;
2089 		u32 gfx = grbm_gfx_index | SE_INDEX(i);
2090 
2091 		if (i == num_shader_engines) {
2092 			rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
2093 			sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
2094 		}
2095 
2096 		WREG32(GRBM_GFX_INDEX, gfx);
2097 		WREG32(RLC_GFX_INDEX, gfx);
2098 
2099 		WREG32(CC_RB_BACKEND_DISABLE, rb);
2100 		WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
2101 		WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
2102 		WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
2103 	}
2104 
2105 	grbm_gfx_index |= SE_BROADCAST_WRITES;
2106 	WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
2107 	WREG32(RLC_GFX_INDEX, grbm_gfx_index);
2108 
2109 	WREG32(CGTS_SYS_TCC_DISABLE, 0);
2110 	WREG32(CGTS_TCC_DISABLE, 0);
2111 	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
2112 	WREG32(CGTS_USER_TCC_DISABLE, 0);
2113 
2114 	/* set HW defaults for 3D engine */
2115 	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
2116 				     ROQ_IB2_START(0x2b)));
2117 
2118 	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
2119 
2120 	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
2121 			     SYNC_GRADIENT |
2122 			     SYNC_WALKER |
2123 			     SYNC_ALIGNER));
2124 
2125 	sx_debug_1 = RREG32(SX_DEBUG_1);
2126 	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
2127 	WREG32(SX_DEBUG_1, sx_debug_1);
2128 
2129 
2130 	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
2131 	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
2132 	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
2133 	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
2134 
2135 	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
2136 					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
2137 					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
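	/* Each field above is the per-type export size divided by four, minus
	 * one: e.g. Cypress's sx_max_export_size = 256 programs
	 * COLOR_BUFFER_SIZE(63) and its sx_max_export_pos_size = 64 programs
	 * POSITION_BUFFER_SIZE(15). */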
2138 
2139 	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
2140 				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
2141 				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
2142 
2143 	WREG32(VGT_NUM_INSTANCES, 1);
2144 	WREG32(SPI_CONFIG_CNTL, 0);
2145 	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
2146 	WREG32(CP_PERFMON_CNTL, 0);
2147 
2148 	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
2149 				  FETCH_FIFO_HIWATER(0x4) |
2150 				  DONE_FIFO_HIWATER(0xe0) |
2151 				  ALU_UPDATE_FIFO_HIWATER(0x8)));
2152 
2153 	sq_config = RREG32(SQ_CONFIG);
2154 	sq_config &= ~(PS_PRIO(3) |
2155 		       VS_PRIO(3) |
2156 		       GS_PRIO(3) |
2157 		       ES_PRIO(3));
2158 	sq_config |= (VC_ENABLE |
2159 		      EXPORT_SRC_C |
2160 		      PS_PRIO(0) |
2161 		      VS_PRIO(1) |
2162 		      GS_PRIO(2) |
2163 		      ES_PRIO(3));
2164 
2165 	switch (rdev->family) {
2166 	case CHIP_CEDAR:
2167 	case CHIP_PALM:
2168 	case CHIP_SUMO:
2169 	case CHIP_SUMO2:
2170 	case CHIP_CAICOS:
2171 		/* no vertex cache */
2172 		sq_config &= ~VC_ENABLE;
2173 		break;
2174 	default:
2175 		break;
2176 	}
2177 
2178 	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
2179 
2180 	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
2181 	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
2182 	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
2183 	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2184 	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
2185 	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
2186 	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
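	/* With max_gprs = 256 the arithmetic above reserves 4 * 2 clause-temp
	 * GPRs and splits the remaining 248 in a 12:6:4:4:3:3 ratio across
	 * PS/VS/GS/ES/HS/LS, i.e. 93/46/31/31/23/23 GPRs per stage. */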
2187 
2188 	switch (rdev->family) {
2189 	case CHIP_CEDAR:
2190 	case CHIP_PALM:
2191 	case CHIP_SUMO:
2192 	case CHIP_SUMO2:
2193 		ps_thread_count = 96;
2194 		break;
2195 	default:
2196 		ps_thread_count = 128;
2197 		break;
2198 	}
2199 
2200 	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
2201 	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2202 	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2203 	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2204 	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
2205 	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
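	/* Each non-PS stage gets one sixth of the leftover threads, rounded
	 * down to a multiple of 8: e.g. max_threads = 248 with
	 * ps_thread_count = 128 leaves 120, so VS/GS/ES/HS/LS get
	 * (120 / 6 / 8) * 8 = 16 threads apiece. */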
2206 
2207 	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2208 	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2209 	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2210 	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2211 	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2212 	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
2213 
2214 	WREG32(SQ_CONFIG, sq_config);
2215 	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
2216 	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
2217 	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
2218 	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
2219 	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
2220 	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
2221 	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
2222 	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
2223 	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
2224 	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);
2225 
2226 	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
2227 					  FORCE_EOV_MAX_REZ_CNT(255)));
2228 
2229 	switch (rdev->family) {
2230 	case CHIP_CEDAR:
2231 	case CHIP_PALM:
2232 	case CHIP_SUMO:
2233 	case CHIP_SUMO2:
2234 	case CHIP_CAICOS:
2235 		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
2236 		break;
2237 	default:
2238 		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
2239 		break;
2240 	}
2241 	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
2242 	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
2243 
2244 	WREG32(VGT_GS_VERTEX_REUSE, 16);
2245 	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
2246 	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
2247 
2248 	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
2249 	WREG32(VGT_OUT_DEALLOC_CNTL, 16);
2250 
2251 	WREG32(CB_PERF_CTR0_SEL_0, 0);
2252 	WREG32(CB_PERF_CTR0_SEL_1, 0);
2253 	WREG32(CB_PERF_CTR1_SEL_0, 0);
2254 	WREG32(CB_PERF_CTR1_SEL_1, 0);
2255 	WREG32(CB_PERF_CTR2_SEL_0, 0);
2256 	WREG32(CB_PERF_CTR2_SEL_1, 0);
2257 	WREG32(CB_PERF_CTR3_SEL_0, 0);
2258 	WREG32(CB_PERF_CTR3_SEL_1, 0);
2259 
2260 	/* clear render buffer base addresses */
2261 	WREG32(CB_COLOR0_BASE, 0);
2262 	WREG32(CB_COLOR1_BASE, 0);
2263 	WREG32(CB_COLOR2_BASE, 0);
2264 	WREG32(CB_COLOR3_BASE, 0);
2265 	WREG32(CB_COLOR4_BASE, 0);
2266 	WREG32(CB_COLOR5_BASE, 0);
2267 	WREG32(CB_COLOR6_BASE, 0);
2268 	WREG32(CB_COLOR7_BASE, 0);
2269 	WREG32(CB_COLOR8_BASE, 0);
2270 	WREG32(CB_COLOR9_BASE, 0);
2271 	WREG32(CB_COLOR10_BASE, 0);
2272 	WREG32(CB_COLOR11_BASE, 0);
2273 
2274 	/* set the shader const cache sizes to 0 */
2275 	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
2276 		WREG32(i, 0);
2277 	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
2278 		WREG32(i, 0);
2279 
2280 	tmp = RREG32(HDP_MISC_CNTL);
2281 	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
2282 	WREG32(HDP_MISC_CNTL, tmp);
2283 
2284 	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
2285 	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
2286 
2287 	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
2288 
2289 	udelay(50);
2290 
2291 }
2292 
2293 int evergreen_mc_init(struct radeon_device *rdev)
2294 {
2295 	u32 tmp;
2296 	int chansize, numchan;
2297 
2298 	/* Get VRAM information */
2299 	rdev->mc.vram_is_ddr = true;
2300 	if (rdev->flags & RADEON_IS_IGP)
2301 		tmp = RREG32(FUS_MC_ARB_RAMCFG);
2302 	else
2303 		tmp = RREG32(MC_ARB_RAMCFG);
2304 	if (tmp & CHANSIZE_OVERRIDE) {
2305 		chansize = 16;
2306 	} else if (tmp & CHANSIZE_MASK) {
2307 		chansize = 64;
2308 	} else {
2309 		chansize = 32;
2310 	}
2311 	tmp = RREG32(MC_SHARED_CHMAP);
2312 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
2313 	case 0:
2314 	default:
2315 		numchan = 1;
2316 		break;
2317 	case 1:
2318 		numchan = 2;
2319 		break;
2320 	case 2:
2321 		numchan = 4;
2322 		break;
2323 	case 3:
2324 		numchan = 8;
2325 		break;
2326 	}
2327 	rdev->mc.vram_width = numchan * chansize;
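	/* e.g. NOOFCHAN = 2 (4 channels) with 32-bit channels gives a
	 * 4 * 32 = 128-bit effective bus */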
2328 	/* Could the aperture size report 0? */
2329 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2330 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
2331 	/* Setup GPU memory space */
2332 	if (rdev->flags & RADEON_IS_IGP) {
2333 		/* size in bytes on fusion */
2334 		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
2335 		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
2336 	} else {
2337 		/* size in MB on evergreen */
2338 		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2339 		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2340 	}
2341 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
2342 	r700_vram_gtt_location(rdev, &rdev->mc);
2343 	radeon_update_bandwidth_info(rdev);
2344 
2345 	return 0;
2346 }
2347 
2348 bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
2349 {
2350 	u32 srbm_status;
2351 	u32 grbm_status;
2352 	u32 grbm_status_se0, grbm_status_se1;
2353 	struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
2354 	int r;
2355 
2356 	srbm_status = RREG32(SRBM_STATUS);
2357 	grbm_status = RREG32(GRBM_STATUS);
2358 	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
2359 	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
2360 	if (!(grbm_status & GUI_ACTIVE)) {
2361 		r100_gpu_lockup_update(lockup, &rdev->cp);
2362 		return false;
2363 	}
2364 	/* force CP activity */
2365 	r = radeon_ring_lock(rdev, 2);
2366 	if (!r) {
2367 		/* PACKET2 NOP */
2368 		radeon_ring_write(rdev, 0x80000000);
2369 		radeon_ring_write(rdev, 0x80000000);
2370 		radeon_ring_unlock_commit(rdev);
2371 	}
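	/* 0x80000000 is a PM4 type-2 (filler/NOP) packet header; a live CP
	 * will consume the two dwords and advance its read pointer, which
	 * the lockup check below compares against the saved CP state. */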
2372 	rdev->cp.rptr = RREG32(CP_RB_RPTR);
2373 	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
2374 }
2375 
2376 static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
2377 {
2378 	struct evergreen_mc_save save;
2379 	u32 grbm_reset = 0;
2380 
2381 	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2382 		return 0;
2383 
2384 	dev_info(rdev->dev, "GPU softreset\n");
2385 	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
2386 		RREG32(GRBM_STATUS));
2387 	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
2388 		RREG32(GRBM_STATUS_SE0));
2389 	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
2390 		RREG32(GRBM_STATUS_SE1));
2391 	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
2392 		RREG32(SRBM_STATUS));
2393 	evergreen_mc_stop(rdev, &save);
2394 	if (evergreen_mc_wait_for_idle(rdev)) {
2395 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
2396 	}
2397 	/* Disable CP parsing/prefetching */
2398 	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
2399 
2400 	/* reset all the gfx blocks */
2401 	grbm_reset = (SOFT_RESET_CP |
2402 		      SOFT_RESET_CB |
2403 		      SOFT_RESET_DB |
2404 		      SOFT_RESET_PA |
2405 		      SOFT_RESET_SC |
2406 		      SOFT_RESET_SPI |
2407 		      SOFT_RESET_SH |
2408 		      SOFT_RESET_SX |
2409 		      SOFT_RESET_TC |
2410 		      SOFT_RESET_TA |
2411 		      SOFT_RESET_VC |
2412 		      SOFT_RESET_VGT);
2413 
2414 	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
2415 	WREG32(GRBM_SOFT_RESET, grbm_reset);
2416 	(void)RREG32(GRBM_SOFT_RESET);
2417 	udelay(50);
2418 	WREG32(GRBM_SOFT_RESET, 0);
2419 	(void)RREG32(GRBM_SOFT_RESET);
2420 	/* Wait a little for things to settle down */
2421 	udelay(50);
2422 	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
2423 		RREG32(GRBM_STATUS));
2424 	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
2425 		RREG32(GRBM_STATUS_SE0));
2426 	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
2427 		RREG32(GRBM_STATUS_SE1));
2428 	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
2429 		RREG32(SRBM_STATUS));
2430 	evergreen_mc_resume(rdev, &save);
2431 	return 0;
2432 }
2433 
2434 int evergreen_asic_reset(struct radeon_device *rdev)
2435 {
2436 	return evergreen_gpu_soft_reset(rdev);
2437 }
2438 
2439 /* Interrupts */
2440 
2441 u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
2442 {
2443 	switch (crtc) {
2444 	case 0:
2445 		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
2446 	case 1:
2447 		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
2448 	case 2:
2449 		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
2450 	case 3:
2451 		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
2452 	case 4:
2453 		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
2454 	case 5:
2455 		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
2456 	default:
2457 		return 0;
2458 	}
2459 }
2460 
2461 void evergreen_disable_interrupt_state(struct radeon_device *rdev)
2462 {
2463 	u32 tmp;
2464 
2465 	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2466 	WREG32(GRBM_INT_CNTL, 0);
2467 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2468 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
2469 	if (rdev->num_crtc >= 4) {
2470 		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2471 		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
2472 	}
2473 	if (rdev->num_crtc >= 6) {
2474 		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2475 		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2476 	}
2477 
2478 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2479 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
2480 	if (rdev->num_crtc >= 4) {
2481 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2482 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
2483 	}
2484 	if (rdev->num_crtc >= 6) {
2485 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2486 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2487 	}
2488 
2489 	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2490 	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2491 
2492 	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2493 	WREG32(DC_HPD1_INT_CONTROL, tmp);
2494 	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2495 	WREG32(DC_HPD2_INT_CONTROL, tmp);
2496 	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2497 	WREG32(DC_HPD3_INT_CONTROL, tmp);
2498 	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2499 	WREG32(DC_HPD4_INT_CONTROL, tmp);
2500 	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2501 	WREG32(DC_HPD5_INT_CONTROL, tmp);
2502 	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2503 	WREG32(DC_HPD6_INT_CONTROL, tmp);
2504 
2505 }
2506 
2507 int evergreen_irq_set(struct radeon_device *rdev)
2508 {
2509 	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2510 	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
2511 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
2512 	u32 grbm_int_cntl = 0;
2513 	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
2514 
2515 	if (!rdev->irq.installed) {
2516 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
2517 		return -EINVAL;
2518 	}
2519 	/* don't enable anything if the ih is disabled */
2520 	if (!rdev->ih.enabled) {
2521 		r600_disable_interrupts(rdev);
2522 		/* force the active interrupt state to all disabled */
2523 		evergreen_disable_interrupt_state(rdev);
2524 		return 0;
2525 	}
2526 
2527 	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2528 	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2529 	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2530 	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
2531 	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
2532 	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
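
	/* The reads above are read-modify-write setups: only DC_HPDx_INT_EN
	 * is cleared so polarity and the other control bits survive; the
	 * enable bit is OR'ed back in below for connectors with hpd irqs
	 * requested. */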
2533 
2534 	if (rdev->irq.sw_int) {
2535 		DRM_DEBUG("evergreen_irq_set: sw int\n");
2536 		cp_int_cntl |= RB_INT_ENABLE;
2537 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
2538 	}
2539 	if (rdev->irq.crtc_vblank_int[0] ||
2540 	    rdev->irq.pflip[0]) {
2541 		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
2542 		crtc1 |= VBLANK_INT_MASK;
2543 	}
2544 	if (rdev->irq.crtc_vblank_int[1] ||
2545 	    rdev->irq.pflip[1]) {
2546 		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
2547 		crtc2 |= VBLANK_INT_MASK;
2548 	}
2549 	if (rdev->irq.crtc_vblank_int[2] ||
2550 	    rdev->irq.pflip[2]) {
2551 		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
2552 		crtc3 |= VBLANK_INT_MASK;
2553 	}
2554 	if (rdev->irq.crtc_vblank_int[3] ||
2555 	    rdev->irq.pflip[3]) {
2556 		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
2557 		crtc4 |= VBLANK_INT_MASK;
2558 	}
2559 	if (rdev->irq.crtc_vblank_int[4] ||
2560 	    rdev->irq.pflip[4]) {
2561 		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
2562 		crtc5 |= VBLANK_INT_MASK;
2563 	}
2564 	if (rdev->irq.crtc_vblank_int[5] ||
2565 	    rdev->irq.pflip[5]) {
2566 		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
2567 		crtc6 |= VBLANK_INT_MASK;
2568 	}
2569 	if (rdev->irq.hpd[0]) {
2570 		DRM_DEBUG("evergreen_irq_set: hpd 1\n");
2571 		hpd1 |= DC_HPDx_INT_EN;
2572 	}
2573 	if (rdev->irq.hpd[1]) {
2574 		DRM_DEBUG("evergreen_irq_set: hpd 2\n");
2575 		hpd2 |= DC_HPDx_INT_EN;
2576 	}
2577 	if (rdev->irq.hpd[2]) {
2578 		DRM_DEBUG("evergreen_irq_set: hpd 3\n");
2579 		hpd3 |= DC_HPDx_INT_EN;
2580 	}
2581 	if (rdev->irq.hpd[3]) {
2582 		DRM_DEBUG("evergreen_irq_set: hpd 4\n");
2583 		hpd4 |= DC_HPDx_INT_EN;
2584 	}
2585 	if (rdev->irq.hpd[4]) {
2586 		DRM_DEBUG("evergreen_irq_set: hpd 5\n");
2587 		hpd5 |= DC_HPDx_INT_EN;
2588 	}
2589 	if (rdev->irq.hpd[5]) {
2590 		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
2591 		hpd6 |= DC_HPDx_INT_EN;
2592 	}
2593 	if (rdev->irq.gui_idle) {
2594 		DRM_DEBUG("gui idle\n");
2595 		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
2596 	}
2597 
2598 	WREG32(CP_INT_CNTL, cp_int_cntl);
2599 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
2600 
2601 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
2602 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
2603 	if (rdev->num_crtc >= 4) {
2604 		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
2605 		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
2606 	}
2607 	if (rdev->num_crtc >= 6) {
2608 		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
2609 		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
2610 	}
2611 
2612 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
2613 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
2614 	if (rdev->num_crtc >= 4) {
2615 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
2616 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
2617 	}
2618 	if (rdev->num_crtc >= 6) {
2619 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
2620 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
2621 	}
2622 
2623 	WREG32(DC_HPD1_INT_CONTROL, hpd1);
2624 	WREG32(DC_HPD2_INT_CONTROL, hpd2);
2625 	WREG32(DC_HPD3_INT_CONTROL, hpd3);
2626 	WREG32(DC_HPD4_INT_CONTROL, hpd4);
2627 	WREG32(DC_HPD5_INT_CONTROL, hpd5);
2628 	WREG32(DC_HPD6_INT_CONTROL, hpd6);
2629 
2630 	return 0;
2631 }
2632 
2633 static inline void evergreen_irq_ack(struct radeon_device *rdev)
2634 {
2635 	u32 tmp;
2636 
2637 	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
2638 	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
2639 	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
2640 	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
2641 	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
2642 	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
2643 	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
2644 	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
2645 	if (rdev->num_crtc >= 4) {
2646 		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
2647 		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
2648 	}
2649 	if (rdev->num_crtc >= 6) {
2650 		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
2651 		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
2652 	}
2653 
2654 	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
2655 		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2656 	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
2657 		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2658 	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
2659 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
2660 	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
2661 		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
2662 	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
2663 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
2664 	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
2665 		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
2666 
2667 	if (rdev->num_crtc >= 4) {
2668 		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
2669 			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2670 		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
2671 			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2672 		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
2673 			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
2674 		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
2675 			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
2676 		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
2677 			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
2678 		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
2679 			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
2680 	}
2681 
2682 	if (rdev->num_crtc >= 6) {
2683 		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
2684 			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2685 		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
2686 			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2687 		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
2688 			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
2689 		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
2690 			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
2691 		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
2692 			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
2693 		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
2694 			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
2695 	}
2696 
2697 	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
2698 		tmp = RREG32(DC_HPD1_INT_CONTROL);
2699 		tmp |= DC_HPDx_INT_ACK;
2700 		WREG32(DC_HPD1_INT_CONTROL, tmp);
2701 	}
2702 	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
2703 		tmp = RREG32(DC_HPD2_INT_CONTROL);
2704 		tmp |= DC_HPDx_INT_ACK;
2705 		WREG32(DC_HPD2_INT_CONTROL, tmp);
2706 	}
2707 	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
2708 		tmp = RREG32(DC_HPD3_INT_CONTROL);
2709 		tmp |= DC_HPDx_INT_ACK;
2710 		WREG32(DC_HPD3_INT_CONTROL, tmp);
2711 	}
2712 	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
2713 		tmp = RREG32(DC_HPD4_INT_CONTROL);
2714 		tmp |= DC_HPDx_INT_ACK;
2715 		WREG32(DC_HPD4_INT_CONTROL, tmp);
2716 	}
2717 	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
2718 		tmp = RREG32(DC_HPD5_INT_CONTROL);
2719 		tmp |= DC_HPDx_INT_ACK;
2720 		WREG32(DC_HPD5_INT_CONTROL, tmp);
2721 	}
2722 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
2723 		tmp = RREG32(DC_HPD6_INT_CONTROL);
2724 		tmp |= DC_HPDx_INT_ACK;
2725 		WREG32(DC_HPD6_INT_CONTROL, tmp);
2726 	}
2727 }
2728 
2729 void evergreen_irq_disable(struct radeon_device *rdev)
2730 {
2731 	r600_disable_interrupts(rdev);
2732 	/* Wait and acknowledge irq */
2733 	mdelay(1);
2734 	evergreen_irq_ack(rdev);
2735 	evergreen_disable_interrupt_state(rdev);
2736 }
2737 
2738 void evergreen_irq_suspend(struct radeon_device *rdev)
2739 {
2740 	evergreen_irq_disable(rdev);
2741 	r600_rlc_stop(rdev);
2742 }
2743 
2744 static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
2745 {
2746 	u32 wptr, tmp;
2747 
2748 	if (rdev->wb.enabled)
2749 		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
2750 	else
2751 		wptr = RREG32(IH_RB_WPTR);
2752 
2753 	if (wptr & RB_OVERFLOW) {
2754 		/* When a ring buffer overflow happens, start parsing interrupts
2755 		 * from the last vector that was not overwritten (wptr + 16).
2756 		 * Hopefully this allows us to catch up.
2757 		 */
2758 		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
2759 			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
2760 		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
2761 		tmp = RREG32(IH_RB_CNTL);
2762 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
2763 		WREG32(IH_RB_CNTL, tmp);
2764 	}
2765 	return (wptr & rdev->ih.ptr_mask);
2766 }
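
/* IH ring entries are 16 bytes, so the overflow path above resumes one
 * vector past the write pointer (wptr + 16), i.e. at the oldest entry
 * that has not just been overwritten, assuming the ring wrapped once. */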
2767 
2768 int evergreen_irq_process(struct radeon_device *rdev)
2769 {
2770 	u32 wptr;
2771 	u32 rptr;
2772 	u32 src_id, src_data;
2773 	u32 ring_index;
2774 	unsigned long flags;
2775 	bool queue_hotplug = false;
2776 
2777 	if (!rdev->ih.enabled || rdev->shutdown)
2778 		return IRQ_NONE;
2779 
2780 	wptr = evergreen_get_ih_wptr(rdev);
2781 	rptr = rdev->ih.rptr;
2782 	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2783 
2784 	spin_lock_irqsave(&rdev->ih.lock, flags);
2785 	if (rptr == wptr) {
2786 		spin_unlock_irqrestore(&rdev->ih.lock, flags);
2787 		return IRQ_NONE;
2788 	}
2789 restart_ih:
2790 	/* Order reading of wptr vs. reading of IH ring data */
2791 	rmb();
2792 
2793 	/* display interrupts */
2794 	evergreen_irq_ack(rdev);
2795 
2796 	rdev->ih.wptr = wptr;
2797 	while (rptr != wptr) {
2798 		/* wptr/rptr are in bytes! */
2799 		ring_index = rptr / 4;
2800 		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
2801 		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
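		/* Each IH vector is four dwords (hence rptr += 16 at the
		 * bottom of the loop): dword 0 bits 7:0 carry the source id
		 * and dword 1 bits 27:0 the source data, matching the masks
		 * above. */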
2802 
2803 		switch (src_id) {
2804 		case 1: /* D1 vblank/vline */
2805 			switch (src_data) {
2806 			case 0: /* D1 vblank */
2807 				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
2808 					if (rdev->irq.crtc_vblank_int[0]) {
2809 						drm_handle_vblank(rdev->ddev, 0);
2810 						rdev->pm.vblank_sync = true;
2811 						wake_up(&rdev->irq.vblank_queue);
2812 					}
2813 					if (rdev->irq.pflip[0])
2814 						radeon_crtc_handle_flip(rdev, 0);
2815 					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2816 					DRM_DEBUG("IH: D1 vblank\n");
2817 				}
2818 				break;
2819 			case 1: /* D1 vline */
2820 				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
2821 					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
2822 					DRM_DEBUG("IH: D1 vline\n");
2823 				}
2824 				break;
2825 			default:
2826 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2827 				break;
2828 			}
2829 			break;
2830 		case 2: /* D2 vblank/vline */
2831 			switch (src_data) {
2832 			case 0: /* D2 vblank */
2833 				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
2834 					if (rdev->irq.crtc_vblank_int[1]) {
2835 						drm_handle_vblank(rdev->ddev, 1);
2836 						rdev->pm.vblank_sync = true;
2837 						wake_up(&rdev->irq.vblank_queue);
2838 					}
2839 					if (rdev->irq.pflip[1])
2840 						radeon_crtc_handle_flip(rdev, 1);
2841 					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
2842 					DRM_DEBUG("IH: D2 vblank\n");
2843 				}
2844 				break;
2845 			case 1: /* D2 vline */
2846 				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
2847 					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
2848 					DRM_DEBUG("IH: D2 vline\n");
2849 				}
2850 				break;
2851 			default:
2852 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2853 				break;
2854 			}
2855 			break;
2856 		case 3: /* D3 vblank/vline */
2857 			switch (src_data) {
2858 			case 0: /* D3 vblank */
2859 				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
2860 					if (rdev->irq.crtc_vblank_int[2]) {
2861 						drm_handle_vblank(rdev->ddev, 2);
2862 						rdev->pm.vblank_sync = true;
2863 						wake_up(&rdev->irq.vblank_queue);
2864 					}
2865 					if (rdev->irq.pflip[2])
2866 						radeon_crtc_handle_flip(rdev, 2);
2867 					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
2868 					DRM_DEBUG("IH: D3 vblank\n");
2869 				}
2870 				break;
2871 			case 1: /* D3 vline */
2872 				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
2873 					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
2874 					DRM_DEBUG("IH: D3 vline\n");
2875 				}
2876 				break;
2877 			default:
2878 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2879 				break;
2880 			}
2881 			break;
2882 		case 4: /* D4 vblank/vline */
2883 			switch (src_data) {
2884 			case 0: /* D4 vblank */
2885 				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
2886 					if (rdev->irq.crtc_vblank_int[3]) {
2887 						drm_handle_vblank(rdev->ddev, 3);
2888 						rdev->pm.vblank_sync = true;
2889 						wake_up(&rdev->irq.vblank_queue);
2890 					}
2891 					if (rdev->irq.pflip[3])
2892 						radeon_crtc_handle_flip(rdev, 3);
2893 					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
2894 					DRM_DEBUG("IH: D4 vblank\n");
2895 				}
2896 				break;
2897 			case 1: /* D4 vline */
2898 				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
2899 					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
2900 					DRM_DEBUG("IH: D4 vline\n");
2901 				}
2902 				break;
2903 			default:
2904 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2905 				break;
2906 			}
2907 			break;
2908 		case 5: /* D5 vblank/vline */
2909 			switch (src_data) {
2910 			case 0: /* D5 vblank */
2911 				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
2912 					if (rdev->irq.crtc_vblank_int[4]) {
2913 						drm_handle_vblank(rdev->ddev, 4);
2914 						rdev->pm.vblank_sync = true;
2915 						wake_up(&rdev->irq.vblank_queue);
2916 					}
2917 					if (rdev->irq.pflip[4])
2918 						radeon_crtc_handle_flip(rdev, 4);
2919 					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
2920 					DRM_DEBUG("IH: D5 vblank\n");
2921 				}
2922 				break;
2923 			case 1: /* D5 vline */
2924 				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
2925 					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
2926 					DRM_DEBUG("IH: D5 vline\n");
2927 				}
2928 				break;
2929 			default:
2930 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2931 				break;
2932 			}
2933 			break;
2934 		case 6: /* D6 vblank/vline */
2935 			switch (src_data) {
2936 			case 0: /* D6 vblank */
2937 				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
2938 					if (rdev->irq.crtc_vblank_int[5]) {
2939 						drm_handle_vblank(rdev->ddev, 5);
2940 						rdev->pm.vblank_sync = true;
2941 						wake_up(&rdev->irq.vblank_queue);
2942 					}
2943 					if (rdev->irq.pflip[5])
2944 						radeon_crtc_handle_flip(rdev, 5);
2945 					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
2946 					DRM_DEBUG("IH: D6 vblank\n");
2947 				}
2948 				break;
2949 			case 1: /* D6 vline */
2950 				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
2951 					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
2952 					DRM_DEBUG("IH: D6 vline\n");
2953 				}
2954 				break;
2955 			default:
2956 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2957 				break;
2958 			}
2959 			break;
2960 		case 42: /* HPD hotplug */
2961 			switch (src_data) {
2962 			case 0:
2963 				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
2964 					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
2965 					queue_hotplug = true;
2966 					DRM_DEBUG("IH: HPD1\n");
2967 				}
2968 				break;
2969 			case 1:
2970 				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
2971 					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
2972 					queue_hotplug = true;
2973 					DRM_DEBUG("IH: HPD2\n");
2974 				}
2975 				break;
2976 			case 2:
2977 				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
2978 					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
2979 					queue_hotplug = true;
2980 					DRM_DEBUG("IH: HPD3\n");
2981 				}
2982 				break;
2983 			case 3:
2984 				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
2985 					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
2986 					queue_hotplug = true;
2987 					DRM_DEBUG("IH: HPD4\n");
2988 				}
2989 				break;
2990 			case 4:
2991 				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
2992 					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
2993 					queue_hotplug = true;
2994 					DRM_DEBUG("IH: HPD5\n");
2995 				}
2996 				break;
2997 			case 5:
2998 				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
2999 					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
3000 					queue_hotplug = true;
3001 					DRM_DEBUG("IH: HPD6\n");
3002 				}
3003 				break;
3004 			default:
3005 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3006 				break;
3007 			}
3008 			break;
3009 		case 176: /* CP_INT in ring buffer */
3010 		case 177: /* CP_INT in IB1 */
3011 		case 178: /* CP_INT in IB2 */
3012 			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3013 			radeon_fence_process(rdev);
3014 			break;
3015 		case 181: /* CP EOP event */
3016 			DRM_DEBUG("IH: CP EOP\n");
3017 			radeon_fence_process(rdev);
3018 			break;
3019 		case 233: /* GUI IDLE */
3020 			DRM_DEBUG("IH: GUI idle\n");
3021 			rdev->pm.gui_idle = true;
3022 			wake_up(&rdev->irq.idle_queue);
3023 			break;
3024 		default:
3025 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3026 			break;
3027 		}
3028 
3029 		/* wptr/rptr are in bytes! */
3030 		rptr += 16;
3031 		rptr &= rdev->ih.ptr_mask;
3032 	}
3033 	/* make sure wptr hasn't changed while processing */
3034 	wptr = evergreen_get_ih_wptr(rdev);
3035 	if (wptr != rdev->ih.wptr)
3036 		goto restart_ih;
3037 	if (queue_hotplug)
3038 		schedule_work(&rdev->hotplug_work);
3039 	rdev->ih.rptr = rptr;
3040 	WREG32(IH_RB_RPTR, rdev->ih.rptr);
3041 	spin_unlock_irqrestore(&rdev->ih.lock, flags);
3042 	return IRQ_HANDLED;
3043 }
3044 
3045 static int evergreen_startup(struct radeon_device *rdev)
3046 {
3047 	int r;
3048 
3049 	/* enable pcie gen2 link */
3050 	if (!ASIC_IS_DCE5(rdev))
3051 		evergreen_pcie_gen2_enable(rdev);
3052 
3053 	if (ASIC_IS_DCE5(rdev)) {
3054 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
3055 			r = ni_init_microcode(rdev);
3056 			if (r) {
3057 				DRM_ERROR("Failed to load firmware!\n");
3058 				return r;
3059 			}
3060 		}
3061 		r = ni_mc_load_microcode(rdev);
3062 		if (r) {
3063 			DRM_ERROR("Failed to load MC firmware!\n");
3064 			return r;
3065 		}
3066 	} else {
3067 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
3068 			r = r600_init_microcode(rdev);
3069 			if (r) {
3070 				DRM_ERROR("Failed to load firmware!\n");
3071 				return r;
3072 			}
3073 		}
3074 	}
3075 
3076 	evergreen_mc_program(rdev);
3077 	if (rdev->flags & RADEON_IS_AGP) {
3078 		evergreen_agp_enable(rdev);
3079 	} else {
3080 		r = evergreen_pcie_gart_enable(rdev);
3081 		if (r)
3082 			return r;
3083 	}
3084 	evergreen_gpu_init(rdev);
3085 
3086 	r = evergreen_blit_init(rdev);
3087 	if (r) {
3088 		evergreen_blit_fini(rdev);
3089 		rdev->asic->copy = NULL;
3090 		dev_warn(rdev->dev, "failed blitter (%d), falling back to memcpy\n", r);
3091 	}
3092 
3093 	/* allocate wb buffer */
3094 	r = radeon_wb_init(rdev);
3095 	if (r)
3096 		return r;
3097 
3098 	/* Enable IRQ */
3099 	r = r600_irq_init(rdev);
3100 	if (r) {
3101 		DRM_ERROR("radeon: IH init failed (%d).\n", r);
3102 		radeon_irq_kms_fini(rdev);
3103 		return r;
3104 	}
3105 	evergreen_irq_set(rdev);
3106 
3107 	r = radeon_ring_init(rdev, rdev->cp.ring_size);
3108 	if (r)
3109 		return r;
3110 	r = evergreen_cp_load_microcode(rdev);
3111 	if (r)
3112 		return r;
3113 	r = evergreen_cp_resume(rdev);
3114 	if (r)
3115 		return r;
3116 
3117 	return 0;
3118 }
3119 
3120 int evergreen_resume(struct radeon_device *rdev)
3121 {
3122 	int r;
3123 
3124 	/* Reset the ASIC; the gfx blocks are often in a bad state
3125 	 * after the driver is unloaded or after a resume.
3126 	 */
3127 	if (radeon_asic_reset(rdev))
3128 		dev_warn(rdev->dev, "GPU reset failed !\n");
3129 	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
3130 	 * posting will perform necessary task to bring back GPU into good
3131 	 * shape.
3132 	 */
3133 	/* post card */
3134 	atom_asic_init(rdev->mode_info.atom_context);
3135 
3136 	r = evergreen_startup(rdev);
3137 	if (r) {
3138 		DRM_ERROR("evergreen startup failed on resume\n");
3139 		return r;
3140 	}
3141 
3142 	r = r600_ib_test(rdev);
3143 	if (r) {
3144 		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
3145 		return r;
3146 	}
3147 
3148 	return r;
3149 
3150 }

int evergreen_suspend(struct radeon_device *rdev)
{
	int r;

	/* FIXME: we should wait for the ring to be empty */
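	/* teardown order mirrors startup in reverse: stop the CP first so
	 * nothing is fetching through the GART when it is disabled below.
	 */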
	r700_cp_stop(rdev);
	rdev->cp.ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	evergreen_pcie_gart_disable(rdev);

	/* unpin shaders bo */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}

	return 0;
}

int evergreen_copy_blit(struct radeon_device *rdev,
			uint64_t src_offset,
			uint64_t dst_offset,
			unsigned num_gpu_pages,
			struct radeon_fence *fence)
{
	int r;

	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
	if (r) {
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
		return r;
	}
	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
	evergreen_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);
	return 0;
}
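/* Minimal usage sketch (hypothetical caller; `size`, `src_gpu_addr` and
 * `dst_gpu_addr` are illustrative names, not from this file): copy `size`
 * bytes between two GPU addresses, rounding up to whole GPU pages since
 * this function takes a page count.
 *
 *	unsigned num_gpu_pages = (size + RADEON_GPU_PAGE_SIZE - 1) /
 *				 RADEON_GPU_PAGE_SIZE;
 *	r = evergreen_copy_blit(rdev, src_gpu_addr, dst_gpu_addr,
 *				num_gpu_pages, fence);
 */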

/* The plan is to move initialization into this function and to use
 * helper functions so that radeon_device_init does little more than
 * call ASIC-specific functions. This should also allow us to remove
 * a bunch of callbacks such as vram_info.
 */
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* reset the asic, as the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed!\n");
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - aborting\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. Posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP; on failure fall back to the PCI(e) GART path */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

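	/* the 64 KiB IH ring set up below, assuming r600_ih_ring_init()
	 * stores a power-of-two size and sets ptr_mask = ring_size - 1,
	 * yields the 0xffff wrap mask used in evergreen_irq_process().
	 */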
	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_irq_kms_fini(rdev);
		evergreen_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
	}
	return 0;
}

void evergreen_fini(struct radeon_device *rdev)
{
	evergreen_blit_fini(rdev);
	r700_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	evergreen_pcie_gart_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, speed_cntl;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
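		/* a sketch of the check the XXX above refers to (illustrative
		 * only; an untested assumption about how the quirk might look):
		 *
		 *	struct pci_dev *bridge = rdev->pdev->bus->self;
		 *	bool quirk = bridge && (bridge->vendor == 0x111d ||
		 *				bridge->vendor == 0x1106);
		 *	if (quirk) ... else ...
		 */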
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}